#ifdef __KERNEL__
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/config.h>
#include <linux/cache.h>
#include <linux/smp_lock.h>
#include <asm/irq.h>		/* for NR_IRQS, tested below */
/* The __last_jiffy_stamp field is needed to ensure that no decrementer
 * interrupt is lost on SMP machines. Since on most CPUs it is in the same
 * cache line as local_irq_count, it is cheap to access and is also used on UP
 * for uniformity.
 */
typedef struct {
	unsigned long __softirq_pending;	/* set_bit is used on this */
	unsigned int __last_jiffy_stamp;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
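
/*
 * Illustrative note (ours, assuming the generic definitions in
 * <linux/irq_cpustat.h>): __IRQ_STAT(cpu, member) indexes the per-CPU
 * irq_stat[] array, so last_jiffy_stamp(0) reads roughly as
 * irq_stat[0].__last_jiffy_stamp.
 */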
/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
 *
 * - ( bit 26 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x00ff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	8

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
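
/*
 * Worked example (ours, derived from the masks above): if
 * preempt_count() == 0x00010102, then hardirq_count() == 0x00010000
 * (one hardirq), softirq_count() == 0x00000100 (inside a softirq), and
 * the plain preemption depth is 2.  Each nested hardirq moves the
 * count by HARDIRQ_OFFSET (0x10000).
 */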
/*
 * The hardirq mask has to be large enough to have
 * space for potentially all IRQ sources in the system
 * nesting on a single CPU:
 */
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
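
/*
 * Hypothetical usage sketch (ours, not part of the original header):
 * code that can run in either context often selects its allocation
 * behaviour with something like
 *
 *	int flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *
 * since sleeping is forbidden whenever in_interrupt() is non-zero.
 */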
#define hardirq_trylock()	(!in_interrupt())
#define hardirq_endlock()	do { } while (0)

#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
#ifdef CONFIG_PREEMPT
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic()	(preempt_count() != 0)
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
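
/*
 * Our reading of IRQ_EXIT_OFFSET (comment added here, not in the
 * original file): irq_exit() below subtracts IRQ_EXIT_OFFSET, so under
 * CONFIG_PREEMPT the HARDIRQ_OFFSET-1 value leaves the preempt count
 * one above zero while pending softirqs run, keeping the task
 * non-preemptible; the trailing preempt_enable_no_resched() drops that
 * final reference.
 */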
#define irq_exit()							\
do {									\
	preempt_count() -= IRQ_EXIT_OFFSET;				\
	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
		do_softirq();						\
	preempt_enable_no_resched();					\
} while (0)
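
/*
 * Hedged usage sketch (ours): low-level interrupt entry code is
 * expected to bracket the real handler with this pair, e.g.
 *
 *	irq_enter();
 *	handle_this_irq(irq, regs);	(hypothetical dispatch routine)
 *	irq_exit();
 */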
#ifndef CONFIG_SMP
# define synchronize_irq(irq)	barrier()
#else
extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
#endif /* __ASM_HARDIRQ_H */
#endif /* __KERNEL__ */