1 #ifndef __ASM_HARDIRQ_H
2 #define __ASM_HARDIRQ_H
4 #include <linux/config.h>
5 #include <linux/cache.h>
6 #include <linux/threads.h>
/* Per-CPU IRQ bookkeeping (one irq_cpustat_t per CPU).  The standard
 * accessor macros for these fields come from <linux/irq_cpustat.h>,
 * included below; the double-underscore names signal that the fields
 * are meant to be reached through those accessors, not directly. */
10 unsigned int __softirq_pending;	/* bitmask of raised-but-unserviced softirqs — presumably tested via local_softirq_pending(); confirm */
11 unsigned int __local_irq_count;	/* NOTE(review): looks like a per-CPU hardirq nesting count — confirm against users */
12 unsigned int __local_bh_count;	/* NOTE(review): looks like a per-CPU bottom-half disable/nesting count — confirm against users */
13 unsigned int __syscall_count;	/* per-CPU syscall counter — presumably statistics; TODO confirm */
14 struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
15 } ____cacheline_aligned irq_cpustat_t;	/* cacheline-aligned so each CPU's stats live in their own line */
17 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20 * We put the hardirq and softirq counter into the preemption
21 * counter. The bitmask has the following meaning:
23 * - bits 0-7 are the preemption count (max depth: 256)
24 * - bits 8-15 are the softirq count (max # of softirqs: 256)
25 * - bits 16-24 are the hardirq count (max # of hardirqs: 512 when HARDIRQ_BITS is 9; bits 16-23 / max 256 when it is 8)
26 * - bit 26 is the PREEMPT_ACTIVE flag
28 * We optimize HARDIRQ_BITS for immediate constant, and only
29 * increase it if really needed.
31 #define PREEMPT_BITS 8	/* preemption-count field width: bits 0-7 (max depth 256) */
32 #define SOFTIRQ_BITS 8	/* softirq-count field width: bits 8-15 (max depth 256) */
35 #define HARDIRQ_BITS 9	/* wide variant, selected by an #if outside this view (presumably large-NR_IRQS configs); 2^9 = 512 matches the bit-layout comment above */
37 #define HARDIRQ_BITS 8	/* narrow default — per the comment above, kept small so the hardirq mask stays an immediate constant */
40 #define PREEMPT_SHIFT 0	/* preemption count occupies the lowest bits */
41 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)	/* softirq count packed directly above the preempt count */
42 #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* hardirq count packed directly above the softirq count */
45 * The hardirq mask has to be large enough to have space
46 * for potentially all IRQ sources in the system nesting
/* Compile-time sanity check: the hardirq field must be wide enough to
 * count one nested occurrence of every IRQ source in the system.
 * (The matching #endif falls outside this chunk.) */
49 #if (1 << HARDIRQ_BITS) < NR_IRQS
50 # error HARDIRQ_BITS is too low!
/* Mark entry into hardirq context by adding one unit to the hardirq
 * field of the preemption counter (bit layout per the comment above).
 * NOTE(review): presumably the nonzero count is also what makes
 * in_interrupt() report true — confirm in <linux/preempt.h>. */
53 #define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
/* Softirq dispatch loop, defined outside this file (presumably
 * kernel/softirq.c — confirm).  asmlinkage fixes the calling
 * convention so low-level entry code can invoke it directly. */
56 extern asmlinkage void __do_softirq(void);
60 preempt_count() -= IRQ_EXIT_OFFSET; \
61 if (!in_interrupt() && local_softirq_pending()) \
63 preempt_enable_no_resched(); \
67 #endif /* __ASM_HARDIRQ_H */