/*
 *  include/asm-s390/hardirq.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "include/asm-i386/hardirq.h"
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/config.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <asm/lowcore.h>
/* irq_cpustat_t is unused currently, but could be converted
 * into a percpu variable instead of storing softirq_pending
 * in the lowcore */
typedef struct {
	unsigned int __softirq_pending;
} irq_cpustat_t;
#define local_softirq_pending() (S390_lowcore.softirq_pending)
/* this is always called with cpu == smp_processor_id() at the moment */
static inline __u32
softirq_pending(unsigned int cpu)
{
	if (cpu == smp_processor_id())
		return local_softirq_pending();
	return lowcore_ptr[cpu]->softirq_pending;
}
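/*
 * Illustrative use, not from the original header: summing the pending
 * softirq bits across all online CPUs, e.g. for reporting purposes.
 *
 *	unsigned int cpu, pending = 0;
 *	for_each_online_cpu(cpu)
 *		pending |= softirq_pending(cpu);
 */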
#define __ARCH_IRQ_STAT

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
 *
 * - ( bit 26 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x00ff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	8

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
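/*
 * Worked values implied by the 8/8/8 split above (illustrative,
 * not in the original source):
 *
 *	PREEMPT_OFFSET = 0x00000001
 *	SOFTIRQ_OFFSET = 0x00000100
 *	HARDIRQ_OFFSET = 0x00010000
 *
 * A hardirq arriving while a softirq runs therefore leaves
 * preempt_count() == 0x00010100: one hardirq, one softirq,
 * preemption depth zero.
 */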
/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
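/*
 * Example use of these predicates (illustrative, not from this file):
 * code that may run in either context can pick a non-sleeping
 * allocation mode when interrupts are being processed.
 *
 *	int gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 */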
#define hardirq_trylock()	(!in_interrupt())
#define hardirq_endlock()	do { } while (0)
#define irq_enter()						\
do {								\
	(preempt_count() += HARDIRQ_OFFSET);			\
} while(0)
extern void do_call_softirq(void);
extern void account_ticks(struct pt_regs *);

#define invoke_softirq()	do_call_softirq()
#ifdef CONFIG_PREEMPT
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic()	(preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
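/*
 * Note on IRQ_EXIT_OFFSET (explanatory, not in the original source):
 * with CONFIG_PREEMPT, irq_exit() subtracts one less than
 * HARDIRQ_OFFSET, so preempt_count() stays non-zero while pending
 * softirqs are handled; the final preempt_enable_no_resched()
 * drops that last unit.
 */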
#define irq_exit()						\
do {								\
	preempt_count() -= IRQ_EXIT_OFFSET;			\
	if (!in_interrupt() && local_softirq_pending())		\
		/* Use the async. stack for softirq */		\
		do_call_softirq();				\
	preempt_enable_no_resched();				\
} while (0)
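/*
 * Sketch of the usual pairing in an interrupt handler (illustrative;
 * on s390 the real entry code lives in assembly):
 *
 *	irq_enter();	-> hardirq count raised, in_irq() becomes true
 *	... service the device ...
 *	irq_exit();	-> count dropped; any pending softirqs run on
 *			   the async stack via do_call_softirq()
 */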
#endif /* __ASM_HARDIRQ_H */