#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle it.
 * IRQ_HANDLED means that we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two depending on x being non-zero
 * (for handled).
 */
typedef int irqreturn_t;

#define IRQ_NONE	(0)
#define IRQ_HANDLED	(1)
#define IRQ_RETVAL(x)	((x) != 0)
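/*
 * A minimal sketch (not part of this header) of a handler using
 * IRQ_RETVAL(): "foo_device" and "foo_clear_pending()" are hypothetical
 * names standing in for a real driver's device structure and its
 * "did our hardware raise this interrupt?" check.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct foo_device *foo = dev_id;
	int handled = foo_clear_pending(foo);	/* nonzero if it was ours */

	return IRQ_RETVAL(handled);	/* IRQ_HANDLED iff handled != 0 */
}
#endif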
struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};
extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
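/*
 * Sketch of the usual request_irq()/free_irq() pairing, reusing the
 * hypothetical foo_interrupt() above. SA_SHIRQ (shared line) is assumed
 * from the arch headers of this era; the dev_id passed to free_irq()
 * must match the one given to request_irq().
 */
#if 0
static int foo_attach(struct foo_device *foo)
{
	int err = request_irq(foo->irq, foo_interrupt, SA_SHIRQ, "foo", foo);
	if (err)
		return err;	/* e.g. -EBUSY if the line cannot be shared */
	return 0;
}

static void foo_detach(struct foo_device *foo)
{
	free_irq(foo->irq, foo);
}
#endif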
#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif
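/*
 * Note that disable_irq()/enable_irq() nest: every disable must be
 * balanced by exactly one enable. A sketch, with a hypothetical
 * foo_reprogram() that must not race with the handler:
 */
#if 0
	disable_irq(foo->irq);	/* also waits for a running handler to finish */
	foo_reprogram(foo);
	enable_irq(foo->irq);
#endif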
/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
# define cli()			local_irq_disable()
# define sti()			local_irq_enable()
# define save_flags(x)		local_save_flags(x)
# define restore_flags(x)	local_irq_restore(x)
# define save_and_cli(x)	local_irq_save(x)
#endif
/* SoftIRQ primitives.  */
#define local_bh_disable() \
		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)

extern void local_bh_enable(void);
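/*
 * Sketch: fencing out softirq/tasklet code that touches the same data
 * on this CPU. foo_list and foo_entry are hypothetical; on SMP a
 * spinlock (spin_lock_bh()) would be needed as well. Note that
 * local_bh_enable() also runs any softirqs that became pending.
 */
#if 0
static void foo_add(struct foo_entry *e)
{
	local_bh_disable();		/* no softirq can preempt us here */
	list_add(&e->list, &foo_list);
	local_bh_enable();		/* re-enable, run pending softirqs */
}
#endif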
/* PLEASE, avoid allocating new softirqs unless you really need very
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	SCSI_SOFTIRQ,
	TASKLET_SOFTIRQ
};
/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
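/*
 * Sketch of wiring up a softirq, subject to the warning above; only
 * genuinely high-frequency work justifies this over a tasklet.
 * FOO_SOFTIRQ is a hypothetical entry added to the enum above, and
 * foo_irq()/foo_softirq_action() are made-up names.
 */
#if 0
static void foo_softirq_action(struct softirq_action *a)
{
	/* runs in softirq context, with hardware interrupts enabled */
}

static void foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action, NULL);
}

static irqreturn_t foo_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	raise_softirq(FOO_SOFTIRQ);	/* foo_softirq_action() runs soon */
	return IRQ_HANDLED;
}
#endif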
/* Tasklets --- multithreaded analogue of BHs.

   The main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets. If a client needs inter-tasklet synchronization, it must
     provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
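/*
 * Sketch: a statically declared tasklet as the bottom half of the
 * hypothetical foo driver. The top half only quiesces the hardware
 * (foo_ack_irq(), made up) and defers the rest.
 */
#if 0
static void foo_do_work(unsigned long data)
{
	struct foo_device *foo = (struct foo_device *)data;
	/* non-urgent work, serialized wrt this tasklet itself */
}

static DECLARE_TASKLET(foo_tasklet, foo_do_work, (unsigned long)&foo_dev);

static irqreturn_t foo_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	foo_ack_irq(dev_id);
	tasklet_schedule(&foo_tasklet);	/* runs foo_do_work() soon */
	return IRQ_HANDLED;
}
#endif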
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
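/*
 * Sketch: dynamic initialization for a tasklet embedded in a per-device
 * structure, with the matching teardown. tasklet_kill() must run before
 * the containing object is freed, or a still-scheduled tasklet could
 * touch freed memory. foo_do_work() is the hypothetical function from
 * the sketch above.
 */
#if 0
struct foo_device {
	struct tasklet_struct tasklet;
	/* ... */
};

static void foo_setup(struct foo_device *foo)
{
	tasklet_init(&foo->tasklet, foo_do_work, (unsigned long)foo);
}

static void foo_teardown(struct foo_device *foo)
{
	tasklet_kill(&foo->tasklet);	/* wait until it cannot run again */
}
#endif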
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
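/*
 * Sketch of the recipe above for a hypothetical ISA-style device;
 * foo_mask_irq()/foo_arm_irq()/foo_ack_irq() stand in for the
 * device-specific register pokes, and mdelay() is assumed from
 * <linux/delay.h>. Interrupts are assumed already enabled (step 2).
 */
#if 0
static int foo_find_irq(struct foo_device *foo)
{
	unsigned long irqs;
	int irq;

	foo_mask_irq(foo);		/* 1. quiesce the device */
	irqs = probe_irq_on();		/* 3. take over idle IRQs */
	foo_arm_irq(foo);		/* 4. make the device interrupt */
	mdelay(10);			/* 5. give it time to fire */
	irq = probe_irq_off(irqs);	/* 6. 0 = none, <0 = multiple */
	foo_ack_irq(foo);		/* 7. clear the pending interrupt */

	return irq > 0 ? irq : -ENODEV;
}
#endif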
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
extern void dump_clear_tasklet(void);
extern void dump_run_tasklet(void);

#endif /* _LINUX_INTERRUPT_H */