/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this actually results in
     better locality.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself (an illustrative driver-side
     sketch follows this comment).
 */
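/*
 * Illustrative sketch (not part of the original file): how a driver might
 * use the tasklet API to defer work out of its interrupt handler, relying
 * on the per-CPU scheduling and tasklet serialization described above.
 * The names example_tasklet_fn, example_tasklet and example_irq_handler
 * are hypothetical.
 */
static void example_tasklet_fn(unsigned long data)
{
	/* Runs later in softirq context, serialized wrt itself. */
	printk(KERN_DEBUG "deferred work, data=%lu\n", data);
}

static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 0);

static irqreturn_t example_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Hard-irq context: only mark the tasklet pending on this CPU. */
	tasklet_schedule(&example_tasklet);
	return IRQ_HANDLED;
}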
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;

	pending = local_softirq_pending();

	local_bh_disable();
restart:
	/* Reset the pending bitmask before enabling irqs */
	local_softirq_pending() = 0;

	local_irq_enable();

	/* Run the handler for each pending softirq bit, lowest bit first. */
	h = softirq_vec;
	do {
		if (pending & 1)
			h->action(h);
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/* Keep preemption disabled until softirq processing is done. */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() &&
		     local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(raise_softirq);
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
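/*
 * Illustrative sketch (not part of the original file): how a subsystem would
 * wire a handler into the softirq vector with open_softirq() and trigger it
 * later with raise_softirq(). EXAMPLE_SOFTIRQ, example_softirq_action,
 * example_softirq_setup and example_event are hypothetical; a real softirq
 * number must be a new entry in the softirq enum in <linux/interrupt.h>,
 * not a local define.
 */
#define EXAMPLE_SOFTIRQ 6	/* hypothetical, unused slot in softirq_vec[] */

static void example_softirq_action(struct softirq_action *a)
{
	/* Runs with irqs enabled and bottom halves disabled, on the raising CPU. */
	printk(KERN_DEBUG "example softirq ran, data=%p\n", a->data);
}

static void __init example_softirq_setup(void)
{
	/* Typically done once at boot time. */
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action, NULL);
}

static void example_event(void)
{
	/* Safe from process or irq context; raise_softirq() disables irqs itself. */
	raise_softirq(EXAMPLE_SOFTIRQ);
}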
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Not runnable now: put it back on the list and re-raise. */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Not runnable now: put it back on the list and re-raise. */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
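/*
 * Illustrative sketch (not part of the original file): the usual lifecycle
 * of a dynamically initialized tasklet, set up with tasklet_init() and torn
 * down with tasklet_kill() before its owning object goes away. The names
 * example_device, example_work_fn and the example_device_* helpers are
 * hypothetical.
 */
struct example_device {
	struct tasklet_struct tasklet;
};

static void example_work_fn(unsigned long data)
{
	struct example_device *dev = (struct example_device *)data;

	/* Deferred, softirq-context work for this device. */
	(void)dev;
}

static void example_device_setup(struct example_device *dev)
{
	tasklet_init(&dev->tasklet, example_work_fn, (unsigned long)dev);
}

static void example_device_teardown(struct example_device *dev)
{
	/* Must not be called from interrupt context; may yield while waiting. */
	tasklet_kill(&dev->tasklet);
}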
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			preempt_disable();
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable();
			cond_resched();
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
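/*
 * Illustrative sketch (not part of the original file): a hypothetical per-cpu
 * subsystem removing its own tasklet from a dead CPU's pending list out of
 * its CPU_DEAD notifier callback. example_cpu_tasklet and example_cpu_dead
 * are assumed names, not existing kernel symbols.
 */
static DEFINE_PER_CPU(struct tasklet_struct, example_cpu_tasklet);

static void example_cpu_dead(unsigned int cpu)
{
	/* The CPU is already dead, so its pending list can no longer run;
	   drop our tasklet from it immediately. */
	tasklet_kill_immediate(&per_cpu(example_cpu_tasklet, cpu), cpu);
}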
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}