/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif	/* __ARCH_IRQ_STAT */

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();

	local_bh_disable();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	local_softirq_pending() = 0;

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif

void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count() -= SOFTIRQ_OFFSET - 1;

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);

void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
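
/*
 * Illustrative sketch: a subsystem that owns a softirq slot registers
 * its handler once at init time and raises the softirq whenever there
 * is work.  MY_SOFTIRQ and my_softirq_action() are hypothetical names:
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
 *	...
 *	raise_softirq(MY_SOFTIRQ);
 *
 * Code that already runs with interrupts disabled should use
 * raise_softirq_irqoff(MY_SOFTIRQ) instead.
 */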

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Could not run it now: put it back and re-raise the softirq */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Could not run it now: put it back and re-raise the softirq */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
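
/*
 * Illustrative sketch: a driver typically declares, schedules and
 * finally kills a tasklet along these lines (my_tasklet and
 * my_tasklet_fn() are hypothetical names).  The handler runs in softirq
 * context and is never run concurrently with itself:
 *
 *	static void my_tasklet_fn(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	tasklet_schedule(&my_tasklet);		from the interrupt handler
 *	tasklet_kill(&my_tasklet);		on driver teardown
 *
 * tasklet_init() above is the run-time equivalent of DECLARE_TASKLET
 * for tasklets embedded in dynamically allocated structures.
 */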

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
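
/*
 * Usage note: tasklet_kill() may yield and so must not be called from
 * interrupt context (hence the printk above).  A typical teardown path
 * first makes sure nothing can re-schedule the tasklet (e.g. by calling
 * free_irq() on the interrupt that schedules it) and only then calls
 * tasklet_kill() from process context.
 */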

/* Dump support: stash and drain the local CPU's pending tasklet list. */
struct tasklet_head saved_tasklet;

void dump_clear_tasklet(void)
{
	saved_tasklet.list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
}

EXPORT_SYMBOL_GPL(dump_clear_tasklet);

void dump_run_tasklet(void)
{
	struct tasklet_struct *list;

	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (!atomic_read(&t->count) &&
		    (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
			t->func(t->data);
		else {
			/* Could not run it now: put it back */
			t->next = __get_cpu_var(tasklet_vec).list;
			__get_cpu_var(tasklet_vec).list = t;
		}
	}
}

EXPORT_SYMBOL_GPL(dump_run_tasklet);

void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			preempt_disable();
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable();
			cond_resched();
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}