/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/vs_context.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution.  Hence, we get a sort of weak CPU binding, though it
     is still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq.  It is multithreaded and does not require
     any global serialization.
   - NET TX softirq.  It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.

   (An illustrative, CPU-local softirq handler is sketched below.)
 */
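/*
 * Illustrative sketch, not part of this file: a softirq action that plays
 * by the rules above by keeping all of its state CPU-local.  The names
 * example_list and example_action are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct list_head, example_list);
 *
 *	static void example_action(struct softirq_action *a)
 *	{
 *		struct list_head *head = &__get_cpu_var(example_list);
 *
 *		(runs with bottom halves disabled on this CPU only and never
 *		 touches another CPU's example_list, so it needs no locking)
 *	}
 */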
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_disable);
void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
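/*
 * Usage sketch, not part of this file: process context protecting data it
 * shares with softirq/tasklet context by disabling bottom halves locally.
 * The names example_lock and example_count are hypothetical.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	static unsigned long example_count;
 *
 *	void example_update(void)
 *	{
 *		local_bh_disable();		(keep this CPU's softirqs out)
 *		spin_lock(&example_lock);	(keep other CPUs' softirqs out)
 *		example_count++;
 *		spin_unlock(&example_lock);
 *		local_bh_enable();		(may run now-pending softirqs)
 *	}
 *
 * spin_lock_bh()/spin_unlock_bh() combine the two steps; the open-coded
 * form above only illustrates local_bh_disable()/local_bh_enable().
 */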
void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);
	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	local_irq_enable();

	h = softirq_vec;
	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();
	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();
	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		__do_softirq();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context.  Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code).  We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
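/*
 * Usage sketch, not part of this file: code that already runs with
 * interrupts disabled (a hardirq handler, or a local_irq_save() section)
 * may call raise_softirq_irqoff() directly; everything else should use
 * raise_softirq(), which wraps it as above.  EXAMPLE_SOFTIRQ stands in
 * for one of the real softirq numbers (HI_SOFTIRQ, NET_RX_SOFTIRQ,
 * TASKLET_SOFTIRQ, ...).
 *
 *	local_irq_save(flags);
 *	(queue the per-CPU work the softirq handler will consume)
 *	raise_softirq_irqoff(EXAMPLE_SOFTIRQ);
 *	local_irq_restore(flags);
 *
 * From preemptible context, raise_softirq(EXAMPLE_SOFTIRQ) is the safe
 * one-liner.
 */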
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
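/*
 * Usage sketch, not part of this file, of the tasklet API implemented
 * above.  The names example_func and example_tasklet are hypothetical.
 *
 *	static void example_func(unsigned long data)
 *	{
 *		(runs in softirq context; never on two CPUs at once)
 *	}
 *
 *	static DECLARE_TASKLET(example_tasklet, example_func, 0);
 *
 *	From an interrupt handler:
 *		tasklet_schedule(&example_tasklet);
 *
 *	On teardown, after making sure nothing reschedules it:
 *		tasklet_kill(&example_tasklet);
 */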
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
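/*
 * Usage sketch, not part of this file: run a function on every online CPU,
 * including the caller's, and wait for completion.  example_flush is
 * hypothetical; the third argument is the retry flag passed through to
 * smp_call_function(), the fourth asks it to wait for all CPUs to finish.
 *
 *	static void example_flush(void *info)
 *	{
 *		(called on each CPU: on remote CPUs from the IPI handler,
 *		 on the local CPU with interrupts disabled, as above)
 *	}
 *
 *	on_each_cpu(example_flush, NULL, 0, 1);
 */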