/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/vs_context.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
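
/*
 * Note on the bookkeeping used throughout this file: softirq state is
 * tracked in the softirq byte of preempt_count, so softirq_count() and
 * in_interrupt() below are just masks over it. Every __local_bh_disable()
 * adds SOFTIRQ_OFFSET; the matching enable paths subtract it again.
 */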
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
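
/*
 * local_bh_enable() below drops SOFTIRQ_OFFSET - 1 rather than the full
 * SOFTIRQ_OFFSET: that re-enables softirqs while still holding one
 * preempt_count reference, so any softirqs that became pending while
 * they were disabled are run right here, on this CPU, before preemption
 * can migrate the task. The final dec_preempt_count() drops the last
 * reference.
 */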
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
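
/*
 * __do_softirq() works by snapshotting and clearing the per-cpu pending
 * bitmask with hardirqs off, then running the corresponding actions with
 * hardirqs enabled. Any softirq raised meanwhile sets the bitmask again,
 * which is why the function loops, up to MAX_SOFTIRQ_RESTART times,
 * before handing the remainder to ksoftirqd. The __enter_vx_admin() /
 * __leave_vx_admin() pair is the Linux-VServer context switch bracketing
 * softirq processing.
 */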
asmlinkage void __do_softirq(void)
{
	struct vx_info_save vxis;
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	__enter_vx_admin(&vxis);
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();
	__leave_vx_admin(&vxis);
	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
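
/*
 * On architectures that guarantee hardirqs are still disabled when
 * irq_exit() runs, we can call __do_softirq() directly and skip the
 * redundant in_interrupt() check and irq save/restore that
 * do_softirq() performs.
 */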
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}
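
/*
 * Illustrative (hypothetical) usage - MY_SOFTIRQ is not a real vector
 * and would have to be added to the softirq enum in interrupt.h:
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		... per-cpu work, must not sleep ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action, NULL);
 *	raise_softirq(MY_SOFTIRQ);
 */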
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
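
/*
 * tasklet_action() runs with the per-cpu list detached, so new
 * schedules go onto a fresh list. A tasklet whose trylock fails is
 * running on another CPU, and one with a nonzero disable count must
 * not run yet; either way it is pushed back onto the list and the
 * softirq is re-raised so it gets another look later.
 */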
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
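
/*
 * Typical usage (sketch; my_tasklet and my_func are hypothetical names):
 *
 *	static void my_func(unsigned long data)
 *	{
 *		... runs in softirq context, must not sleep ...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_func, 0);
 *
 *	tasklet_schedule(&my_tasklet);
 */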
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
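
/*
 * ksoftirqd is the per-cpu thread that takes over softirq processing
 * when __do_softirq() gives up after MAX_SOFTIRQ_RESTART rounds. It
 * sleeps until work is pending, then calls do_softirq() in a loop,
 * toggling preemption so it can be rescheduled, and bails out to
 * wait_to_die if its CPU is taken offline underneath it.
 */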
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
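
/*
 * takeover_tasklets() splices the dead CPU's tasklet lists onto the
 * tail of this CPU's lists and re-raises the softirqs, so nothing
 * queued on the departed CPU is lost.
 */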
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
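
/*
 * CPU notifier: CPU_UP_PREPARE creates and binds the per-cpu ksoftirqd
 * thread, CPU_ONLINE wakes it, and on CPU_UP_CANCELED or CPU_DEAD the
 * thread is stopped and any leftover tasklets are migrated via
 * takeover_tasklets().
 */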
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
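
/*
 * on_each_cpu() below invokes @func on every other online CPU via IPI
 * through smp_call_function(), then runs it locally with hardirqs off,
 * so the local invocation executes in a comparable irqs-off context to
 * what the remote CPUs see in their IPI handlers.
 */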
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif