/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
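
/*
 * Run all pending softirqs on this CPU with interrupts enabled.
 * If new softirqs are raised while we process, loop over the vector
 * again, up to MAX_SOFTIRQ_RESTART times; anything still pending
 * after that is handed off to ksoftirqd via wakeup_softirqd().
 */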
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	local_bh_disable();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	local_softirq_pending() = 0;
	local_irq_enable();

	h = softirq_vec;
	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();
	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ
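
/*
 * Generic entry point used by arches without their own do_softirq().
 * Safe to call from process or irq-return context: it bails out when
 * already inside an interrupt/softirq and otherwise runs the pending
 * softirqs with local interrupts disabled around the bookkeeping.
 */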
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();
	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
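
/*
 * Re-enable bottom halves.  If softirqs became pending while they
 * were disabled (and we are not inside an interrupt), run them now
 * instead of waiting for the next interrupt or for ksoftirqd.
 */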
void local_bh_enable(void)
{
	__local_bh_enable();
	WARN_ON(irqs_disabled());
	if (unlikely(!in_interrupt() &&
		     local_softirq_pending()))
		do_softirq();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);

void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(raise_softirq);
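
/*
 * Install a softirq handler.  A subsystem typically calls this once at
 * init time for one of the fixed softirq slots and later marks it
 * pending with raise_softirq().  Illustrative sketch only - "MY_SOFTIRQ"
 * stands for one of the existing slots and "my_action" is a made-up
 * handler name:
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		/\* runs with irqs enabled, bottom halves disabled *\/
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action, NULL);
 *	...
 *	raise_softirq(MY_SOFTIRQ);	/\* e.g. from an irq handler *\/
 */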
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
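
/*
 * Slow path of tasklet_schedule()/tasklet_hi_schedule(): called once
 * the SCHED bit has newly been set.  Link the tasklet onto this CPU's
 * list and mark the corresponding softirq pending; the tasklet will
 * then run from tasklet_action()/tasklet_hi_action() on this CPU.
 */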
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
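
/*
 * TASKLET_SOFTIRQ handler: atomically grab this CPU's pending list,
 * then run each tasklet that is not disabled and not already running
 * on another CPU.  Tasklets we cannot run right now are put back on
 * the list and the softirq is raised again.
 */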
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
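
/*
 * Typical driver usage (sketch; "my_tasklet", "my_tasklet_fn" and
 * "my_irq_handler" are made-up names):
 *
 *	static void my_tasklet_fn(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id,
 *					  struct pt_regs *regs)
 *	{
 *		tasklet_schedule(&my_tasklet);	/\* defer work out of hard irq *\/
 *		return IRQ_HANDLED;
 *	}
 *
 * Use tasklet_init() instead of DECLARE_TASKLET for dynamically
 * allocated objects, and tasklet_kill() before freeing them.
 */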

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
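
/*
 * Per-CPU softirq daemon.  Softirqs that could not be finished in
 * __do_softirq() (more than MAX_SOFTIRQ_RESTART rounds) are deferred
 * to this thread so that userspace is not starved.  One instance is
 * created and bound per CPU by cpu_callback()/spawn_ksoftirqd().
 */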
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			preempt_disable();
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable();
			cond_resched();
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
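
/*
 * CPU is going away: splice its pending tasklet lists onto the
 * current CPU's lists and raise the softirqs so the work still runs.
 * Called from the CPU_DEAD notifier with the dead CPU offline.
 */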
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
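
/*
 * CPU hotplug notifier: create and bind a ksoftirqd thread when a CPU
 * is prepared, wake it when the CPU comes online, and stop it (taking
 * over any queued tasklets) when the bringup is cancelled or the CPU
 * dies.
 */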
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}