linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4a3da9b..6abbc72 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -84,7 +84,7 @@ asmlinkage void __do_softirq(void)
        cpu = smp_processor_id();
 restart:
        /* Reset the pending bitmask before enabling irqs */
-       local_softirq_pending() = 0;
+       set_softirq_pending(0);
 
        local_irq_enable();
 
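The removed form only worked because local_softirq_pending() happens to expand to an lvalue; assigning through a function-like macro is fragile and prevents architectures from overriding how the store is done. set_softirq_pending() names the operation instead. For reference, the generic fallback in <linux/interrupt.h> of this era is (roughly) the old lvalue trick, now confined to one place:

    #ifndef __ARCH_SET_SOFTIRQ_PENDING
    #define set_softirq_pending(x) (local_softirq_pending() = (x))
    #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
    #endif

Architectures that keep the pending mask somewhere special define __ARCH_SET_SOFTIRQ_PENDING and supply their own versions.
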
@@ -137,15 +137,39 @@ EXPORT_SYMBOL(do_softirq);
 
 void local_bh_enable(void)
 {
-       __local_bh_enable();
        WARN_ON(irqs_disabled());
-       if (unlikely(!in_interrupt() &&
-                    local_softirq_pending()))
-               invoke_softirq();
+       /*
+        * Keep preemption disabled until we are done with
+        * softirq processing:
+        */
+       sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+       if (unlikely(!in_interrupt() && local_softirq_pending()))
+               do_softirq();
+
+       dec_preempt_count();
        preempt_check_resched();
 }
 EXPORT_SYMBOL(local_bh_enable);
 
+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+# define invoke_softirq()      __do_softirq()
+#else
+# define invoke_softirq()      do_softirq()
+#endif
+
+/*
+ * Exit an interrupt context. Process softirqs if needed and possible:
+ */
+void irq_exit(void)
+{
+       account_system_vtime(current);
+       sub_preempt_count(IRQ_EXIT_OFFSET);
+       if (!in_interrupt() && local_softirq_pending())
+               invoke_softirq();
+       preempt_enable_no_resched();
+}
+
 /*
  * This function must run with irqs disabled!
  */
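
The rewritten local_bh_enable() above has to re-enable softirqs, run any that are pending, and only then allow preemption. sub_preempt_count(SOFTIRQ_OFFSET - 1) does the first and last pieces of bookkeeping in a single operation: it removes the SOFTIRQ_OFFSET added by local_bh_disable() while leaving one plain preempt-disable reference behind. A worked example of the counter, assuming the usual preempt_count() layout (preemption depth in bits 0-7, softirq depth in bits 8-15, so SOFTIRQ_OFFSET == 0x100):

    /*
     * preempt_count() as local_bh_enable() unwinds a single
     * local_bh_disable() reference:
     *
     *   local_bh_disable();                count == 0x100
     *   local_bh_enable():
     *     sub_preempt_count(0x100 - 1);    count == 0x001
     *       (softirqs enabled again, preemption still off)
     *     do_softirq();                    runs non-preemptibly
     *     dec_preempt_count();             count == 0x000
     *     preempt_check_resched();         deferred resched can happen
     */

The new irq_exit() plays the same trick: on preemptible kernels IRQ_EXIT_OFFSET is HARDIRQ_OFFSET - 1, so dropping it leaves preemption disabled across invoke_softirq(), and preempt_enable_no_resched() releases the last reference. The invoke_softirq() macro exists because architectures that define __ARCH_IRQ_EXIT_IRQS_DISABLED reach irq_exit() with hardirqs still off and can call __do_softirq() directly, skipping do_softirq()'s redundant local_irq_save() and pending re-check.
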
@@ -177,8 +201,6 @@ void fastcall raise_softirq(unsigned int nr)
        local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL(raise_softirq);
-
 void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
 {
        softirq_vec[nr].data = data;
@@ -333,8 +355,12 @@ static int ksoftirqd(void * __bind_cpu)
        set_current_state(TASK_INTERRUPTIBLE);
 
        while (!kthread_should_stop()) {
-               if (!local_softirq_pending())
+               preempt_disable();
+               if (!local_softirq_pending()) {
+                       preempt_enable_no_resched();
                        schedule();
+                       preempt_disable();
+               }
 
                __set_current_state(TASK_RUNNING);
 
@@ -342,14 +368,14 @@ static int ksoftirqd(void * __bind_cpu)
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
-                       preempt_disable();
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
-                       preempt_enable();
+                       preempt_enable_no_resched();
                        cond_resched();
+                       preempt_disable();
                }
-
+               preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
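
Taken together, these two ksoftirqd hunks close a preemption race: if the thread could be preempted (and migrated, or have its CPU taken offline) between testing local_softirq_pending() and acting on the result, it could go to sleep with work pending or process softirqs on the wrong CPU. After the patch, every pending check runs under preempt_disable(), which is dropped only around schedule() and cond_resched(). The resulting loop body, reconstructed from the two hunks (the inner while header lies outside the diff context; local_softirq_pending() as its condition is what the surrounding code implies):

    while (!kthread_should_stop()) {
            preempt_disable();
            if (!local_softirq_pending()) {
                    preempt_enable_no_resched();
                    schedule();
                    preempt_disable();
            }

            __set_current_state(TASK_RUNNING);

            while (local_softirq_pending()) {
                    /* Preempt disable stops cpu going offline.
                       If already offline, we'll be on wrong CPU:
                       don't process */
                    if (cpu_is_offline((long)__bind_cpu))
                            goto wait_to_die;
                    do_softirq();
                    preempt_enable_no_resched();
                    cond_resched();
                    preempt_disable();
            }
            preempt_enable();
            set_current_state(TASK_INTERRUPTIBLE);
    }
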
@@ -428,8 +454,6 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_PREPARE:
-               BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
-               BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
@@ -444,7 +468,8 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
                /* Unbind so it can run.  Fall thru. */
-               kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
+               kthread_bind(per_cpu(ksoftirqd, hotcpu),
+                            any_online_cpu(cpu_online_map));
        case CPU_DEAD:
                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
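
The CPU_UP_CANCELED change fixes the cleanup path for a CPU that failed to come online: the ksoftirqd thread created at CPU_UP_PREPARE is already kthread_bind()-bound to that never-onlined CPU, so before falling through to CPU_DEAD it must be rebound somewhere it can actually run. Binding to smp_processor_id() merely assumed the notifier's current CPU was a valid home; any_online_cpu(cpu_online_map) makes the requirement explicit. Its semantics in the cpumask API of this era are, in sketch form (an illustrative reimplementation, not the header's exact macro):

    /* First CPU that is both in the mask and online; NR_CPUS if none. */
    static int sketch_any_online_cpu(cpumask_t mask)
    {
            int cpu;

            for_each_cpu_mask(cpu, mask)
                    if (cpu_online(cpu))
                            break;
            return cpu;
    }

With the thread runnable again, the fall-through into CPU_DEAD (whose body continues past the end of this hunk) can stop it the same way as for a genuinely dead CPU.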