#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/smp.h>
+#include <linux/vs_context.h>
#include <asm/irq.h>
/*
wake_up_process(tsk);
}
+/*
+ * This one is for softirq.c-internal use,
+ * where hardirqs are disabled legitimately:
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+static void __local_bh_disable(unsigned long ip)
+{
+ unsigned long flags;
+
+ WARN_ON_ONCE(in_irq());
+
+ raw_local_irq_save(flags);
+ add_preempt_count(SOFTIRQ_OFFSET);
+	/*
+	 * Were softirqs turned off above?
+	 */
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_off(ip);
+ raw_local_irq_restore(flags);
+}
+#else /* !CONFIG_TRACE_IRQFLAGS */
+static inline void __local_bh_disable(unsigned long ip)
+{
+ add_preempt_count(SOFTIRQ_OFFSET);
+ barrier();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
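+/*
+ * Note that the tracing variant above uses raw_local_irq_save(): the
+ * non-raw local_irq_save() itself calls into the irq-flags tracer and
+ * would recurse from here.
+ */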
+
+void local_bh_disable(void)
+{
+ __local_bh_disable((unsigned long)__builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(local_bh_disable);
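+/*
+ * Illustrative usage (not part of this patch): bracketing access to
+ * data that is shared with softirq/tasklet context:
+ *
+ *	local_bh_disable();
+ *	... touch the shared data ...
+ *	local_bh_enable();
+ */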
+
+void __local_bh_enable(void)
+{
+ WARN_ON_ONCE(in_irq());
+
+	/*
+	 * Softirqs should never be enabled by __local_bh_enable();
+	 * it always nests inside local_bh_enable() sections:
+	 */
+ WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
+
+ sub_preempt_count(SOFTIRQ_OFFSET);
+}
+EXPORT_SYMBOL_GPL(__local_bh_enable);
+
+/*
+ * Special case: softirqs can safely be enabled by cond_resched_softirq()
+ * or by __do_softirq() without processing still-pending softirqs:
+ */
+void _local_bh_enable(void)
+{
+ WARN_ON_ONCE(in_irq());
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ sub_preempt_count(SOFTIRQ_OFFSET);
+}
+
+EXPORT_SYMBOL(_local_bh_enable);
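+/*
+ * The in-file user of this special case is __do_softirq() below: it
+ * calls _local_bh_enable() with irqs still disabled, after everything
+ * pending has been processed or handed off to ksoftirqd.
+ */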
+
+void local_bh_enable(void)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned long flags;
+
+ WARN_ON_ONCE(in_irq());
+#endif
+ WARN_ON_ONCE(irqs_disabled());
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ local_irq_save(flags);
+#endif
+	/*
+	 * Are softirqs going to be turned on now?
+	 */
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ /*
+ * Keep preemption disabled until we are done with
+ * softirq processing:
+ */
+ sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+ if (unlikely(!in_interrupt() && local_softirq_pending()))
+ do_softirq();
+
+ dec_preempt_count();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ local_irq_restore(flags);
+#endif
+ preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable);
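+/*
+ * Spelling out the counter arithmetic above: SOFTIRQ_OFFSET is one unit
+ * of the softirq nesting count, which lives above the preemption count's
+ * low bits, so:
+ *
+ *	sub_preempt_count(SOFTIRQ_OFFSET - 1)	drops the softirq unit but
+ *						leaves one preemption unit
+ *	do_softirq()				thus runs non-preemptible
+ *	dec_preempt_count()			drops that last unit
+ */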
+
+void local_bh_enable_ip(unsigned long ip)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned long flags;
+
+ WARN_ON_ONCE(in_irq());
+
+ local_irq_save(flags);
+#endif
+	/*
+	 * Are softirqs going to be turned on now?
+	 */
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_on(ip);
+ /*
+ * Keep preemption disabled until we are done with
+ * softirq processing:
+ */
+ sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+ if (unlikely(!in_interrupt() && local_softirq_pending()))
+ do_softirq();
+
+ dec_preempt_count();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ local_irq_restore(flags);
+#endif
+ preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
/*
* We restart softirq processing MAX_SOFTIRQ_RESTART times,
* and we fall back to softirqd after that.
asmlinkage void __do_softirq(void)
{
+ struct vx_info_save vxis;
struct softirq_action *h;
__u32 pending;
int max_restart = MAX_SOFTIRQ_RESTART;
int cpu;
pending = local_softirq_pending();
+ account_system_vtime(current);
+
+ __local_bh_disable((unsigned long)__builtin_return_address(0));
+ __enter_vx_admin(&vxis);
+ trace_softirq_enter();
- local_bh_disable();
cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
- local_softirq_pending() = 0;
+ set_softirq_pending(0);
local_irq_enable();
if (pending)
wakeup_softirqd();
- __local_bh_enable();
+ trace_softirq_exit();
+
+ __leave_vx_admin(&vxis);
+ account_system_vtime(current);
+ _local_bh_enable();
}
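+/*
+ * Ordering: softirq processing above is bracketed by vtime accounting
+ * and by __enter_vx_admin()/__leave_vx_admin() (Linux-VServer specific;
+ * presumably this switches softirq processing into the host/admin
+ * context instead of the interrupted guest's).
+ */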
#ifndef __ARCH_HAS_DO_SOFTIRQ
#endif
-void local_bh_enable(void)
+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+# define invoke_softirq() __do_softirq()
+#else
+# define invoke_softirq() do_softirq()
+#endif
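+/*
+ * Rationale: the generic do_softirq() re-checks in_interrupt() and
+ * saves/restores the irq flags; on architectures that guarantee irqs
+ * are still disabled when irq_exit() runs, calling __do_softirq()
+ * directly skips that redundant work.
+ */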
+
+/*
+ * Exit an interrupt context. Process softirqs if needed and possible:
+ */
+void irq_exit(void)
{
- __local_bh_enable();
- WARN_ON(irqs_disabled());
- if (unlikely(!in_interrupt() &&
- local_softirq_pending()))
+ account_system_vtime(current);
+ trace_hardirq_exit();
+ sub_preempt_count(IRQ_EXIT_OFFSET);
+ if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
- preempt_check_resched();
+ preempt_enable_no_resched();
}
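+/*
+ * Illustrative caller (the arch interrupt path, not this file):
+ *
+ *	irq_enter();
+ *	... dispatch the hardirq handler ...
+ *	irq_exit();
+ *
+ * so pending softirqs run on the way out of the outermost hardirq.
+ */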
-EXPORT_SYMBOL(local_bh_enable);
/*
* This function must run with irqs disabled!
local_irq_restore(flags);
}
-EXPORT_SYMBOL(raise_softirq);
-
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
softirq_vec[nr].data = data;
softirq_vec[nr].action = action;
}
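+/*
+ * Example from this file's own softirq_init(), which registers the
+ * tasklet softirqs:
+ *
+ *	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
+ *	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+ */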
-EXPORT_SYMBOL(open_softirq);
-
/* Tasklets */
struct tasklet_head
{
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
- if (!local_softirq_pending())
+ preempt_disable();
+ if (!local_softirq_pending()) {
+ preempt_enable_no_resched();
schedule();
+ preempt_disable();
+ }
__set_current_state(TASK_RUNNING);
/* Preempt disable stops cpu going offline.
If already offline, we'll be on wrong CPU:
don't process */
- preempt_disable();
if (cpu_is_offline((long)__bind_cpu))
goto wait_to_die;
do_softirq();
- preempt_enable();
+ preempt_enable_no_resched();
cond_resched();
+ preempt_disable();
}
-
+ preempt_enable();
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
}
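+/*
+ * The preempt_disable()/preempt_enable_no_resched() dance above keeps
+ * preemption off across the pending check and do_softirq() so that the
+ * CPU cannot go offline under us, dropping it only around schedule()
+ * and cond_resched().
+ */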
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
+ if (!per_cpu(ksoftirqd, hotcpu))
+ break;
/* Unbind so it can run. Fall thru. */
- kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
+ kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ any_online_cpu(cpu_online_map));
case CPU_DEAD:
p = per_cpu(ksoftirqd, hotcpu);
per_cpu(ksoftirqd, hotcpu) = NULL;
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata cpu_nfb = {
+static struct notifier_block __cpuinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
register_cpu_notifier(&cpu_nfb);
return 0;
}
+
+#ifdef CONFIG_SMP
+/*
+ * Call a function on each online CPU: the local CPU calls it directly
+ * with interrupts disabled, the others via smp_call_function() IPIs.
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+{
+ int ret = 0;
+
+ preempt_disable();
+ ret = smp_call_function(func, info, retry, wait);
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
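+/*
+ * Illustrative usage (flush_one_cpu is a made-up callback): run a
+ * function on every CPU and wait for all of them to finish:
+ *
+ *	static void flush_one_cpu(void *info) { ... }
+ *
+ *	on_each_cpu(flush_one_cpu, NULL, 0, 1);
+ *
+ * The local CPU runs it directly with irqs disabled; the others run it
+ * from IPI context.
+ */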
+#endif