*
* Copyright (C) IBM Corporation, 2001
*
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ * Manfred Spraul <manfred@colorfullife.com>
*
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
+#include <linux/mutex.h>
/* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
- { .cur = -300, .completed = -300 , .lock = SEQCNT_ZERO };
-
-/* Bookkeeping of the progress of the grace period */
-struct {
- spinlock_t mutex; /* Guard this struct and writes to rcu_ctrlblk */
- cpumask_t rcu_cpu_mask; /* CPUs that need to switch in order */
- /* for current batch to proceed. */
-} rcu_state ____cacheline_maxaligned_in_smp =
- {.mutex = SPIN_LOCK_UNLOCKED, .rcu_cpu_mask = CPU_MASK_NONE };
-
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
+ .cpumask = CPU_MASK_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
+ .cpumask = CPU_MASK_NONE,
+};
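The ->cur and ->completed counters start at -300 rather than 0, presumably so that any signedness or wraparound bug in batch-number comparisons shows up soon after boot. The comparisons rely on the usual signed-difference idiom; rcu_batch_before(), used further down in this file and defined in include/linux/rcupdate.h, amounts to roughly the following (shown only for reference):

	/* Illustrative restatement, not part of this patch. */
	static inline int batch_before(long a, long b)
	{
		return (a - b) < 0;	/* true if batch a precedes batch b, wrap-safe */
	}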
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
+static int blimit = 10;	/* maximum callbacks invoked per batch */
+static int qhimark = 10000;	/* queue length above which batch limiting is disabled */
+static int qlowmark = 100;	/* queue length at which batch limiting is re-enabled */
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ cpumask_t cpumask;
+ set_need_resched();
+ if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+ rdp->last_rs_qlen = rdp->qlen;
+ /*
+ * Don't send IPI to itself. With irqs disabled,
+ * rdp->cpu is the current cpu.
+ */
+ cpumask = rcp->cpumask;
+ cpu_clear(rdp->cpu, cpumask);
+ for_each_cpu_mask(cpu, cpumask)
+ smp_send_reschedule(cpu);
+ }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
/**
- * call_rcu - Queue an RCU update request.
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual update function to be invoked after the grace period
*
- * The update function will be invoked as soon as all CPUs have performed
- * a context switch or been seen in the idle loop or in a user process.
- * The read-side of critical section that use call_rcu() for updation must
- * be protected by rcu_read_lock()/rcu_read_unlock().
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
*/
void fastcall call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu))
{
- int cpu;
unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+ local_irq_save(flags);
+ rdp = &__get_cpu_var(rcu_data);
+ *rdp->nxttail = head;
+ rdp->nxttail = &head->next;
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
+ local_irq_restore(flags);
+}
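For orientation, here is a minimal caller of call_rcu(); struct foo, gbl_foo and foo_replace() are invented for this sketch (a real updater would also need a lock against concurrent updaters), but the container_of() pattern in the callback is the standard way to recover the enclosing object from its rcu_head:

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static struct foo *gbl_foo;	/* read under rcu_read_lock() */

	static void foo_reclaim(struct rcu_head *head)
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);		/* safe: pre-existing readers have finished */
	}

	/* Updater: publish the new version, defer freeing of the old one. */
	static void foo_replace(struct foo *new_fp)
	{
		struct foo *old_fp = gbl_foo;

		rcu_assign_pointer(gbl_foo, new_fp);
		call_rcu(&old_fp->rcu, foo_reclaim);
	}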
+
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
+ * and rcu_read_unlock_bh(), if in process context. These may be nested.
+ */
+void fastcall call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
head->func = func;
head->next = NULL;
local_irq_save(flags);
- cpu = smp_processor_id();
- *RCU_nxttail(cpu) = head;
- RCU_nxttail(cpu) = &head->next;
+ rdp = &__get_cpu_var(rcu_bh_data);
+ *rdp->nxttail = head;
+ rdp->nxttail = &head->next;
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+ }
+
local_irq_restore(flags);
}
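And a sketch of the softirq pairing that call_rcu_bh() is aimed at, reusing the hypothetical struct foo, gbl_foo and foo_reclaim() from the sketch above; readers use rcu_read_lock_bh()/rcu_read_unlock_bh(), and process_foo() stands in for whatever the reader does with the data:

	/* Reader, typically running in softirq context (e.g. packet processing). */
	static void foo_reader(void)
	{
		struct foo *fp;

		rcu_read_lock_bh();
		fp = rcu_dereference(gbl_foo);
		if (fp)
			process_foo(fp->data);	/* hypothetical consumer */
		rcu_read_unlock_bh();
	}

	/* Updater: the grace period ends once all softirq handlers complete. */
	static void foo_replace_bh(struct foo *new_fp)
	{
		struct foo *old_fp = gbl_foo;

		rcu_assign_pointer(gbl_foo, new_fp);
		call_rcu_bh(&old_fp->rcu, foo_reclaim);
	}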
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_ctrlblk.completed;
+}
+
+/*
+ * Return the number of rcu_bh batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
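These counters are consumed mainly by debug and torture-test code. A small sanity-check sketch: synchronize_rcu() spans at least one full grace period, so the counter must have advanced across it:

	long snap = rcu_batches_completed();

	synchronize_rcu();				/* wait for a full grace period */
	WARN_ON(rcu_batches_completed() == snap);	/* the count must have moved on */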
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+ if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+ int cpu = smp_processor_id();
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_head *head;
+
+ head = &rdp->barrier;
+ atomic_inc(&rcu_barrier_cpu_count);
+ call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+ BUG_ON(in_interrupt());
+ /* Take cpucontrol mutex to protect against CPU hotplug */
+ mutex_lock(&rcu_barrier_mutex);
+ init_completion(&rcu_barrier_completion);
+ atomic_set(&rcu_barrier_cpu_count, 0);
+ on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+ wait_for_completion(&rcu_barrier_completion);
+ mutex_unlock(&rcu_barrier_mutex);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
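A typical use is a module unload path: once the module stops queueing new callbacks, rcu_barrier() guarantees that every already-queued callback has run before the module's code and data disappear. A sketch, with unregister_my_hooks() and my_exit() invented for illustration:

	static void __exit my_exit(void)
	{
		unregister_my_hooks();	/* hypothetical: stop queueing new callbacks */
		rcu_barrier();		/* wait for every callback queued so far */
		/* only now is it safe for the module's code and data to go away */
	}
	module_exit(my_exit);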
+
/*
* Invoke the completed RCU callbacks. They are expected to be in
* a per-cpu list.
*/
-static void rcu_do_batch(struct rcu_head *list)
+static void rcu_do_batch(struct rcu_data *rdp)
{
- struct rcu_head *next;
+ struct rcu_head *next, *list;
+ int count = 0;
+ list = rdp->donelist;
while (list) {
- next = list->next;
+ next = rdp->donelist = list->next;
list->func(list);
list = next;
+ if (++count >= rdp->blimit)
+ break;
}
+
+ local_irq_disable();
+ rdp->qlen -= count;
+ local_irq_enable();
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ if (!rdp->donelist)
+ rdp->donetail = &rdp->donelist;
+ else
+ tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}
/*
* The grace period handling consists of two steps:
* - A new grace period is started.
* This is done by rcu_start_batch. The start is not broadcasted to
- * all cpus, they must pick this up by comparing rcu_ctrlblk.cur with
- * RCU_quiescbatch(cpu). All cpus are recorded in the
- * rcu_state.rcu_cpu_mask bitmap.
+ * all cpus; they must pick it up by comparing rcp->cur with
+ * rdp->quiescbatch. All cpus are recorded in the
+ * rcu_ctrlblk.cpumask bitmap.
* - All cpus must go through a quiescent state.
* Since the start of the grace period is not broadcasted, at least two
* calls to rcu_check_quiescent_state are required:
* The first call just notices that a new grace period is running. The
* following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_state.rcu_cpu_mask. If
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
* the bitmap is empty, then the grace period is completed.
* rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
* period (if necessary).
/*
* Register a new batch of callbacks, and start it up if there is currently no
* active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_state.mutex.
+ * Caller must hold rcu_ctrlblk.lock.
*/
-static void rcu_start_batch(int next_pending)
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
- if (next_pending)
- rcu_ctrlblk.next_pending = 1;
-
- if (rcu_ctrlblk.next_pending &&
- rcu_ctrlblk.completed == rcu_ctrlblk.cur) {
- /* Can't change, since spin lock held. */
- cpus_andnot(rcu_state.rcu_cpu_mask, cpu_online_map,
- nohz_cpu_mask);
- write_seqcount_begin(&rcu_ctrlblk.lock);
- rcu_ctrlblk.next_pending = 0;
- rcu_ctrlblk.cur++;
- write_seqcount_end(&rcu_ctrlblk.lock);
+ if (rcp->next_pending &&
+ rcp->completed == rcp->cur) {
+ rcp->next_pending = 0;
+ /*
+ * next_pending == 0 must be visible in
+ * __rcu_process_callbacks() before it can see the new value of cur.
+ */
+ smp_wmb();
+ rcp->cur++;
+
+ /*
+ * Reading nohz_cpu_mask must not be reordered before the increment
+ * of rcp->cur, hence the barrier. Otherwise tickless idle CPUs can
+ * be included in rcp->cpumask, which will extend grace periods
+ * unnecessarily.
+ */
+ smp_mb();
+ cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
+
}
}
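For clarity, the smp_wmb() above pairs with the smp_rmb() in __rcu_process_callbacks() below; an annotation-only restatement of the ordering (no new logic):

	/*
	 *   rcu_start_batch()              __rcu_process_callbacks()
	 *   -----------------              -------------------------
	 *   rcp->next_pending = 0;         rdp->batch = rcp->cur + 1;   (reads cur)
	 *   smp_wmb();                     smp_rmb();
	 *   rcp->cur++;                    if (!rcp->next_pending) ...
	 *
	 * If the reader computes its batch number from the incremented cur, the
	 * barrier pairing guarantees it also sees next_pending == 0, so it cannot
	 * wrongly assume that the next grace period is already scheduled.
	 */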
* Clear it from the cpu mask and complete the grace period if it was the last
* cpu. Start another grace period if someone has further entries pending
*/
-static void cpu_quiet(int cpu)
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
- cpu_clear(cpu, rcu_state.rcu_cpu_mask);
- if (cpus_empty(rcu_state.rcu_cpu_mask)) {
+ cpu_clear(cpu, rcp->cpumask);
+ if (cpus_empty(rcp->cpumask)) {
/* batch completed ! */
- rcu_ctrlblk.completed = rcu_ctrlblk.cur;
- rcu_start_batch(0);
+ rcp->completed = rcp->cur;
+ rcu_start_batch(rcp);
}
}
* switch). If so and if it hasn't already done so in this RCU
* quiescent cycle, then indicate that it has done so.
*/
-static void rcu_check_quiescent_state(void)
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
{
- int cpu = smp_processor_id();
-
- if (RCU_quiescbatch(cpu) != rcu_ctrlblk.cur) {
- /* new grace period: record qsctr value. */
- RCU_qs_pending(cpu) = 1;
- RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
- RCU_quiescbatch(cpu) = rcu_ctrlblk.cur;
+ if (rdp->quiescbatch != rcp->cur) {
+ /* start new grace period: */
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->quiescbatch = rcp->cur;
return;
}
* qs_pending is checked instead of the actual bitmap to avoid
* cacheline thrashing.
*/
- if (!RCU_qs_pending(cpu))
+ if (!rdp->qs_pending)
return;
/*
- * Races with local timer interrupt - in the worst case
- * we may miss one quiescent state of that CPU. That is
- * tolerable. So no need to disable interrupts.
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
*/
- if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
+ if (!rdp->passed_quiesc)
return;
- RCU_qs_pending(cpu) = 0;
+ rdp->qs_pending = 0;
- spin_lock(&rcu_state.mutex);
+ spin_lock(&rcp->lock);
/*
- * RCU_quiescbatch/batch.cur and the cpu bitmap can come out of sync
+ * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
* during cpu startup. Ignore the quiescent state.
*/
- if (likely(RCU_quiescbatch(cpu) == rcu_ctrlblk.cur))
- cpu_quiet(cpu);
+ if (likely(rdp->quiescbatch == rcp->cur))
+ cpu_quiet(rdp->cpu, rcp);
- spin_unlock(&rcu_state.mutex);
+ spin_unlock(&rcp->lock);
}
* locking requirements, the list it's pulling from has to belong to a cpu
* which is dead and hence not processing interrupts.
*/
-static void rcu_move_batch(struct rcu_head *list)
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+ struct rcu_head **tail)
{
- int cpu;
-
local_irq_disable();
-
- cpu = smp_processor_id();
-
- while (list != NULL) {
- *RCU_nxttail(cpu) = list;
- RCU_nxttail(cpu) = &list->next;
- list = list->next;
- }
+ *this_rdp->nxttail = list;
+ if (list)
+ this_rdp->nxttail = tail;
local_irq_enable();
}
-static void rcu_offline_cpu(int cpu)
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+ struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
/* if the cpu going offline owns the grace period
* we can block indefinitely waiting for it, so flush
* it here
*/
- spin_lock_bh(&rcu_state.mutex);
- if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
- cpu_quiet(cpu);
- spin_unlock_bh(&rcu_state.mutex);
-
- rcu_move_batch(RCU_curlist(cpu));
- rcu_move_batch(RCU_nxtlist(cpu));
+ spin_lock_bh(&rcp->lock);
+ if (rcp->cur != rcp->completed)
+ cpu_quiet(rdp->cpu, rcp);
+ spin_unlock_bh(&rcp->lock);
+ rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
+ rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+ rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
+}
- tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
+static void rcu_offline_cpu(int cpu)
+{
+ struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+ struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+ __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+ &per_cpu(rcu_data, cpu));
+ __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+ &per_cpu(rcu_bh_data, cpu));
+ put_cpu_var(rcu_data);
+ put_cpu_var(rcu_bh_data);
+ tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}
-#endif
+#else
-void rcu_restart_cpu(int cpu)
+static void rcu_offline_cpu(int cpu)
{
- spin_lock_bh(&rcu_state.mutex);
- RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
- RCU_qs_pending(cpu) = 0;
- spin_unlock_bh(&rcu_state.mutex);
}
+#endif
+
/*
* This does the RCU processing work from tasklet context.
*/
-static void rcu_process_callbacks(unsigned long unused)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
{
- int cpu = smp_processor_id();
- struct rcu_head *rcu_list = NULL;
-
- if (RCU_curlist(cpu) &&
- !rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
- rcu_list = RCU_curlist(cpu);
- RCU_curlist(cpu) = NULL;
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
+ *rdp->donetail = rdp->curlist;
+ rdp->donetail = rdp->curtail;
+ rdp->curlist = NULL;
+ rdp->curtail = &rdp->curlist;
}
- local_irq_disable();
- if (RCU_nxtlist(cpu) && !RCU_curlist(cpu)) {
- int next_pending, seq;
-
- RCU_curlist(cpu) = RCU_nxtlist(cpu);
- RCU_nxtlist(cpu) = NULL;
- RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
+ if (rdp->nxtlist && !rdp->curlist) {
+ local_irq_disable();
+ rdp->curlist = rdp->nxtlist;
+ rdp->curtail = rdp->nxttail;
+ rdp->nxtlist = NULL;
+ rdp->nxttail = &rdp->nxtlist;
local_irq_enable();
/*
* start the next batch of callbacks
*/
- do {
- seq = read_seqcount_begin(&rcu_ctrlblk.lock);
- /* determine batch number */
- RCU_batch(cpu) = rcu_ctrlblk.cur + 1;
- next_pending = rcu_ctrlblk.next_pending;
- } while (read_seqcount_retry(&rcu_ctrlblk.lock, seq));
-
- if (!next_pending) {
+
+ /* determine batch number */
+ rdp->batch = rcp->cur + 1;
+ /* See the comment and the corresponding smp_wmb() in
+ * rcu_start_batch().
+ */
+ smp_rmb();
+
+ if (!rcp->next_pending) {
/* and start it/schedule start if it's a new batch */
- spin_lock(&rcu_state.mutex);
- rcu_start_batch(1);
- spin_unlock(&rcu_state.mutex);
+ spin_lock(&rcp->lock);
+ rcp->next_pending = 1;
+ rcu_start_batch(rcp);
+ spin_unlock(&rcp->lock);
}
- } else {
- local_irq_enable();
}
- rcu_check_quiescent_state();
- if (rcu_list)
- rcu_do_batch(rcu_list);
+
+ rcu_check_quiescent_state(rcp, rdp);
+ if (rdp->donelist)
+ rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(unsigned long unused)
+{
+ __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* This cpu has pending rcu entries and the grace period
+ * for them has completed.
+ */
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+ return 1;
+
+ /* This cpu has no pending entries, but there are new entries */
+ if (!rdp->curlist && rdp->nxtlist)
+ return 1;
+
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
+ return 1;
+
+ /* The rcu core waits for a quiescent state from the cpu */
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
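For context, rcu_pending() is intended to be called from the per-CPU timer tick so that the RCU tasklet is scheduled only when there is work to do; roughly the following, where tick_hook() is a stand-in for the real caller in the timer code outside this file:

	/* Sketch of the timer-tick hook, not part of this patch. */
	void tick_hook(int cpu, int user_tick)
	{
		if (rcu_pending(cpu))
			rcu_check_callbacks(cpu, user_tick);
	}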
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+ return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
(idle_cpu(cpu) && !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT)))
- RCU_qsctr(cpu)++;
- tasklet_schedule(&RCU_tasklet(cpu));
+ hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+ } else if (!in_softirq())
+ rcu_bh_qsctr_inc(cpu);
+ tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
+}
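An annotation-only restatement of the quiescent-state inference above (no new logic):

	/*
	 *  - The tick interrupted user mode ("user"): the CPU cannot be inside a
	 *    kernel read-side critical section, so this is a quiescent state for
	 *    both the rcu and rcu_bh flavors.
	 *  - The tick interrupted the idle loop with no softirq active and no
	 *    nested hardirq (hardirq_count() <= (1 << HARDIRQ_SHIFT), i.e. only
	 *    the tick itself): likewise a quiescent state for both flavors.
	 *  - Otherwise, if the tick did not interrupt a softirq or a region with
	 *    bottom halves disabled, it is still a quiescent state for rcu_bh,
	 *    because rcu_read_lock_bh() sections run with bottom halves disabled.
	 */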
+
+static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ memset(rdp, 0, sizeof(*rdp));
+ rdp->curtail = &rdp->curlist;
+ rdp->nxttail = &rdp->nxtlist;
+ rdp->donetail = &rdp->donelist;
+ rdp->quiescbatch = rcp->completed;
+ rdp->qs_pending = 0;
+ rdp->cpu = cpu;
+ rdp->blimit = blimit;
}
static void __devinit rcu_online_cpu(int cpu)
{
- memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
- tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
- RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
- RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
- RCU_qs_pending(cpu) = 0;
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+ rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+ tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
-static int __devinit rcu_cpu_notify(struct notifier_block *self,
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
case CPU_UP_PREPARE:
rcu_online_cpu(cpu);
break;
-#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
rcu_offline_cpu(cpu);
break;
-#endif
default:
break;
}
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata rcu_nb = {
+static struct notifier_block __cpuinitdata rcu_nb = {
.notifier_call = rcu_cpu_notify,
};
}
/**
- * synchronize-kernel - wait until all the CPUs have gone
- * through a "quiescent" state. It may sleep.
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ *
+ * If your read-side code is not protected by rcu_read_lock(), do -not-
+ * use synchronize_rcu().
*/
-void synchronize_kernel(void)
+void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
wait_for_completion(&rcu.completion);
}
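A typical updater-side use, reusing the hypothetical gbl_foo from the call_rcu() sketch earlier: publish the replacement, wait out pre-existing readers, then free the old version synchronously:

	static void foo_replace_sync(struct foo *new_fp)
	{
		struct foo *old_fp = gbl_foo;

		rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
		synchronize_rcu();			/* wait for pre-existing readers */
		kfree(old_fp);				/* no reader can still see it */
	}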
-
-EXPORT_SYMBOL(call_rcu);
-EXPORT_SYMBOL(synchronize_kernel);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
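Since this code is built in, these module parameters end up on the kernel command line under the object name; assuming the usual rcupdate.<name> prefix derived from this file, tuning would look something like:

	rcupdate.blimit=20 rcupdate.qhimark=20000 rcupdate.qlowmark=200 rcupdate.rsinterval=2000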
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+EXPORT_SYMBOL_GPL(synchronize_rcu);