X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Frcupdate.c;h=2058f88c7bbb3d9c9d0c7e195f3d67f25d8f64ed;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=13a1b5a5825f694be25643b7ef133a8d2776143f;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 13a1b5a58..2058f88c7 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -17,9 +17,10 @@
  *
  * Copyright (C) IBM Corporation, 2001
  *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ *	    Manfred Spraul <manfred@colorfullife.com>
  *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -34,88 +35,275 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
+#include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/completion.h>
+#include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
-#include <linux/rcupdate.h>
 #include <linux/cpu.h>
+#include <linux/mutex.h>
 
 /* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
-	{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
-	  .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
+static struct rcu_ctrlblk rcu_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
+};
+
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	cpumask_t cpumask;
+	set_need_resched();
+	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+		rdp->last_rs_qlen = rdp->qlen;
+		/*
+		 * Don't send IPI to itself. With irqs disabled,
+		 * rdp->cpu is the current cpu.
+		 */
+		cpumask = rcp->cpumask;
+		cpu_clear(rdp->cpu, cpumask);
+		for_each_cpu_mask(cpu, cpumask)
+			smp_send_reschedule(cpu);
+	}
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	set_need_resched();
+}
+#endif
 
 /**
- * call_rcu - Queue an RCU update request.
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
  * @func: actual update function to be invoked after the grace period
- * @arg: argument to be passed to the update function
  *
- * The update function will be invoked as soon as all CPUs have performed
- * a context switch or been seen in the idle loop or in a user process.
- * The read-side of critical section that use call_rcu() for updation must
- * be protected by rcu_read_lock()/rcu_read_unlock().
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
  */
-void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head,
+				void (*func)(struct rcu_head *rcu))
 {
-	int cpu;
 	unsigned long flags;
+	struct rcu_data *rdp;
 
 	head->func = func;
-	head->arg = arg;
+	head->next = NULL;
 	local_irq_save(flags);
-	cpu = smp_processor_id();
-	list_add_tail(&head->list, &RCU_nxtlist(cpu));
+	rdp = &__get_cpu_var(rcu_data);
+	*rdp->nxttail = head;
+	rdp->nxttail = &head->next;
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
+	local_irq_restore(flags);
+}
+
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
+ * and rcu_read_unlock_bh() if in process context. These may be nested.
+ */
+void fastcall call_rcu_bh(struct rcu_head *head,
+				void (*func)(struct rcu_head *rcu))
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	head->func = func;
+	head->next = NULL;
+	local_irq_save(flags);
+	rdp = &__get_cpu_var(rcu_bh_data);
+	*rdp->nxttail = head;
+	rdp->nxttail = &head->next;
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+	}
+	local_irq_restore(flags);
 }
 
 /*
- * Invoke the completed RCU callbacks. They are expected to be in
- * a per-cpu list.
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+	return rcu_ctrlblk.completed;
+}
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_do_batch(struct list_head *list)
+static void rcu_barrier_func(void *notused)
 {
-	struct list_head *entry;
+	int cpu = smp_processor_id();
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 	struct rcu_head *head;
 
-	while (!list_empty(list)) {
-		entry = list->next;
-		list_del(entry);
-		head = list_entry(entry, struct rcu_head, list);
-		head->func(head->arg);
+	head = &rdp->barrier;
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all in-flight RCU callbacks are complete.
+ */
+void rcu_barrier(void)
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol mutex to protect against CPU hotplug */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	atomic_set(&rcu_barrier_cpu_count, 0);
+	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+	struct rcu_head *next, *list;
+	int count = 0;
+
+	list = rdp->donelist;
+	while (list) {
+		next = rdp->donelist = list->next;
+		list->func(list);
+		list = next;
+		rdp->qlen--;
+		if (++count >= rdp->blimit)
+			break;
 	}
+	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+		rdp->blimit = blimit;
+	if (!rdp->donelist)
+		rdp->donetail = &rdp->donelist;
+	else
+		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
 }
 
+/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ *   This is done by rcu_start_batch. The start is not broadcast to
+ *   all cpus, they must pick this up by comparing rcp->cur with
+ *   rdp->quiescbatch. All cpus are recorded in the
+ *   rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ *   Since the start of the grace period is not broadcast, at least two
+ *   calls to rcu_check_quiescent_state are required:
+ *   The first call just notices that a new grace period is running. The
+ *   following calls check if there was a quiescent state since the beginning
+ *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
+ *   the bitmap is empty, then the grace period is completed.
+ *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
+ *   period (if necessary).
+ */
 /*
  * Register a new batch of callbacks, and start it up if there is currently no
  * active batch and the batch to be registered has not already occurred.
- * Caller must hold the rcu_ctrlblk lock.
+ * Caller must hold rcu_ctrlblk.lock.
  */
-static void rcu_start_batch(long newbatch)
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
-	cpumask_t active;
+	if (rcp->next_pending &&
+			rcp->completed == rcp->cur) {
+		rcp->next_pending = 0;
+		/*
+		 * next_pending == 0 must be visible in
+		 * __rcu_process_callbacks() before it can see new value of cur.
+		 */
+		smp_wmb();
+		rcp->cur++;
+
+		/*
+		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+		 * barrier. Otherwise it can cause tickless idle CPUs to be
+		 * included in rcp->cpumask, which will extend grace periods
+		 * unnecessarily.
+		 */
+		smp_mb();
+		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
 
-	if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch)) {
-		rcu_ctrlblk.maxbatch = newbatch;
 	}
-	if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
-	    !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
-		return;
+}
+
+/*
+ * Cpu went through a quiescent state since the beginning of the grace period.
+ * Clear it from the cpu mask and complete the grace period if it was the last
+ * cpu. Start another grace period if someone has further entries pending.
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+	cpu_clear(cpu, rcp->cpumask);
+	if (cpus_empty(rcp->cpumask)) {
+		/* batch completed! */
+		rcp->completed = rcp->cur;
+		rcu_start_batch(rcp);
 	}
-	/* Can't change, since spin lock held. */
-	active = nohz_cpu_mask;
-	cpus_complement(active);
-	cpus_and(rcu_ctrlblk.rcu_cpu_mask, cpu_online_map, active);
 }
 
 /*
@@ -123,39 +311,41 @@ static void rcu_start_batch(long newbatch)
  * switch). If so and if it already hasn't done so in this RCU
  * quiescent cycle, then indicate that it has done so.
  */
-static void rcu_check_quiescent_state(void)
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+					struct rcu_data *rdp)
 {
-	int cpu = smp_processor_id();
+	if (rdp->quiescbatch != rcp->cur) {
+		/* start new grace period: */
+		rdp->qs_pending = 1;
+		rdp->passed_quiesc = 0;
+		rdp->quiescbatch = rcp->cur;
+		return;
+	}
 
-	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
+	/* Grace period already completed for this cpu?
+	 * qs_pending is checked instead of the actual bitmap to avoid
+	 * cacheline thrashing.
+	 */
+	if (!rdp->qs_pending)
 		return;
 
 	/*
-	 * Races with local timer interrupt - in the worst case
-	 * we may miss one quiescent state of that CPU. That is
-	 * tolerable. So no need to disable interrupts.
+	 * Was there a quiescent state since the beginning of the grace
+	 * period? If no, then exit and wait for the next call.
 	 */
-	if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
-		RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
+	if (!rdp->passed_quiesc)
 		return;
-	}
-	if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
-		return;
-
-	spin_lock(&rcu_ctrlblk.mutex);
-	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
-		goto out_unlock;
+	rdp->qs_pending = 0;
 
-	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
-	RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
-	if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
-		goto out_unlock;
-
-	rcu_ctrlblk.curbatch++;
-	rcu_start_batch(rcu_ctrlblk.maxbatch);
+	spin_lock(&rcp->lock);
+	/*
+	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
+	 * during cpu startup. Ignore the quiescent state.
+	 */
+	if (likely(rdp->quiescbatch == rcp->cur))
+		cpu_quiet(rdp->cpu, rcp);
 
-out_unlock:
-	spin_unlock(&rcu_ctrlblk.mutex);
+	spin_unlock(&rcp->lock);
 }
 
@@ -165,50 +355,50 @@ out_unlock:
  * locking requirements, the list it's pulling from has to belong to a cpu
  * which is dead and hence not processing interrupts.
  */
-static void rcu_move_batch(struct list_head *list)
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+				struct rcu_head **tail)
 {
-	struct list_head *entry;
-	int cpu = smp_processor_id();
-
 	local_irq_disable();
-	while (!list_empty(list)) {
-		entry = list->next;
-		list_del(entry);
-		list_add_tail(entry, &RCU_nxtlist(cpu));
-	}
+	*this_rdp->nxttail = list;
+	if (list)
+		this_rdp->nxttail = tail;
 	local_irq_enable();
 }
 
-static void rcu_offline_cpu(int cpu)
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
 	/* if the cpu going offline owns the grace period
 	 * we can block indefinitely waiting for it, so flush
 	 * it here
 	 */
-	spin_lock_irq(&rcu_ctrlblk.mutex);
-	if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
-		goto unlock;
-
-	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
-	if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
-		rcu_ctrlblk.curbatch++;
-		/* We may avoid calling start batch if
-		 * we are starting the batch only
-		 * because of the DEAD CPU (the current
-		 * CPU will start a new batch anyway for
-		 * the callbacks we will move to current CPU).
-		 * However, we will avoid this optimisation
-		 * for now.
-		 */
-		rcu_start_batch(rcu_ctrlblk.maxbatch);
-	}
-unlock:
-	spin_unlock_irq(&rcu_ctrlblk.mutex);
+	spin_lock_bh(&rcp->lock);
+	if (rcp->cur != rcp->completed)
+		cpu_quiet(rdp->cpu, rcp);
+	spin_unlock_bh(&rcp->lock);
+	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
+	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
+}
 
-	rcu_move_batch(&RCU_curlist(cpu));
-	rcu_move_batch(&RCU_nxtlist(cpu));
+static void rcu_offline_cpu(int cpu)
+{
+	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+			  &per_cpu(rcu_data, cpu));
+	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+			  &per_cpu(rcu_bh_data, cpu));
+	put_cpu_var(rcu_data);
+	put_cpu_var(rcu_bh_data);
+	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
+}
+
+#else
 
-	tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
+static void rcu_offline_cpu(int cpu)
+{
 }
 
 #endif
 
@@ -216,56 +406,140 @@ unlock:
 /*
  * This does the RCU processing work from tasklet context.
  */
-static void rcu_process_callbacks(unsigned long unused)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+					struct rcu_data *rdp)
 {
-	int cpu = smp_processor_id();
-	LIST_HEAD(list);
-
-	if (!list_empty(&RCU_curlist(cpu)) &&
-	    rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
-		__list_splice(&RCU_curlist(cpu), &list);
-		INIT_LIST_HEAD(&RCU_curlist(cpu));
+	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
+		*rdp->donetail = rdp->curlist;
+		rdp->donetail = rdp->curtail;
+		rdp->curlist = NULL;
+		rdp->curtail = &rdp->curlist;
 	}
 
-	local_irq_disable();
-	if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
-		__list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
-		INIT_LIST_HEAD(&RCU_nxtlist(cpu));
+	if (rdp->nxtlist && !rdp->curlist) {
+		local_irq_disable();
+		rdp->curlist = rdp->nxtlist;
+		rdp->curtail = rdp->nxttail;
+		rdp->nxtlist = NULL;
+		rdp->nxttail = &rdp->nxtlist;
 		local_irq_enable();
 
 		/*
 		 * start the next batch of callbacks
 		 */
-		spin_lock(&rcu_ctrlblk.mutex);
-		RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
-		rcu_start_batch(RCU_batch(cpu));
-		spin_unlock(&rcu_ctrlblk.mutex);
-	} else {
-		local_irq_enable();
+
+		/* determine batch number */
+		rdp->batch = rcp->cur + 1;
+		/* see the comment and corresponding wmb() in
+		 * rcu_start_batch()
+		 */
+		smp_rmb();
+
+		if (!rcp->next_pending) {
+			/* and start it/schedule start if it's a new batch */
+			spin_lock(&rcp->lock);
+			rcp->next_pending = 1;
+			rcu_start_batch(rcp);
+			spin_unlock(&rcp->lock);
+		}
 	}
-	rcu_check_quiescent_state();
-	if (!list_empty(&list))
-		rcu_do_batch(&list);
+
+	rcu_check_quiescent_state(rcp, rdp);
+	if (rdp->donelist)
+		rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(unsigned long unused)
+{
+	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+	/* This cpu has pending rcu entries and the grace period
+	 * for them has completed.
+	 */
+	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+		return 1;
+
+	/* This cpu has no pending entries, but there are new entries */
+	if (!rdp->curlist && rdp->nxtlist)
+		return 1;
+
+	/* This cpu has finished callbacks to invoke */
+	if (rdp->donelist)
+		return 1;
+
+	/* The rcu core waits for a quiescent state from the cpu */
+	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+		return 1;
+
+	/* nothing to do */
+	return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+}
 
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT)))
-		RCU_qsctr(cpu)++;
-	tasklet_schedule(&RCU_tasklet(cpu));
+				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+		rcu_qsctr_inc(cpu);
+		rcu_bh_qsctr_inc(cpu);
+	} else if (!in_softirq())
+		rcu_bh_qsctr_inc(cpu);
+	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
+}
+
+static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+						struct rcu_data *rdp)
+{
+	memset(rdp, 0, sizeof(*rdp));
+	rdp->curtail = &rdp->curlist;
+	rdp->nxttail = &rdp->nxtlist;
+	rdp->donetail = &rdp->donelist;
+	rdp->quiescbatch = rcp->completed;
+	rdp->qs_pending = 0;
+	rdp->cpu = cpu;
+	rdp->blimit = blimit;
 }
 
 static void __devinit rcu_online_cpu(int cpu)
 {
-	memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
-	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
-	INIT_LIST_HEAD(&RCU_nxtlist(cpu));
-	INIT_LIST_HEAD(&RCU_curlist(cpu));
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
 }
 
-static int __devinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -273,18 +547,16 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE:
 		rcu_online_cpu(cpu);
 		break;
-#ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 		rcu_offline_cpu(cpu);
 		break;
-#endif
 	default:
 		break;
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata rcu_nb = {
+static struct notifier_block rcu_nb = {
 	.notifier_call	= rcu_cpu_notify,
 };
 
@@ -302,29 +574,60 @@ void __init rcu_init(void)
 	register_cpu_notifier(&rcu_nb);
 }
 
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
 /* Because of FASTCALL declaration of complete, we use this wrapper */
-static void wakeme_after_rcu(void *completion)
+static void wakeme_after_rcu(struct rcu_head *head)
 {
-	complete(completion);
+	struct rcu_synchronize *rcu;
+
+	rcu = container_of(head, struct rcu_synchronize, head);
+	complete(&rcu->completion);
 }
 
 /**
- * synchronize-kernel - wait until all the CPUs have gone
- * through a "quiescent" state. It may sleep.
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ *
+ * If your read-side code is not protected by rcu_read_lock(), do -not-
+ * use synchronize_rcu().
  */
-void synchronize_kernel(void)
+void synchronize_rcu(void)
 {
-	struct rcu_head rcu;
-	DECLARE_COMPLETION(completion);
+	struct rcu_synchronize rcu;
 
+	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished */
-	call_rcu(&rcu, wakeme_after_rcu, &completion);
+	call_rcu(&rcu.head, wakeme_after_rcu);
 
 	/* Wait for it */
-	wait_for_completion(&completion);
+	wait_for_completion(&rcu.completion);
 }
 
+/*
+ * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
+ */
+void synchronize_kernel(void)
+{
+	synchronize_rcu();
+}
 
-EXPORT_SYMBOL(call_rcu);
-EXPORT_SYMBOL(synchronize_kernel);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL_FUTURE(call_rcu);	/* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh);	/* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel);	/* WARNING: GPL-only in April 2006. */
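
Editor's note on the call_rcu() signature change above: the callback no longer receives a (func, arg) pair; it is handed the rcu_head itself. The usual conversion when porting callers is to embed the rcu_head in the structure being protected and recover the enclosing object with container_of() inside the callback. A minimal sketch of that pattern, assuming a hypothetical struct foo that is not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object; not part of this patch. */
struct foo {
	int data;
	struct rcu_head rcu;
};

/* Runs after a grace period; recovers the object from its rcu_head. */
static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

/* Update side: after unlinking fp so no new readers can find it: */
static void foo_delete(struct foo *fp)
{
	/* Old API: call_rcu(&fp->rcu, foo_reclaim_void, fp);
	 * New API: the callback receives the rcu_head pointer. */
	call_rcu(&fp->rcu, foo_reclaim);
}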
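
The synchronize_kernel() to synchronize_rcu() rename keeps a blocking alternative for updaters that may sleep. A sketch of the usual replace-then-reclaim sequence, again using the hypothetical struct foo (the update-side lock that would serialize concurrent writers is elided, and rcu_assign_pointer() is assumed available in this tree):

static struct foo *global_fp;	/* readers dereference under rcu_read_lock() */

static void foo_replace(struct foo *new_fp)
{
	struct foo *old_fp = global_fp;

	rcu_assign_pointer(global_fp, new_fp);	/* publish the new version */
	synchronize_rcu();	/* wait for all pre-existing readers to finish */
	kfree(old_fp);		/* now safe: no reader can still see old_fp */
}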
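
Note the division of labor between the two new waiting primitives: synchronize_rcu() waits for readers, while rcu_barrier() waits for all callbacks already queued via call_rcu() to have been invoked. The latter matters chiefly to unloadable modules whose callback functions live in module text. A sketch of an exit path under those assumptions (foo_exit and foo_cache are hypothetical):

static void __exit foo_exit(void)
{
	/* First stop queueing new callbacks (unregister, unlink, etc.),
	 * then wait for every callback already passed to call_rcu() to
	 * run, so none can fire after this module's text is unmapped. */
	rcu_barrier();

	kmem_cache_destroy(foo_cache);
}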