/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
        { .cur = -300, .completed = -300, .lock = SEQCNT_ZERO };

/* Bookkeeping of the progress of the grace period */
static struct {
        spinlock_t mutex;       /* Guard this struct and writes to rcu_ctrlblk */
        cpumask_t rcu_cpu_mask; /* CPUs that need to switch in order    */
                                /* for current batch to proceed.        */
} rcu_state ____cacheline_maxaligned_in_smp =
        { .mutex = SPIN_LOCK_UNLOCKED, .rcu_cpu_mask = CPU_MASK_NONE };
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
/**
 * call_rcu - Queue an RCU update request.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked as soon as all CPUs have performed
 * a context switch or been seen in the idle loop or in a user process.
 * Read-side critical sections that use call_rcu() for updates must be
 * protected by rcu_read_lock()/rcu_read_unlock().
 */
void fastcall call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        int cpu;
        unsigned long flags;

        head->func = func;
        head->next = NULL;
        /* append to this cpu's next-batch list with interrupts off */
        local_irq_save(flags);
        cpu = smp_processor_id();
        *RCU_nxttail(cpu) = head;
        RCU_nxttail(cpu) = &head->next;
        local_irq_restore(flags);
}
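/*
 * Usage sketch (illustrative only, not part of this file; "struct foo",
 * foo_reclaim() and the fp pointer are hypothetical):
 *
 *      struct foo {
 *              struct list_head list;
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *head)
 *      {
 *              kfree(container_of(head, struct foo, rcu));
 *      }
 *
 * The writer unlinks an element and defers its freeing past a grace
 * period:
 *
 *      list_del_rcu(&fp->list);
 *      call_rcu(&fp->rcu, foo_reclaim);
 *
 * Readers traverse the list under rcu_read_lock()/rcu_read_unlock() and
 * must not block while referencing an element.
 */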
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_head *list)
{
        struct rcu_head *next;

        while (list) {
                next = list->next;
                list->func(list);
                list = next;
        }
}
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcu_ctrlblk.cur with
 *   RCU_quiescbatch(cpu). All cpus are recorded in the
 *   rcu_state.rcu_cpu_mask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   the first call just notices that a new grace period is running; the
 *   following calls check whether there was a quiescent state since the
 *   beginning of the grace period. If so, they update
 *   rcu_state.rcu_cpu_mask. If the bitmap is empty, the grace period is
 *   completed. rcu_check_quiescent_state calls rcu_start_batch(0) to
 *   start the next grace period (if necessary).
 */
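/*
 * Worked example (a sketch; the batch numbers are invented for
 * illustration): assume rcu_ctrlblk.cur == rcu_ctrlblk.completed == 5
 * and a cpu queues a callback, so rcu_start_batch() advances cur to 6
 * and records every online cpu in rcu_state.rcu_cpu_mask. On its next
 * tasklet run, each cpu sees RCU_quiescbatch(cpu) != 6 and snapshots
 * RCU_qsctr(cpu); once that counter has moved (i.e. the cpu context
 * switched), rcu_check_quiescent_state() calls cpu_quiet() to clear the
 * cpu's bit. When the last bit is cleared, completed becomes 6 and the
 * callbacks queued for batch 6 may be invoked.
 */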
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_state.mutex.
 */
static void rcu_start_batch(int next_pending)
{
        if (next_pending)
                rcu_ctrlblk.next_pending = 1;

        if (rcu_ctrlblk.next_pending &&
                        rcu_ctrlblk.completed == rcu_ctrlblk.cur) {
                /* Can't change, since spin lock held. */
                cpus_andnot(rcu_state.rcu_cpu_mask, cpu_online_map,
                                                nohz_cpu_mask);
                write_seqcount_begin(&rcu_ctrlblk.lock);
                rcu_ctrlblk.next_pending = 0;
                rcu_ctrlblk.cur++;
                write_seqcount_end(&rcu_ctrlblk.lock);
        }
}
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu)
{
        cpu_clear(cpu, rcu_state.rcu_cpu_mask);
        if (cpus_empty(rcu_state.rcu_cpu_mask)) {
                /* batch completed ! */
                rcu_ctrlblk.completed = rcu_ctrlblk.cur;
                rcu_start_batch(0);
        }
}
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it has not already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(void)
{
        int cpu = smp_processor_id();

        if (RCU_quiescbatch(cpu) != rcu_ctrlblk.cur) {
                /* new grace period: record qsctr value. */
                RCU_qs_pending(cpu) = 1;
                RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
                RCU_quiescbatch(cpu) = rcu_ctrlblk.cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!RCU_qs_pending(cpu))
                return;

        /*
         * Races with local timer interrupt - in the worst case
         * we may miss one quiescent state of that CPU. That is
         * tolerable. So no need to disable interrupts.
         */
        if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
                return;
        RCU_qs_pending(cpu) = 0;

        spin_lock(&rcu_state.mutex);
        /*
         * RCU_quiescbatch/batch.cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(RCU_quiescbatch(cpu) == rcu_ctrlblk.cur))
                cpu_quiet(cpu);

        spin_unlock(&rcu_state.mutex);
}
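/*
 * For reference: RCU_qsctr(cpu) is advanced outside this file. A
 * minimal sketch of the hook, assuming the rcu_qsctr_inc() helper from
 * <linux/rcupdate.h> of this era (see that header for the authoritative
 * version):
 *
 *      static inline void rcu_qsctr_inc(int cpu)
 *      {
 *              RCU_qsctr(cpu)++;
 *      }
 *
 * The scheduler invokes it when a cpu context switches, which is why a
 * context switch always counts as a quiescent state.
 */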
#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * locking requirements; the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_head *list)
{
        int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        /* splice the dead cpu's callbacks onto this cpu's next-batch list */
        while (list != NULL) {
                *RCU_nxttail(cpu) = list;
                RCU_nxttail(cpu) = &list->next;
                list = list->next;
        }
        local_irq_enable();
}
static void rcu_offline_cpu(int cpu)
{
        /* if the cpu going offline owns the grace period
         * we can block indefinitely waiting for it, so flush
         * it here
         */
        spin_lock_bh(&rcu_state.mutex);
        if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
                cpu_quiet(cpu);
        spin_unlock_bh(&rcu_state.mutex);

        rcu_move_batch(RCU_curlist(cpu));
        rcu_move_batch(RCU_nxtlist(cpu));

        tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */
void rcu_restart_cpu(int cpu)
{
        spin_lock_bh(&rcu_state.mutex);
        RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
        RCU_qs_pending(cpu) = 0;
        spin_unlock_bh(&rcu_state.mutex);
}
/*
 * This does the RCU processing work from tasklet context.
 */
static void rcu_process_callbacks(unsigned long unused)
{
        int cpu = smp_processor_id();
        struct rcu_head *rcu_list = NULL;

        /* detach the current batch if its grace period has completed */
        if (RCU_curlist(cpu) &&
            !rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
                rcu_list = RCU_curlist(cpu);
                RCU_curlist(cpu) = NULL;
        }

        /* grab rcu_nxtlist with interrupts off: call_rcu appends to it */
        local_irq_disable();
        if (RCU_nxtlist(cpu) && !RCU_curlist(cpu)) {
                int next_pending, seq;

                RCU_curlist(cpu) = RCU_nxtlist(cpu);
                RCU_nxtlist(cpu) = NULL;
                RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */
                do {
                        seq = read_seqcount_begin(&rcu_ctrlblk.lock);
                        /* determine batch number */
                        RCU_batch(cpu) = rcu_ctrlblk.cur + 1;
                        next_pending = rcu_ctrlblk.next_pending;
                } while (read_seqcount_retry(&rcu_ctrlblk.lock, seq));

                if (!next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rcu_state.mutex);
                        rcu_start_batch(1);
                        spin_unlock(&rcu_state.mutex);
                }
        } else {
                local_irq_enable();
        }
        rcu_check_quiescent_state();
        if (rcu_list)
                rcu_do_batch(rcu_list);
}
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                RCU_qsctr(cpu)++;
        tasklet_schedule(&RCU_tasklet(cpu));
}
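/*
 * For reference, rcu_check_callbacks() is driven once per tick from the
 * timer interrupt path (the exact call site varies by kernel version);
 * a hypothetical sketch of the caller:
 *
 *      // in the per-cpu timer tick handler:
 *      rcu_check_callbacks(smp_processor_id(), user_mode(regs));
 *
 * A tick taken in user mode, or on an idle cpu that is outside softirq
 * and handling only this one interrupt, cannot be inside an RCU
 * read-side critical section, so it is counted as a quiescent state by
 * bumping RCU_qsctr(cpu) above.
 */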
static void __devinit rcu_online_cpu(int cpu)
{
        memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
        tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
        RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
        RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
        RCU_qs_pending(cpu) = 0;
}
static int __devinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                rcu_online_cpu(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                rcu_offline_cpu(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
        .notifier_call = rcu_cpu_notify,
};
/*
 * Initializes the RCU mechanism. Assumed to be called early, i.e.
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up. Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
/**
 * synchronize_kernel - wait until all the CPUs have gone
 * through a "quiescent" state. It may sleep.
 */
void synchronize_kernel(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished */
        call_rcu(&rcu.head, wakeme_after_rcu);

        /* Wait for it */
        wait_for_completion(&rcu.completion);
}
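/*
 * Usage sketch (hypothetical update-side code; "p" and its list are
 * illustrative only):
 *
 *      list_del_rcu(&p->list);         // unpublish the element
 *      synchronize_kernel();           // wait out pre-existing readers
 *      kfree(p);                       // no reader can still see p
 */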
EXPORT_SYMBOL(call_rcu);
EXPORT_SYMBOL(synchronize_kernel);