diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b12bdd9d8..970284f57 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,8 +52,8 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };

-#define RCU_HEAD_INIT(head) { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head)
+#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
+#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
@@ -65,8 +65,11 @@ struct rcu_ctrlblk {
 	long	cur;		/* Current batch number.                      */
 	long	completed;	/* Number of the last completed batch         */
 	int	next_pending;	/* Is the next batch already waiting?         */
-	seqcount_t lock;	/* For atomic reads of cur and next_pending.  */
-} ____cacheline_maxaligned_in_smp;
+
+	spinlock_t	lock	____cacheline_internodealigned_in_smp;
+	cpumask_t	cpumask; /* CPUs that need to switch in order    */
+				 /* for current batch to proceed.        */
+} ____cacheline_internodealigned_in_smp;

 /* Is batch a before batch b ? */
 static inline int rcu_batch_before(long a, long b)
@@ -88,78 +91,55 @@ static inline int rcu_batch_after(long a, long b)
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;	 /* Batch # for grace period */
-	long		qsctr;		 /* User-mode/idle loop etc. */
-	long		last_qsctr;	 /* value of qsctr at beginning */
-					 /* of rcu grace period */
+	int		passed_quiesc;	 /* User-mode/idle loop etc. */
 	int		qs_pending;	 /* core waits for quiesc state */

 	/* 2) batch handling */
 	long		batch;		 /* Batch # for current RCU batch */
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail;
+	long		qlen;		 /* # of queued callbacks */
 	struct rcu_head *curlist;
 	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
+	long		blimit;		 /* Upper limit on a processed batch */
 	int cpu;
+	struct rcu_head barrier;
+#ifdef CONFIG_SMP
+	long		last_rs_qlen;	 /* qlen during the last resched */
+#endif
 };

 DECLARE_PER_CPU(struct rcu_data, rcu_data);
 DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-extern struct rcu_ctrlblk rcu_ctrlblk;
-extern struct rcu_ctrlblk rcu_bh_ctrlblk;

 /*
- * Increment the quiscent state counter.
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
  */
 static inline void rcu_qsctr_inc(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->qsctr++;
+	rdp->passed_quiesc = 1;
 }

 static inline void rcu_bh_qsctr_inc(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->qsctr++;
+	rdp->passed_quiesc = 1;
 }

-static inline int __rcu_pending(struct rcu_ctrlblk *rcp,
-	struct rcu_data *rdp)
-{
-	/* This cpu has pending rcu entries and the grace period
-	 * for them has completed.
-	 */
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-		return 1;
-
-	/* This cpu has no pending entries, but there are new entries */
-	if (!rdp->curlist && rdp->nxtlist)
-		return 1;
-
-	/* This cpu has finished callbacks to invoke */
-	if (rdp->donelist)
-		return 1;
-
-	/* The rcu core waits for a quiescent state from the cpu */
-	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
-		return 1;
-
-	/* nothing to do */
-	return 0;
-}
-
-static inline int rcu_pending(int cpu)
-{
-	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
-		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);

 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
- * When synchronize_kernel() is invoked on one CPU while other CPUs
+ * When synchronize_rcu() is invoked on one CPU while other CPUs
  * are within RCU read-side critical sections, then the
- * synchronize_kernel() is guaranteed to block until after all the other
+ * synchronize_rcu() is guaranteed to block until after all the other
  * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
  * on one CPU while other CPUs are within RCU read-side critical
  * sections, invocation of the corresponding RCU callback is deferred
@@ -238,16 +218,56 @@ static inline int rcu_pending(int cpu)
 		(_________p1); \
 	})

+/**
+ * rcu_assign_pointer - assign (publicize) a pointer to a newly
+ * initialized structure that will be dereferenced by RCU read-side
+ * critical sections.  Returns the value assigned.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (pretty much all of them other than x86), and also prevents
+ * the compiler from reordering the code that initializes the
+ * structure after the pointer assignment.  More importantly, this
+ * call documents which pointers will be dereferenced by RCU read-side
+ * code.
+ */
+
+#define rcu_assign_pointer(p, v)	({ \
+						smp_wmb(); \
+						(p) = (v); \
+					})
+
+/**
+ * synchronize_sched - block until all CPUs have exited any non-preemptive
+ * kernel code sequences.
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (deprecated)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+#define synchronize_sched() synchronize_rcu()
+
 extern void rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
+extern long rcu_batches_completed(void);

 /* Exported interfaces */
 extern void FASTCALL(call_rcu(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
 extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
-extern void synchronize_kernel(void);
+extern __deprecated_for_modules void synchronize_kernel(void);
+extern void synchronize_rcu(void);
+void synchronize_idle(void);
+extern void rcu_barrier(void);

 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
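
Note that the RCU_HEAD_INIT change in the first hunk is visible to users of
the macro: the initializer no longer takes its (unused) head argument.  A
static definition would now read as below; my_head is an illustrative name,
not something defined by this header.

	static struct rcu_head my_head = RCU_HEAD_INIT;	/* was RCU_HEAD_INIT(my_head) */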
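As a minimal usage sketch of the interfaces this header declares
(rcu_read_lock()/rcu_read_unlock(), rcu_dereference(), rcu_assign_pointer(),
synchronize_rcu()): struct foo, gbl_foo, foo_get_a() and foo_update_a() are
illustrative names, not part of the header, and the code assumes in-kernel
(module) context.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/errno.h>

	struct foo {
		int a;
	};

	static struct foo *gbl_foo;		/* RCU-protected pointer */
	static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

	/* Reader: no lock, just a read-side critical section. */
	int foo_get_a(void)
	{
		int retval;

		rcu_read_lock();
		retval = rcu_dereference(gbl_foo)->a;
		rcu_read_unlock();
		return retval;
	}

	/* Updater: publish a new version, wait a grace period, free the old. */
	int foo_update_a(int new_a)
	{
		struct foo *new_fp, *old_fp;

		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
		if (!new_fp)
			return -ENOMEM;

		spin_lock(&foo_lock);
		old_fp = gbl_foo;
		*new_fp = *old_fp;
		new_fp->a = new_a;
		rcu_assign_pointer(gbl_foo, new_fp);	/* smp_wmb(), then assignment */
		spin_unlock(&foo_lock);

		synchronize_rcu();	/* all pre-existing readers are done... */
		kfree(old_fp);		/* ...so the old version may be freed */
		return 0;
	}

Readers never block updaters and vice versa; the only mutual exclusion here
is among updaters, via foo_lock.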
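Where the updater must not block, the asynchronous path uses call_rcu() with
a struct rcu_head embedded in the protected structure; rcu_barrier(), newly
exported above, is what a module would call on unload to wait for such
callbacks to finish.  Again a sketch with illustrative names, extending the
example above with an embedded rcu_head:

	#include <linux/module.h>

	struct foo2 {
		int a;
		struct rcu_head rcu;	/* queued on call_rcu() */
	};

	static void foo2_reclaim(struct rcu_head *rp)
	{
		/* Runs after a grace period has elapsed. */
		kfree(container_of(rp, struct foo2, rcu));
	}

	/* Retire the old version without blocking for a grace period. */
	static void foo2_retire(struct foo2 *old_fp)
	{
		call_rcu(&old_fp->rcu, foo2_reclaim);
	}

	static void __exit foo2_exit(void)
	{
		/* Wait for every callback queued above to complete before
		 * the module text (including foo2_reclaim) goes away. */
		rcu_barrier();
	}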
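Finally, the synchronize_sched() guarantee documented above can be sketched
as follows (reader() and updater() are hypothetical functions): any
preempt_disable() region, including hardirq handlers, acts as the read-side
critical section.

	/* Reader side: preemption disabled, no rcu_read_lock() needed. */
	void reader(void)
	{
		preempt_disable();
		/* ...use data the updater will not free until after
		 *    synchronize_sched() returns... */
		preempt_enable();
	}

	/* Updater side. */
	void updater(void)
	{
		/* ...unlink the data from all global structures... */
		synchronize_sched();	/* all preempt_disable regions done */
		/* ...now safe to free it */
	}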