vserver 1.9.5.x5
[linux-2.6.git] / kernel / rcupdate.c
index 1b16bfc..f0ae3c3 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <asm/atomic.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/completion.h>
 #include <linux/moduleparam.h>
@@ -49,9 +49,9 @@
 
 /* Definition for rcupdate control block. */
 struct rcu_ctrlblk rcu_ctrlblk = 
-       { .cur = -300, .completed = -300 , .lock = SEQCNT_ZERO };
+       { .cur = -300, .completed = -300 };
 struct rcu_ctrlblk rcu_bh_ctrlblk =
-       { .cur = -300, .completed = -300 , .lock = SEQCNT_ZERO };
+       { .cur = -300, .completed = -300 };
 
 /* Bookkeeping of the progress of the grace period */
 struct rcu_state {
@@ -60,9 +60,9 @@ struct rcu_state {
                                      /* for current batch to proceed.        */
 };
 
-struct rcu_state rcu_state ____cacheline_maxaligned_in_smp =
+static struct rcu_state rcu_state ____cacheline_maxaligned_in_smp =
          {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
-struct rcu_state rcu_bh_state ____cacheline_maxaligned_in_smp =
+static struct rcu_state rcu_bh_state ____cacheline_maxaligned_in_smp =
          {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
 
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
@@ -185,10 +185,13 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
                        rcp->completed == rcp->cur) {
                /* Can't change, since spin lock held. */
                cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
-               write_seqcount_begin(&rcp->lock);
+
                rcp->next_pending = 0;
+               /* next_pending == 0 must be visible in __rcu_process_callbacks()
+                * before it can see the new value of ->cur.
+                */
+               smp_wmb();
                rcp->cur++;
-               write_seqcount_end(&rcp->lock);
        }
 }
 
@@ -216,9 +219,9 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                        struct rcu_state *rsp, struct rcu_data *rdp)
 {
        if (rdp->quiescbatch != rcp->cur) {
-               /* new grace period: record qsctr value. */
+               /* start new grace period: */
                rdp->qs_pending = 1;
-               rdp->last_qsctr = rdp->qsctr;
+               rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }
@@ -231,11 +234,10 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                return;
 
        /* 
-        * Races with local timer interrupt - in the worst case
-        * we may miss one quiescent state of that CPU. That is
-        * tolerable. So no need to disable interrupts.
+        * Was there a quiescent state since the beginning of the grace
+        * period? If not, exit and wait for the next call.
         */
-       if (rdp->qsctr == rdp->last_qsctr)
+       if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;
 
@@ -319,8 +321,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 
        local_irq_disable();
        if (rdp->nxtlist && !rdp->curlist) {
-               int next_pending, seq;
-
                rdp->curlist = rdp->nxtlist;
                rdp->curtail = rdp->nxttail;
                rdp->nxtlist = NULL;
@@ -330,14 +330,15 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                /*
                 * start the next batch of callbacks
                 */
-               do {
-                       seq = read_seqcount_begin(&rcp->lock);
-                       /* determine batch number */
-                       rdp->batch = rcp->cur + 1;
-                       next_pending = rcp->next_pending;
-               } while (read_seqcount_retry(&rcp->lock, seq));
-
-               if (!next_pending) {
+
+               /* determine batch number */
+               rdp->batch = rcp->cur + 1;
+               /* See the comment and corresponding smp_wmb() in
+                * rcu_start_batch().
+                */
+               smp_rmb();
+
+               if (!rcp->next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rsp->lock);
                        rcu_start_batch(rcp, rsp, 1);