VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index a71152d..96255f4 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- *                                   implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
  *
  * Copyright (c) 2001   David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-       struct list_head        list;
-       struct task_struct      *task;
-       unsigned int            flags;
+       struct list_head list;
+       struct task_struct *task;
+       unsigned int flags;
 #define RWSEM_WAITING_FOR_READ 0x00000001
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
@@ -22,7 +22,8 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
        if (sem->debug)
                printk("[%d] %s({%d,%d})\n",
-                      current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+                      current->pid, str, sem->activity,
+                      list_empty(&sem->wait_list) ? 0 : 1);
 }
 #endif
 
@@ -40,7 +41,7 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 }
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -48,15 +49,16 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;
 
-       rwsemtrace(sem,"Entering __rwsem_do_wake");
+       rwsemtrace(sem, "Entering __rwsem_do_wake");
 
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -64,14 +66,16 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                goto dont_wake_writers;
        }
 
-       /* if we are allowed to wake writers try to grant a single write lock if there's a
-        * writer at the front of the queue
-        * - we leave the 'waiting count' incremented to signify potential contention
+       /* if we are allowed to wake writers try to grant a single write lock
+        * if there's a writer at the front of the queue
+        * - we leave the 'waiting count' incremented to signify potential
+        *   contention
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                sem->activity = -1;
                list_del(&waiter->list);
                tsk = waiter->task;
+               /* Don't touch waiter after ->task has been NULLed */
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
@@ -79,10 +83,10 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                goto out;
        }
 
-       /* grant an infinite number of read locks to the readers at the front of the queue */
+       /* grant an infinite number of read locks to the front of the queue */
  dont_wake_writers:
        woken = 0;
-       while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+       while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;
 
                list_del(&waiter->list);
@@ -94,27 +98,28 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
-               waiter = list_entry(next,struct rwsem_waiter,list);
+               waiter = list_entry(next, struct rwsem_waiter, list);
        }
 
        sem->activity += woken;
 
  out:
-       rwsemtrace(sem,"Leaving __rwsem_do_wake");
+       rwsemtrace(sem, "Leaving __rwsem_do_wake");
        return sem;
 }
 
 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
 
        sem->activity = -1;
 
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);
 
        tsk = waiter->task;
@@ -128,16 +133,16 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
  * get a read lock on the semaphore
  */
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
 
-       rwsemtrace(sem,"Entering __down_read");
+       rwsemtrace(sem, "Entering __down_read");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                spin_unlock(&sem->wait_lock);
@@ -145,14 +150,14 @@ void fastcall __down_read(struct rw_semaphore *sem)
        }
 
        tsk = current;
-       set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter.list,&sem->wait_list);
+       list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);
@@ -168,7 +173,7 @@ void fastcall __down_read(struct rw_semaphore *sem)
        tsk->state = TASK_RUNNING;
 
  out:
-       rwsemtrace(sem,"Leaving __down_read");
+       rwsemtrace(sem, "Leaving __down_read");
 }
 
 /*
@@ -177,11 +182,11 @@ void fastcall __down_read(struct rw_semaphore *sem)
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
-       rwsemtrace(sem,"Entering __down_read_trylock");
+       rwsemtrace(sem, "Entering __down_read_trylock");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
@@ -189,24 +194,24 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __down_read_trylock");
+       rwsemtrace(sem, "Leaving __down_read_trylock");
        return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
 
-       rwsemtrace(sem,"Entering __down_write");
+       rwsemtrace(sem, "Entering __down_write");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity==0 && list_empty(&sem->wait_list)) {
+       if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                spin_unlock(&sem->wait_lock);
@@ -214,14 +219,14 @@ void fastcall __down_write(struct rw_semaphore *sem)
        }
 
        tsk = current;
-       set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter.list,&sem->wait_list);
+       list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);
@@ -237,7 +242,7 @@ void fastcall __down_write(struct rw_semaphore *sem)
        tsk->state = TASK_RUNNING;
 
  out:
-       rwsemtrace(sem,"Leaving __down_write");
+       rwsemtrace(sem, "Leaving __down_write");
 }
 
 /*
@@ -246,11 +251,11 @@ void fastcall __down_write(struct rw_semaphore *sem)
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
-       rwsemtrace(sem,"Entering __down_write_trylock");
+       rwsemtrace(sem, "Entering __down_write_trylock");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity==0 && list_empty(&sem->wait_list)) {
+       if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
@@ -258,7 +263,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __down_write_trylock");
+       rwsemtrace(sem, "Leaving __down_write_trylock");
        return ret;
 }
 
@@ -267,16 +272,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __up_read");
+       rwsemtrace(sem, "Entering __up_read");
 
        spin_lock(&sem->wait_lock);
 
-       if (--sem->activity==0 && !list_empty(&sem->wait_list))
+       if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __up_read");
+       rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -284,7 +289,7 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __up_write");
+       rwsemtrace(sem, "Entering __up_write");
 
        spin_lock(&sem->wait_lock);
 
@@ -294,7 +299,7 @@ void fastcall __up_write(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __up_write");
+       rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -303,17 +308,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __downgrade_write");
+       rwsemtrace(sem, "Entering __downgrade_write");
 
        spin_lock(&sem->wait_lock);
 
        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem,0);
+               sem = __rwsem_do_wake(sem, 0);
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __downgrade_write");
+       rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
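
For readers tracing the locking logic in the patch above, here is a minimal usage sketch. It is not part of patch-2.6.8.1-vs1.9.2.diff; the semaphore (example_sem), the guarded data (example_value), and both functions are hypothetical. Callers go through the ordinary <linux/rwsem.h> wrappers (down_read(), down_write(), downgrade_write(), up_read()), which on the generic-spinlock configuration end up in the __down_read()/__down_write()/__downgrade_write() paths shown in the diff.

/*
 * Illustrative sketch only -- not part of the patch.
 * example_sem and example_value are hypothetical names.
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);
static int example_value;

int example_read(void)
{
	int v;

	down_read(&example_sem);	/* __down_read(): grants immediately or queues us */
	v = example_value;
	up_read(&example_sem);		/* last reader: __up_read() wakes one queued writer */
	return v;
}

void example_update(int v)
{
	down_write(&example_sem);	/* exclusive: sem->activity becomes -1 */
	example_value = v;
	downgrade_write(&example_sem);	/* activity -> 1; queued readers are woken, writers are not */
	/* ... continue under the read lock ... */
	up_read(&example_sem);
}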