Merge to Fedora kernel-2.6.18-1.2255_FC5-vs2.0.2.2-rc9 patched with stable patch...
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index a71152d..c4cfd6c 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- *                                   implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
  *
  * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,37 +10,33 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-        struct list_head        list;
-        struct task_struct      *task;
-        unsigned int            flags;
+        struct list_head list;
+        struct task_struct *task;
+        unsigned int flags;
 #define RWSEM_WAITING_FOR_READ  0x00000001
 #define RWSEM_WAITING_FOR_WRITE 0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-        if (sem->debug)
-                printk("[%d] %s({%d,%d})\n",
-                        current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+                  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        /*
+         * Make sure we are not reinitializing a held semaphore:
+         */
+        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+        lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
         sem->activity = 0;
         spin_lock_init(&sem->wait_lock);
         INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-        sem->debug = 0;
-#endif
 }
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -48,15 +44,14 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
         int woken;
 
-        rwsemtrace(sem,"Entering __rwsem_do_wake");
-
-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
         if (!wakewrite) {
                 if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -64,61 +59,63 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                         goto dont_wake_writers;
         }
 
-        /* if we are allowed to wake writers try to grant a single write lock if there's a
-         * writer at the front of the queue
-         * - we leave the 'waiting count' incremented to signify potential contention
+        /* if we are allowed to wake writers try to grant a single write lock
+         * if there's a writer at the front of the queue
+         * - we leave the 'waiting count' incremented to signify potential
+         *   contention
          */
         if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                 sem->activity = -1;
                 list_del(&waiter->list);
                 tsk = waiter->task;
-                mb();
+                /* Don't touch waiter after ->task has been NULLed */
+                smp_mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
                 put_task_struct(tsk);
                 goto out;
         }
 
-        /* grant an infinite number of read locks to the readers at the front of the queue */
+        /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
         woken = 0;
-        while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                 struct list_head *next = waiter->list.next;
 
                 list_del(&waiter->list);
                 tsk = waiter->task;
-                mb();
+                smp_mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
                 put_task_struct(tsk);
                 woken++;
                 if (list_empty(&sem->wait_list))
                         break;
-                waiter = list_entry(next,struct rwsem_waiter,list);
+                waiter = list_entry(next, struct rwsem_waiter, list);
         }
 
         sem->activity += woken;
 
 out:
-        rwsemtrace(sem,"Leaving __rwsem_do_wake");
         return sem;
 }
 
 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
 
         sem->activity = -1;
 
-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
         list_del(&waiter->list);
 
         tsk = waiter->task;
-        mb();
+        smp_mb();
         waiter->task = NULL;
         wake_up_process(tsk);
         put_task_struct(tsk);
@@ -128,34 +125,32 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
  * get a read lock on the semaphore
  */
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem,"Entering __down_read");
+        spin_lock_irq(&sem->wait_lock);
 
-        spin_lock(&sem->wait_lock);
-
-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
-                spin_unlock(&sem->wait_lock);
+                spin_unlock_irq(&sem->wait_lock);
                 goto out;
         }
 
         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_READ;
         get_task_struct(tsk);
 
-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);
 
         /* we don't need to touch the semaphore struct anymore */
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irq(&sem->wait_lock);
 
         /* wait to be given the lock */
         for (;;) {
@@ -166,9 +161,8 @@ void fastcall __down_read(struct rw_semaphore *sem)
         }
         tsk->state = TASK_RUNNING;
-
 out:
-        rwsemtrace(sem,"Leaving __down_read");
+        ;
 }
 
 /*
@@ -176,55 +170,53 @@ void fastcall __down_read(struct rw_semaphore *sem)
  */
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
+        unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem,"Entering __down_read_trylock");
-        spin_lock(&sem->wait_lock);
-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        spin_lock_irqsave(&sem->wait_lock, flags);
+
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
                 ret = 1;
         }
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __down_read_trylock");
         return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem,"Entering __down_write");
-
-        spin_lock(&sem->wait_lock);
+        spin_lock_irq(&sem->wait_lock);
 
-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
-                spin_unlock(&sem->wait_lock);
+                spin_unlock_irq(&sem->wait_lock);
                 goto out;
         }
 
         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_WRITE;
         get_task_struct(tsk);
 
-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);
 
         /* we don't need to touch the semaphore struct anymore */
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irq(&sem->wait_lock);
 
         /* wait to be given the lock */
         for (;;) {
@@ -235,9 +227,13 @@ void fastcall __down_write(struct rw_semaphore *sem)
         }
         tsk->state = TASK_RUNNING;
-
 out:
-        rwsemtrace(sem,"Leaving __down_write");
+        ;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+        __down_write_nested(sem, 0);
 }
 
 /*
@@ -245,20 +241,19 @@ void fastcall __down_write(struct rw_semaphore *sem)
  */
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
+        unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem,"Entering __down_write_trylock");
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
                 ret = 1;
         }
 
-        spin_unlock(&sem->wait_lock);
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem,"Leaving __down_write_trylock");
         return ret;
 }
@@ -267,16 +262,14 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_read");
+        unsigned long flags;
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (--sem->activity==0 && !list_empty(&sem->wait_list))
+        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                 sem = __rwsem_wake_one_writer(sem);
 
-        spin_unlock(&sem->wait_lock);
-
-        rwsemtrace(sem,"Leaving __up_read");
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -284,17 +277,15 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_write");
+        unsigned long flags;
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 0;
         if (!list_empty(&sem->wait_list))
                 sem = __rwsem_do_wake(sem, 1);
 
-        spin_unlock(&sem->wait_lock);
-
-        rwsemtrace(sem,"Leaving __up_write");
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -303,27 +294,23 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __downgrade_write");
+        unsigned long flags;
 
-        spin_lock(&sem->wait_lock);
+        spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 1;
         if (!list_empty(&sem->wait_list))
-                sem = __rwsem_do_wake(sem,0);
+                sem = __rwsem_do_wake(sem, 0);
 
-        spin_unlock(&sem->wait_lock);
-
-        rwsemtrace(sem,"Leaving __downgrade_write");
+        spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
 
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
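
For context, and purely as an illustration that is not part of the patch above: the new __down_write_nested() entry point and the exported __init_rwsem() exist so the lockdep-aware wrappers in include/linux/rwsem.h can pass a lock class and a nesting subclass down to this slow path, with plain __down_write() reduced to __down_write_nested(sem, 0). A minimal sketch of why a caller would pass a non-zero subclass follows; struct node and move_node() are hypothetical, while down_write(), down_write_nested() and SINGLE_DEPTH_NESTING are the usual definitions from <linux/rwsem.h> and <linux/lockdep.h>.

#include <linux/rwsem.h>
#include <linux/lockdep.h>

/* Hypothetical example type: every node carries its own rw_semaphore, and
 * all of those semaphores share one lock class because they are initialised
 * from the same init_rwsem() call site. */
struct node {
        struct rw_semaphore sem;
        struct node *parent;
};

static void move_node(struct node *child)
{
        struct node *parent = child->parent;

        /* first write lock of the class */
        down_write(&parent->sem);

        /* second write lock of the same class, always taken in
         * parent -> child order; annotating it as nested keeps lockdep
         * from reporting a false "possible recursive locking" warning */
        down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);

        /* ... re-parent the child, update both nodes ... */

        up_write(&child->sem);
        up_write(&parent->sem);
}

The rest of the patch converts the internal wait_lock to irq-safe locking (spin_lock_irq()/spin_lock_irqsave() instead of plain spin_lock()), which is why the trylock, up and downgrade paths now carry an explicit flags variable.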