X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fuser.c;h=bcbd0b3631f4a15b36fc1bbeea75837d8ec207f9;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=023d8c0dac7ef917ed95cdf808b460ae1f94ef44;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/kernel/user.c b/kernel/user.c
index 023d8c0da..bcbd0b363 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,16 @@
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -82,21 +93,30 @@ static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct lis
 struct user_struct *find_user(xid_t xid, uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+	unsigned long flags;
+
+	if (!up)
+		return;
+
+	local_irq_save(flags);
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
+		spin_unlock_irqrestore(&uidhash_lock, flags);
 		key_put(up->uid_keyring);
 		key_put(up->session_keyring);
 		kmem_cache_free(uid_cachep, up);
-		spin_unlock(&uidhash_lock);
+	} else {
+		local_irq_restore(flags);
 	}
 }
 
@@ -105,9 +125,9 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 	struct list_head *hashent = uidhashentry(xid, uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(xid, uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -121,6 +141,10 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 		atomic_set(&new->processes, 0);
 		atomic_set(&new->files, 0);
 		atomic_set(&new->sigpending, 0);
+#ifdef CONFIG_INOTIFY
+		atomic_set(&new->inotify_watches, 0);
+		atomic_set(&new->inotify_devs, 0);
+#endif
 
 		new->mq_bytes = 0;
 		new->locked_shm = 0;
@@ -134,7 +158,7 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(xid, uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -144,7 +168,7 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 	}
 	return up;
 }
@@ -180,9 +204,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0,0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
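
The free_uid() rework in the hunks above hinges on the atomic_dec_and_lock() idiom: the reference count is dropped first, and uidhash_lock is taken only by the caller that drops the last reference, with local interrupts disabled around the whole sequence because the lock can also be taken from softirq context when task structs are RCU-freed. The fragment below is a minimal userspace sketch of that dec-and-lock idiom, not kernel code: my_user, my_put and dec_and_lock are made-up names, C11 atomics and a pthread mutex stand in for atomic_t and the spinlock, and the interrupt-disabling part of the patch has no userspace counterpart.

/*
 * Userspace analogue of the free_uid() pattern: a refcounted, hashed
 * object where only the caller dropping the last reference takes the
 * hash lock, unhashes the object and frees it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct my_user {
	atomic_int refcount;			/* plays the role of __count */
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static struct my_user *hash_head;		/* one-bucket stand-in for uidhash_table */

/*
 * Like atomic_dec_and_lock(): drop one reference, and take the lock only
 * when that reference was the last one, so removing the object from the
 * hash is atomic with respect to concurrent lookups.  Returns 1 with the
 * lock held if the caller must unhash and free the object.
 */
static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	while (old > 1)				/* fast path: not the last reference */
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;

	pthread_mutex_lock(lock);		/* might be the last reference */
	if (atomic_fetch_sub(cnt, 1) == 1)
		return 1;			/* last reference: caller cleans up, then unlocks */
	pthread_mutex_unlock(lock);
	return 0;
}

static void my_put(struct my_user *up)
{
	if (!up)
		return;
	if (dec_and_lock(&up->refcount, &hash_lock)) {
		hash_head = NULL;		/* stands in for uid_hash_remove() */
		pthread_mutex_unlock(&hash_lock);
		free(up);			/* freed outside the lock, as in the patch */
	}
}

int main(void)
{
	struct my_user *up = calloc(1, sizeof(*up));

	if (!up)
		return 1;
	atomic_init(&up->refcount, 2);
	hash_head = up;
	my_put(up);				/* drops to 1, object stays hashed */
	my_put(up);				/* last reference: unhashed and freed */
	printf("hash empty: %s\n", hash_head ? "no" : "yes");
	return 0;
}

Built with "cc -std=c11 -pthread", this prints "hash empty: yes": the structure is unhashed and freed only on the final put, which is the ordering the patch preserves while making every uidhash_lock acquisition interrupt-safe.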