diff --git a/kernel/user.c b/kernel/user.c
index 5e6a22172..640ee48c8 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -12,12 +12,15 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
-#define UIDHASH_BITS	8
+
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
 #define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK	(UIDHASH_SZ - 1)
 #define __uidhashfn(xid,uid)	((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
@@ -25,7 +28,17 @@
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
+static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
 	.__count	= ATOMIC_INIT(1),
@@ -34,6 +47,10 @@ struct user_struct root_user = {
 	.sigpending	= ATOMIC_INIT(0),
 	.mq_bytes	= 0,
 	.locked_shm	= 0,
+#ifdef CONFIG_KEYS
+	.uid_keyring	= &root_user_keyring,
+	.session_keyring = &root_session_keyring,
+#endif
 };
 
 /*
@@ -76,19 +93,30 @@ static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct lis
 struct user_struct *find_user(xid_t xid, uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+	unsigned long flags;
+
+	if (!up)
+		return;
+
+	local_irq_save(flags);
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
+		spin_unlock_irqrestore(&uidhash_lock, flags);
+		key_put(up->uid_keyring);
+		key_put(up->session_keyring);
 		kmem_cache_free(uid_cachep, up);
-		spin_unlock(&uidhash_lock);
+	} else {
+		local_irq_restore(flags);
 	}
 }
 
@@ -97,9 +125,9 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 	struct list_head *hashent = uidhashentry(xid, uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(xid, uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -113,23 +141,34 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
 		atomic_set(&new->processes, 0);
 		atomic_set(&new->files, 0);
 		atomic_set(&new->sigpending, 0);
+#ifdef CONFIG_INOTIFY_USER
+		atomic_set(&new->inotify_watches, 0);
+		atomic_set(&new->inotify_devs, 0);
+#endif
 
 		new->mq_bytes = 0;
 		new->locked_shm = 0;
 
+		if (alloc_uid_keyring(new, current) < 0) {
+			kmem_cache_free(uid_cachep, new);
+			return NULL;
+		}
+
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(xid, uid, hashent);
 		if (up) {
+			key_put(new->uid_keyring);
+			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
 		} else {
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_irq(&uidhash_lock);
 
 	}
 	return up;
@@ -147,8 +186,10 @@ void switch_uid(struct user_struct *new_user)
 	old_user = current->user;
 	atomic_inc(&new_user->processes);
 	atomic_dec(&old_user->processes);
+	switch_uid_keyring(new_user);
 	current->user = new_user;
 	free_uid(old_user);
+	suid_keys(current);
 }
 
@@ -163,9 +204,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0,0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
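
For context, a minimal caller-side sketch of how the reworked reference counting is meant to be used. The enclosing function and its limit check are hypothetical illustrations, not part of this patch; find_user(), free_uid(), struct user_struct and the xid_t argument are as defined in the diff above.

/* Hypothetical caller sketch (not from this patch): query per-user
 * process accounting for (xid, uid), then drop the reference.
 * find_user() takes uidhash_lock with spin_lock_irqsave() internally,
 * so a process-context caller needs no locking of its own. */
static int uid_over_proc_limit(xid_t xid, uid_t uid, int limit)
{
	struct user_struct *user;
	int over;

	user = find_user(xid, uid);	/* takes a reference on success */
	if (!user)
		return 0;		/* no tasks for this uid/xid */

	over = atomic_read(&user->processes) > limit;

	free_uid(user);			/* drop the reference taken above */
	return over;
}

Note the ordering the patch establishes inside free_uid(): on the final put, the entry is unhashed and uidhash_lock is dropped via spin_unlock_irqrestore() before key_put() and kmem_cache_free() run, so the heavier teardown happens outside the critical section, while the else branch merely restores the saved interrupt state.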