#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/key.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
-#define UIDHASH_BITS 8
+
+#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
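/*
 * __uidhashfn() folds the high uid bits down and mixes in the context ID,
 * so the same uid lands in different buckets in different contexts
 * (xid is presumed to be the vserver context ID in this tree).
 */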
#define __uidhashfn(xid,uid) ((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(uidhash_lock);
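+/* uidhash_lock guards uidhash_table and, via atomic_dec_and_lock(),
+ * the final reference drop in free_uid() */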
struct user_struct root_user = {
.__count = ATOMIC_INIT(1),
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
- .mq_bytes = 0
+ .mq_bytes = 0,
+ .locked_shm = 0,
+#ifdef CONFIG_KEYS
+ .uid_keyring = &root_user_keyring,
+ .session_keyring = &root_session_keyring,
+#endif
};
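+/* the root keyrings above must be statically initialised elsewhere
+ * (presumably by the key subsystem), since root_user exists before
+ * any allocator does */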
/*
 * Drop a reference on a user_struct and free it once the count hits zero.
 */
void free_uid(struct user_struct *up)
{
if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
uid_hash_remove(up);
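+ /* release the keyrings this user_struct pinned; key_put(NULL) is a no-op */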
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
kmem_cache_free(uid_cachep, up);
spin_unlock(&uidhash_lock);
}
atomic_set(&new->sigpending, 0);
new->mq_bytes = 0;
+ new->locked_shm = 0;
+
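+ /* allocate the user's own keyring and default session keyring; on
+  * failure, hand the half-built user_struct straight back to the cache */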
+ if (alloc_uid_keyring(new) < 0) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
/*
 * Before adding this, check whether we raced
 * on adding the same user already..
 */
spin_lock(&uidhash_lock);
up = uid_hash_find(xid, uid, hashent);
if (up) {
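+ /* we lost the race: drop the keyrings we just allocated along with
+  * the now-redundant user_struct */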
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
old_user = current->user;
atomic_inc(&new_user->processes);
atomic_dec(&old_user->processes);
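+ /* point the key subsystem at the new user's keyrings
+  * (a no-op when CONFIG_KEYS is unset) */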
+ switch_uid_keyring(new_user);
current->user = new_user;
free_uid(old_user);
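+ /* give the key subsystem a chance to react to the UID change
+  * (also stubbed out without CONFIG_KEYS) */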
+ suid_keys(current);
}