#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/key.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_BITS 8
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
-#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid) (uidhash_table + __uidhashfn((uid)))
+#define __uidhashfn(xid,uid) ((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
+#define uidhashentry(xid,uid) (uidhash_table + __uidhashfn((xid),(uid)))
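/*
 * Illustrative sketch, not part of this patch: the reworked hash folds
 * the context id (xid) into the bucket index, so the same uid in two
 * different contexts usually hashes to two different buckets.  A quick
 * check via a hypothetical init-time hook, assuming UIDHASH_BITS == 8
 * as defined above:
 */
static void __init ex_uidhash_demo(void)
{
	/* same uid, two contexts: typically two distinct buckets */
	printk("uid 1000, xid 0 -> bucket %lu\n",
	       (unsigned long)__uidhashfn(0, 1000));
	printk("uid 1000, xid 7 -> bucket %lu\n",
	       (unsigned long)__uidhashfn(7, 1000));
}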
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(uidhash_lock);
struct user_struct root_user = {
.__count = ATOMIC_INIT(1),
.processes = ATOMIC_INIT(1),
- .files = ATOMIC_INIT(0)
+ .files = ATOMIC_INIT(0),
+ .sigpending = ATOMIC_INIT(0),
+ .mq_bytes = 0,
+ .locked_shm = 0,
+#ifdef CONFIG_KEYS
+ .uid_keyring = &root_user_keyring,
+ .session_keyring = &root_session_keyring,
+#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

-		if(user->uid == uid) {
+		if(user->uid == uid && user->xid == xid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
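/*
 * Aside, not part of the patch: list_entry() used above is just
 * container_of() -- it maps a pointer to the embedded list_head back
 * to a pointer to the enclosing user_struct.  An equivalent open-coded
 * form (illustrative name; offsetof() comes from <linux/stddef.h>):
 */
#define ex_list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))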
-struct user_struct *find_user(uid_t uid)
+/*
+ * Locate the user_struct for the passed UID. If found, take a ref on it. The
+ * caller must undo that ref with free_uid().
+ *
+ * If the user_struct could not be found, return NULL.
+ */
+struct user_struct *find_user(xid_t xid, uid_t uid)
{
- return uid_hash_find(uid, uidhashentry(uid));
+ struct user_struct *ret;
+
+ spin_lock(&uidhash_lock);
+ ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
+ spin_unlock(&uidhash_lock);
+ return ret;
}
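/*
 * Hypothetical caller sketch, not part of this patch: find_user()
 * returns with an extra reference held, which the caller must drop
 * again with free_uid() once it is done with the result.
 */
static int ex_user_is_busy(xid_t xid, uid_t uid)
{
	struct user_struct *user = find_user(xid, uid);
	int busy = 0;

	if (user) {
		busy = atomic_read(&user->processes) > 0;
		free_uid(user);		/* undo the reference find_user() took */
	}
	return busy;
}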
void free_uid(struct user_struct *up)
{
if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
uid_hash_remove(up);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
kmem_cache_free(uid_cachep, up);
spin_unlock(&uidhash_lock);
}
}
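/*
 * Sketch of the atomic_dec_and_lock() pattern free_uid() relies on:
 * the lock is only taken when the count actually drops to zero, so the
 * common put path stays lock-free.  "ex_obj" and its lock are
 * hypothetical stand-ins, for illustration only.
 */
struct ex_obj {
	atomic_t refs;
	struct list_head node;
};

static DEFINE_SPINLOCK(ex_lock);

static void ex_obj_put(struct ex_obj *obj)
{
	/* returns nonzero with ex_lock held iff refs reached zero */
	if (atomic_dec_and_lock(&obj->refs, &ex_lock)) {
		list_del(&obj->node);	/* unhash before anyone can find it */
		spin_unlock(&ex_lock);
		kfree(obj);
	}
}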
-struct user_struct * alloc_uid(uid_t uid)
+struct user_struct * alloc_uid(xid_t xid, uid_t uid)
{
- struct list_head *hashent = uidhashentry(uid);
+ struct list_head *hashent = uidhashentry(xid, uid);
struct user_struct *up;
spin_lock(&uidhash_lock);
- up = uid_hash_find(uid, hashent);
+ up = uid_hash_find(xid, uid, hashent);
spin_unlock(&uidhash_lock);
	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
new->uid = uid;
+ new->xid = xid;
atomic_set(&new->__count, 1);
atomic_set(&new->processes, 0);
atomic_set(&new->files, 0);
+ atomic_set(&new->sigpending, 0);
+
+ new->mq_bytes = 0;
+ new->locked_shm = 0;
+
+ if (alloc_uid_keyring(new) < 0) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
/*
* Before adding this, check whether we raced
* on adding the same user already..
*/
spin_lock(&uidhash_lock);
- up = uid_hash_find(uid, hashent);
+ up = uid_hash_find(xid, uid, hashent);
if (up) {
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
+	switch_uid_keyring(new_user);
	current->user = new_user;
	free_uid(old_user);
+	suid_keys(current);
}
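/*
 * Generic form of the optimistic-allocation scheme alloc_uid() uses,
 * for illustration only: allocate outside the lock (the allocation may
 * sleep), then re-check under the lock for a racing insert before
 * publishing.  ex_lookup()/ex_insert() are hypothetical hash helpers
 * assumed to be defined elsewhere; ex_lock is as in the sketch above.
 */
struct ex_item { int key; /* ... payload ... */ };
static struct ex_item *ex_lookup(int key);	/* assumed: bucket search */
static void ex_insert(struct ex_item *item);	/* assumed: bucket insert */

static struct ex_item *ex_item_get(int key)
{
	struct ex_item *item, *new;

	spin_lock(&ex_lock);
	item = ex_lookup(key);			/* fast path: already hashed */
	spin_unlock(&ex_lock);
	if (item)
		return item;

	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* no lock held: may sleep */
	if (!new)
		return NULL;
	new->key = key;

	spin_lock(&ex_lock);
	item = ex_lookup(key);			/* did we race with another insert? */
	if (item)
		kfree(new);			/* yes: discard our copy */
	else
		ex_insert(item = new);
	spin_unlock(&ex_lock);
	return item;
}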

static int __init uid_cache_init(void)
{
	int n;

uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
- 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
- if(!uid_cachep)
- panic("Cannot create uid taskcount SLAB cache\n");
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_LIST_HEAD(uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
spin_lock(&uidhash_lock);
- uid_hash_insert(&root_user, uidhashentry(0));
+ uid_hash_insert(&root_user, uidhashentry(0,0));
spin_unlock(&uidhash_lock);
	return 0;
}

module_init(uid_cache_init);