/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits on system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
#define UIDHASH_BITS		8
#define UIDHASH_SZ		(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(xid,uid)	((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
#define uidhashentry(xid,uid)	(uidhash_table + __uidhashfn((xid),(uid)))

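/*
 * Worked example (illustrative, not from the original source): for xid 0
 * and uid 1000, __uidhashfn(0, 1000) evaluates to
 * ((1000 >> 8) + (1000 ^ 0)) & 255 = (3 + 1000) & 255 = 235, so the entry
 * lands in bucket 235 of uidhash_table.  Folding the xid in via XOR makes
 * the same uid in different contexts hash to different buckets.
 */
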
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid && user->xid == xid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(xid_t xid, uid_t uid)
{
	struct user_struct *ret;

	spin_lock(&uidhash_lock);
	ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
	spin_unlock(&uidhash_lock);
	return ret;
}

void free_uid(struct user_struct *up)
{
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		/* last reference gone: unhash before freeing; uidhash_lock is held */
		uid_hash_remove(up);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
}

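/*
 * Illustrative usage only (not from the original source): a lookup is
 * normally paired with a matching put, e.g.
 *
 *	struct user_struct *user = find_user(xid, uid);
 *	if (user) {
 *		... inspect user->processes, user->files ...
 *		free_uid(user);
 *	}
 *
 * find_user() takes the reference under uidhash_lock; free_uid() drops it
 * and tears the entry down once the last reference is gone.
 */
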
struct user_struct *alloc_uid(xid_t xid, uid_t uid)
{
	struct list_head *hashent = uidhashentry(xid, uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(xid, uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		new->xid = xid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(xid, uid, hashent);
		if (up) {
			/* somebody else added it first; use their entry */
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}

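/*
 * Note on the pattern above: uidhash_lock cannot be held across
 * kmem_cache_alloc(..., SLAB_KERNEL), which may sleep, so two tasks can
 * race to create the same (xid, uid) entry.  The second uid_hash_find()
 * under the lock resolves that race: the loser frees its candidate and
 * reuses the winner's entry.
 */
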
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	current->user = new_user;
	/* drop the reference previously held via current->user */
	free_uid(old_user);
}

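/*
 * Illustrative caller pattern (not from this file; the names are
 * assumptions): a setuid()-style path would typically do
 *
 *	struct user_struct *new_user = alloc_uid(xid, new_uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 *
 * alloc_uid() returns with a reference held, which switch_uid() hands over
 * to current->user while dropping the old task's reference.
 */
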
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0, 0));
	spin_unlock(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);