/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */
#define UIDHASH_BITS 8
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(xid,uid) ((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
#define uidhashentry(xid,uid) (uidhash_table + __uidhashfn((xid),(uid)))
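
/*
 * Note on __uidhashfn(): the upper UID bits are folded onto the low
 * ones and the context ID (xid) is XORed in, so the same UID in
 * different contexts usually hashes to different buckets.  Collisions
 * are still handled by the per-bucket list walk in uid_hash_find().
 */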
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
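
/*
 * Statically allocated user_struct for UID 0.  uid_cache_init() hashes
 * it into the table at boot, since init already runs as root.
 */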
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}
static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}
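
/*
 * A successful uid_hash_find() takes a reference on the user_struct it
 * returns; the caller must eventually drop it with free_uid().
 */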
static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid && user->xid == xid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(xid_t xid, uid_t uid)
{
	struct user_struct *ret;

	spin_lock(&uidhash_lock);
	ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
	spin_unlock(&uidhash_lock);
	return ret;
}
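
/*
 * Dropping a reference: atomic_dec_and_lock() only takes uidhash_lock
 * when the count actually reaches zero, so the common (non-final) put
 * stays lock-free.  On the final put the user is unhashed and freed
 * with the lock held, which closes the race with concurrent lookups.
 */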
void free_uid(struct user_struct *up)
{
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
}
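
/*
 * alloc_uid() first does an optimistic lookup, allocates a new
 * user_struct outside the lock if none was found, then re-checks
 * under uidhash_lock in case another task hashed the same (xid, uid)
 * pair in the meantime; the loser of that race frees its copy.
 */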
struct user_struct *alloc_uid(xid_t xid, uid_t uid)
{
	struct list_head *hashent = uidhashentry(xid, uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(xid, uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		new->xid = xid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(xid, uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	current->user = new_user;
	free_uid(old_user);
}
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0, 0));
	spin_unlock(&uidhash_lock);

	return 0;
}
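
/*
 * This file is always built in; module_init() here just registers
 * uid_cache_init() as an initcall run once during boot.
 */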
module_init(uid_cache_init);