/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful set_bit. Freeing is O(1).
 */

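/*
 * Worked numbers (illustrative, assuming 4 KiB pages): each bitmap page
 * covers BITS_PER_PAGE = 4096 * 8 = 32768 PIDs, so a pid_max of 2^20
 * (~1 million) spans 2^20 / 2^15 = 32 bitmap pages -- the "32 list
 * entries" above -- and scanning any one of them touches at most
 * PAGE_SIZE bytes of bitmap.
 */
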
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>

#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static kmem_cache_t *pid_cachep;

int pid_max = PID_MAX_DEFAULT;
int last_pid;

/* PIDs below RESERVED_PIDS are never reused once the allocator wraps */
#define RESERVED_PIDS 300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
#define mk_pid(map, off) (((map) - pidmap_array)*BITS_PER_PAGE + (off))
#define find_next_offset(map, off) \
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

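/*
 * Worked example (illustrative, assuming 4 KiB pages, BITS_PER_PAGE ==
 * 32768): pid 70000 lives in pidmap_array[70000 / 32768] ==
 * pidmap_array[2] at bit offset 70000 & 32767 == 4464, and
 * mk_pid(&pidmap_array[2], 4464) reconstructs 2 * 32768 + 4464 == 70000.
 */
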
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
typedef struct pidmap {
	atomic_t nr_free;	/* number of still-free bits in this page */
	void *page;		/* one page of bitmap, allocated lazily */
} pidmap_t;

static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };

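/*
 * Size check (illustrative, assuming PID_MAX_LIMIT == 4*1024*1024 and
 * 4 KiB pages): PIDMAP_ENTRIES is 4194304 / 32768 == 128, so the static
 * array above costs only 128 small entries up front, and even a fully
 * populated PID space needs just 128 bitmap pages (512 KiB), allocated
 * one page at a time, on demand.
 */
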
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

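/*
 * The interleaving being avoided, spelled out (illustrative):
 *
 *	CPU 0					CPU 1
 *	write_lock_irq(&tasklist_lock);		spin_lock(&pidmap_lock);
 *	detach_pid() -> free_pid()		<interrupt>
 *	  spin_lock(&pidmap_lock); <spins>	  read_lock(&tasklist_lock); <spins>
 *
 * CPU 0 spins on pidmap_lock while CPU 1's interrupt handler spins on
 * tasklist_lock; neither can make progress. Taking pidmap_lock only with
 * interrupts disabled (spin_lock_irq/spin_lock_irqsave) makes CPU 1's
 * state impossible.
 */
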
static fastcall void free_pidmap(int pid)
{
	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

static int alloc_pidmap(void)
{
	int i, offset, max_scan, pid, last = last_pid;
	pidmap_t *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pidmap_array[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pidmap_array[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;	/* scanned the whole PID space */
		}
		pid = mk_pid(map, offset);
	}
	return -1;
}

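/*
 * Scan order, by example (illustrative): with last_pid == 5000 the next
 * attempt is 5001; if that bit is taken, find_next_offset() walks the
 * rest of the current bitmap page, the outer loop then advances page by
 * page, and on reaching the final page it wraps to pidmap_array[0] at
 * offset RESERVED_PIDS -- so PIDs below 300 are handed out only before
 * the first wrap.
 */
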
fastcall void put_pid(struct pid *pid)
{
	if (!pid)
		return;
	/* count == 1 means we hold the last reference: skip the atomic dec */
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count))
		kmem_cache_free(pid_cachep, pid);
}

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	hlist_del_rcu(&pid->pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	free_pidmap(pid->nr);
	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(void)
{
	struct pid *pid;
	enum pid_type type;
	int nr = -1;

	pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	nr = alloc_pidmap();
	if (nr < 0)
		goto out_free;

	atomic_set(&pid->count, 1);
	pid->nr = nr;
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	kmem_cache_free(pid_cachep, pid);
	pid = NULL;
	goto out;
}

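/*
 * Lifecycle sketch (illustrative): a struct pid is created once per
 * numeric ID by alloc_pid() on the fork path, shared by every task that
 * attach_pid()s to it, and torn down by free_pid() once the last
 * detach_pid() leaves all of its task lists empty; the struct itself is
 * reclaimed only after an RCU grace period, via delayed_put_pid().
 */
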
struct pid * fastcall find_pid(int nr)
{
	struct hlist_node *elem;
	struct pid *pid;

	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}

int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
	struct pid_link *link;
	struct pid *pid;

	WARN_ON(!task->pid); /* to be removed soon */
	WARN_ON(!nr); /* to be removed soon */

	link = &task->pids[type];
	link->pid = pid = find_pid(nr);
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	/* keep the pid alive while any other pid_type still uses it */
	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	/* vserver: hide tasks whose context the caller may not see */
	if (result && (pid->nr != 1) &&
		!vx_check(vx_task_xid(result), VX_WATCH|VX_ADMIN|VX_IDENT)) {
		vxwprintk((type == PIDTYPE_PID) && (current->xid),
			"pid_task(%d,%d): task %p[#%u,%u] did lookup %p[#%u,%u]",
			pid->nr, type, current, vx_current_xid(), current->pid,
			result, vx_task_xid(result), result->pid);
		result = NULL;
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	if (type == PIDTYPE_PID)
		nr = vx_rmap_pid(nr);
	else if (type == PIDTYPE_REALPID)
		/* vserver: real-pid lookup bypasses per-context remapping */
		type = PIDTYPE_PID;
	return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);

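/*
 * Usage sketch (illustrative), the RCU variant of a lookup:
 *
 *	rcu_read_lock();
 *	task = find_task_by_pid_type(PIDTYPE_PID, nr);
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 *
 * The task reference stays valid after rcu_read_unlock() only because
 * of the get_task_struct() taken inside the read-side section;
 * get_pid_task() below packages exactly this pattern.
 */
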
struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));
	rcu_read_unlock();

	return pid;
}

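/*
 * Caller-side sketch (illustrative; "do_something" is a hypothetical
 * stand-in): every successful find_get_pid() pairs with a put_pid():
 *
 *	struct pid *pid = find_get_pid(nr);
 *	if (pid) {
 *		do_something(pid);	-- pid cannot be freed under us
 *		put_pid(pid);		-- drop our reference
 *	}
 */
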
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
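/*
 * Worked numbers (illustrative): at 128 MiB, fls(128 * 4) == 10, giving
 * 2^10 == 1024 slots; at 1 GiB and above, fls(1024 * 4) == 13 is clamped
 * to 12, giving the 4096-slot maximum; tiny machines are clamped up to
 * 2^4 == 16 slots.
 */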
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, pidmap_array->page);
	atomic_dec(&pidmap_array->nr_free);

	pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
					__alignof__(struct pid),
					SLAB_PANIC, NULL, NULL);
}