/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002 William Irwin, IBM
 * (C) 2002 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of a million possible PIDs are
 * already allocated, we scan 32 list entries and at most PAGE_SIZE bytes.
 * The typical fastpath is a single successful set_bit. Freeing is O(1).
 */
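/*
 * Back-of-the-envelope check of the worst case above (illustrative,
 * assuming 4 KiB pages): each bitmap page covers PAGE_SIZE * 8 = 32768
 * PIDs, so a 2^20 PID space spans 1048576 / 32768 = 32 bitmap pages,
 * which is where the "32 list entries" figure comes from. A PID maps
 * to its bitmap slot as:
 *
 *	map    = pidmap_array + pid / BITS_PER_PAGE;	(which page)
 *	offset = pid & BITS_PER_PAGE_MASK;		(which bit)
 */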
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/vs_cvirt.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct list_head *pid_hash[PIDTYPE_MAX];
static int pidhash_shift;
int pid_max = PID_MAX_DEFAULT;
int last_pid;

#define RESERVED_PIDS		300

#define PIDMAP_ENTRIES		(PID_MAX_LIMIT/PAGE_SIZE/8)
#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
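/*
 * Concrete values (illustrative, assuming 4 KiB pages and the 4-million
 * PID space mentioned below):
 *
 *	BITS_PER_PAGE  = 4096 * 8        = 32768 PIDs per bitmap page
 *	PIDMAP_ENTRIES = 4194304 / 32768 = 128 bitmap pages
 */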
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
typedef struct pidmap {
	atomic_t nr_free;
	void *page;
} pidmap_t;
static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };

static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;

static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
fastcall void free_pidmap(int pid)
{
	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
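/*
 * Note that this is the O(1) free path promised in the header comment:
 * a single clear_bit() plus an atomic increment, with no locking at all.
 */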
/*
 * Here we search for the next map that has free bits left.
 * Normally the next map has free PIDs.
 */
static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
{
	while (--*max_steps) {
		if (++map == map_limit)
			map = pidmap_array;
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock(&pidmap_lock);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock(&pidmap_lock);
			if (!map->page)
				break;
		}
		if (atomic_read(&map->nr_free))
			return map;
	}
	return NULL;
}
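/*
 * Design note: pidmap_lock above is the only lock in the allocator,
 * and it only serializes installing a freshly zeroed bitmap page.
 * Losing that race is harmless: the duplicate page is simply freed
 * and the winner's page is used. The bit operations themselves are
 * lockless, as advertised in the header comment.
 */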
int alloc_pidmap(void)
{
	int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
	pidmap_t *map;

	pid = last_pid + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;

	offset = pid & BITS_PER_PAGE_MASK;
	map = pidmap_array + pid / BITS_PER_PAGE;

	if (likely(map->page && !test_and_set_bit(offset, map->page))) {
		/*
		 * There is a small window for last_pid updates to race,
		 * but in that case the next allocation will go into the
		 * slowpath and that fixes things up.
		 */
return_pid:
		atomic_dec(&map->nr_free);
		last_pid = pid;
		return pid;
	}

	if (!offset || !atomic_read(&map->nr_free)) {
next_map:
		map = next_free_map(map, &max_steps);
		if (!map)
			goto failure;
		offset = 0;
	}
	/*
	 * Find the next zero bit:
	 */
scan_more:
	offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset);
	if (offset >= BITS_PER_PAGE)
		goto next_map;
	if (test_and_set_bit(offset, map->page))
		goto scan_more;

	/* we got the PID: */
	pid = (map - pidmap_array) * BITS_PER_PAGE + offset;
	goto return_pid;

failure:
	return -1;
}
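/*
 * Usage sketch (illustrative, not a verbatim caller): fork-style code
 * pairs the two entry points roughly as follows:
 *
 *	int nr = alloc_pidmap();
 *	if (nr < 0)
 *		return -EAGAIN;	(PID space exhausted)
 *	...
 *	free_pidmap(nr);	(when the PID is finally released)
 */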
fastcall struct pid *find_pid(enum pid_type type, int nr)
{
	struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
	struct pid *pid;

	__list_for_each(elem, bucket) {
		pid = list_entry(elem, struct pid, hash_chain);
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
{
	atomic_inc(&pid->count);
	list_add_tail(&link->pid_chain, &pid->task_list);
	link->pidptr = pid;
}
int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
{
	struct pid *pid = find_pid(type, nr);

	if (pid)
		atomic_inc(&pid->count);
	else {
		pid = &task->pids[type].pid;
		pid->nr = nr;
		atomic_set(&pid->count, 1);
		INIT_LIST_HEAD(&pid->task_list);
		pid->task = task;
		get_task_struct(task);
		list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]);
	}
	list_add_tail(&task->pids[type].pid_chain, &pid->task_list);
	task->pids[type].pidptr = pid;

	return 0;
}
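/*
 * Illustrative pairing (assumed caller behavior, not shown in this
 * file): a new task is hashed under each ID type when it is set up,
 * and unhashed again at exit:
 *
 *	attach_pid(p, PIDTYPE_PID, p->pid);
 *	attach_pid(p, PIDTYPE_TGID, p->tgid);
 *	...
 *	detach_pid(p, PIDTYPE_PID);
 */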
static inline int __detach_pid(task_t *task, enum pid_type type)
{
	struct pid_link *link = task->pids + type;
	struct pid *pid = link->pidptr;
	int nr;

	list_del(&link->pid_chain);
	if (!atomic_dec_and_test(&pid->count))
		return 0;

	nr = pid->nr;
	list_del(&pid->hash_chain);
	put_task_struct(pid->task);

	return nr;
}
static void _detach_pid(task_t *task, enum pid_type type)
{
	__detach_pid(task, type);
}
void fastcall detach_pid(task_t *task, enum pid_type type)
{
	int nr = __detach_pid(task, type);

	if (!nr)
		return;

	for (type = 0; type < PIDTYPE_MAX; ++type)
		if (find_pid(type, nr))
			return;
	free_pidmap(nr);
}
task_t *find_task_by_pid(int nr)
{
	struct pid *pid = find_pid(PIDTYPE_PID,
		vx_rmap_tgid(current->vx_info, nr));

	if (!pid)
		return NULL;
	return pid_task(pid->task_list.next, PIDTYPE_PID);
}

EXPORT_SYMBOL(find_task_by_pid);
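/*
 * Typical lookup pattern (illustrative): per the locking rules in the
 * header comment, the hash must be accessed with the tasklist_lock at
 * least read-acquired, and a reference is taken before dropping it:
 *
 *	read_lock(&tasklist_lock);
 *	task = find_task_by_pid(nr);
 *	if (task)
 *		get_task_struct(task);
 *	read_unlock(&tasklist_lock);
 */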
/*
 * This function switches the PIDs if a non-leader thread calls
 * sys_execve() - this must be done without releasing the PID
 * (which a detach_pid() would eventually do).
 */
void switch_exec_pids(task_t *leader, task_t *thread)
{
	_detach_pid(leader, PIDTYPE_PID);
	_detach_pid(leader, PIDTYPE_TGID);
	_detach_pid(leader, PIDTYPE_PGID);
	_detach_pid(leader, PIDTYPE_SID);
	_detach_pid(thread, PIDTYPE_PID);
	_detach_pid(thread, PIDTYPE_TGID);

	leader->pid = leader->tgid = thread->pid;
	thread->pid = thread->tgid;

	attach_pid(thread, PIDTYPE_PID, thread->pid);
	attach_pid(thread, PIDTYPE_TGID, thread->tgid);
	attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp);
	attach_pid(thread, PIDTYPE_SID, thread->signal->session);
	list_add_tail(&thread->tasks, &init_task.tasks);

	attach_pid(leader, PIDTYPE_PID, leader->pid);
	attach_pid(leader, PIDTYPE_TGID, leader->tgid);
	attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp);
	attach_pid(leader, PIDTYPE_SID, leader->signal->session);
}
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
void __init pidhash_init(void)
{
	int i, j, pidhash_size;
	unsigned long megabytes = max_pfn >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;
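	/*
	 * Worked example: on a 1 GiB machine with 4 KiB pages,
	 * megabytes = 1024 and fls(1024 * 4) = fls(4096) = 13, so
	 * pidhash_shift is clamped to 12 and the table gets
	 * 1 << 12 = 4096 slots per PID type, the upper bound named
	 * in the comment above. The max(4, ...) floor likewise
	 * guarantees at least 1 << 4 = 16 slots.
	 */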
	printk("PID hash table entries: %d (order %d: %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct list_head));

	for (i = 0; i < PIDTYPE_MAX; i++) {
		pid_hash[i] = alloc_bootmem(pidhash_size *
					sizeof(struct list_head));
		if (!pid_hash[i])
			panic("Could not alloc pidhash!\n");
		for (j = 0; j < pidhash_size; j++)
			INIT_LIST_HEAD(&pid_hash[i][j]);
	}
}
void __init pidmap_init(void)
{
	int i;

	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
	set_bit(0, pidmap_array->page);
	atomic_dec(&pidmap_array->nr_free);

	/*
	 * Allocate PID 0, and hash it via all PID types:
	 */
	for (i = 0; i < PIDTYPE_MAX; i++)
		attach_pid(current, i, 0);
}