X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fpid.c;h=5eeb50538164de353554fe235115673dc031c787;hb=7172c64a7cee4dfa95864f49c914f7ea8cf497c8;hp=23706b226f3ad8149984328b95fe73f3742089ea;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/kernel/pid.c b/kernel/pid.c
index 23706b226..5eeb50538 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,9 @@
 /*
  * Generic pidhash and scalable, time-bounded PID allocator
  *
- * (C) 2002 William Irwin, IBM
- * (C) 2002 Ingo Molnar, Red Hat
+ * (C) 2002-2003 William Irwin, IBM
+ * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2004 Ingo Molnar, Red Hat
  *
  * pid-structures are backing objects for tasks sharing a given ID to chain
  * against. There is very little to them aside from hashing them and
@@ -28,17 +29,24 @@
 #include <linux/hash.h>
 
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
-static struct list_head *pid_hash[PIDTYPE_MAX];
+static struct hlist_head *pid_hash;
 static int pidhash_shift;
+static kmem_cache_t *pid_cachep;
 
 int pid_max = PID_MAX_DEFAULT;
 int last_pid;
 
 #define RESERVED_PIDS		300
-#define PIDMAP_ENTRIES (PID_MAX_LIMIT/PAGE_SIZE/8)
+int pid_max_min = RESERVED_PIDS + 1;
+int pid_max_max = PID_MAX_LIMIT;
+
+#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
 #define BITS_PER_PAGE (PAGE_SIZE*8)
 #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define mk_pid(map, off) (((map) - pidmap_array)*BITS_PER_PAGE + (off))
+#define find_next_offset(map, off) \
+		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
 
 /*
  * PID-map pages start out as NULL, they get allocated upon
@@ -54,11 +62,22 @@ typedef struct pidmap {
 static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
 
-static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;
-
-static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+/*
+ * Note: disable interrupts while the pidmap_lock is held as an
+ * interrupt might come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does
+ * spin_lock(&pidmap_lock) followed by an interrupt routine that does
+ * read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no
+ * irq handlers that take it we can leave the interrupts enabled.
+ * For now it is easier to be safe than to prove it can't happen.
+ */
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-fastcall void free_pidmap(int pid)
+static fastcall void free_pidmap(int pid)
 {
 	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
 	int offset = pid & BITS_PER_PAGE_MASK;
@@ -67,202 +86,227 @@ fastcall void free_pidmap(int pid)
 	atomic_inc(&map->nr_free);
 }
 
-/*
- * Here we search for the next map that has free bits left.
- * Normally the next map has free PIDs.
- */
-static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
+static int alloc_pidmap(void)
 {
-	while (--*max_steps) {
-		if (++map == map_limit)
-			map = pidmap_array;
+	int i, offset, max_scan, pid, last = last_pid;
+	pidmap_t *map;
+
+	pid = last + 1;
+	if (pid >= pid_max)
+		pid = RESERVED_PIDS;
+	offset = pid & BITS_PER_PAGE_MASK;
+	map = &pidmap_array[pid/BITS_PER_PAGE];
+	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+	for (i = 0; i <= max_scan; ++i) {
 		if (unlikely(!map->page)) {
 			unsigned long page = get_zeroed_page(GFP_KERNEL);
 			/*
 			 * Free the page if someone raced with us
 			 * installing it:
 			 */
-			spin_lock(&pidmap_lock);
+			spin_lock_irq(&pidmap_lock);
 			if (map->page)
 				free_page(page);
 			else
 				map->page = (void *)page;
-			spin_unlock(&pidmap_lock);
-
-			if (!map->page)
+			spin_unlock_irq(&pidmap_lock);
+			if (unlikely(!map->page))
+				break;
+		}
+		if (likely(atomic_read(&map->nr_free))) {
+			do {
+				if (!test_and_set_bit(offset, map->page)) {
+					atomic_dec(&map->nr_free);
+					last_pid = pid;
+					return pid;
+				}
+				offset = find_next_offset(map, offset);
+				pid = mk_pid(map, offset);
+			/*
+			 * find_next_offset() found a bit, the pid from it
+			 * is in-bounds, and if we fell back to the last
+			 * bitmap block and the final block was the same
+			 * as the starting point, pid is before last_pid.
+			 */
+			} while (offset < BITS_PER_PAGE && pid < pid_max &&
+					(i != max_scan || pid < last ||
+					    !((last+1) & BITS_PER_PAGE_MASK)));
+		}
+		if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
+			++map;
+			offset = 0;
+		} else {
+			map = &pidmap_array[0];
+			offset = RESERVED_PIDS;
+			if (unlikely(last == offset))
 				break;
 		}
-		if (atomic_read(&map->nr_free))
-			return map;
+		pid = mk_pid(map, offset);
 	}
-	return NULL;
+	return -1;
 }
 
-int alloc_pidmap(void)
+fastcall void put_pid(struct pid *pid)
 {
-	int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
-	pidmap_t *map;
+	if (!pid)
+		return;
+	if ((atomic_read(&pid->count) == 1) ||
+	     atomic_dec_and_test(&pid->count))
+		kmem_cache_free(pid_cachep, pid);
+}
 
-	pid = last_pid + 1;
-	if (pid >= pid_max)
-		pid = RESERVED_PIDS;
+static void delayed_put_pid(struct rcu_head *rhp)
+{
+	struct pid *pid = container_of(rhp, struct pid, rcu);
+	put_pid(pid);
+}
 
-	offset = pid & BITS_PER_PAGE_MASK;
-	map = pidmap_array + pid / BITS_PER_PAGE;
-
-	if (likely(map->page && !test_and_set_bit(offset, map->page))) {
-		/*
-		 * There is a small window for last_pid updates to race,
-		 * but in that case the next allocation will go into the
-		 * slowpath and that fixes things up.
- */ -return_pid: - atomic_dec(&map->nr_free); - last_pid = pid; - return pid; - } - - if (!offset || !atomic_read(&map->nr_free)) { -next_map: - map = next_free_map(map, &max_steps); - if (!map) - goto failure; - offset = 0; - } - /* - * Find the next zero bit: - */ -scan_more: - offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset); - if (offset >= BITS_PER_PAGE) - goto next_map; - if (test_and_set_bit(offset, map->page)) - goto scan_more; - - /* we got the PID: */ - pid = (map - pidmap_array) * BITS_PER_PAGE + offset; - goto return_pid; - -failure: - return -1; +fastcall void free_pid(struct pid *pid) +{ + /* We can be called with write_lock_irq(&tasklist_lock) held */ + unsigned long flags; + + spin_lock_irqsave(&pidmap_lock, flags); + hlist_del_rcu(&pid->pid_chain); + spin_unlock_irqrestore(&pidmap_lock, flags); + + free_pidmap(pid->nr); + call_rcu(&pid->rcu, delayed_put_pid); +} + +struct pid *alloc_pid(void) +{ + struct pid *pid; + enum pid_type type; + int nr = -1; + + pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL); + if (!pid) + goto out; + + nr = alloc_pidmap(); + if (nr < 0) + goto out_free; + + atomic_set(&pid->count, 1); + pid->nr = nr; + for (type = 0; type < PIDTYPE_MAX; ++type) + INIT_HLIST_HEAD(&pid->tasks[type]); + + spin_lock_irq(&pidmap_lock); + hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]); + spin_unlock_irq(&pidmap_lock); + +out: + return pid; + +out_free: + kmem_cache_free(pid_cachep, pid); + pid = NULL; + goto out; } -fastcall struct pid *find_pid(enum pid_type type, int nr) +struct pid * fastcall find_pid(int nr) { - struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)]; + struct hlist_node *elem; struct pid *pid; - __list_for_each(elem, bucket) { - pid = list_entry(elem, struct pid, hash_chain); + hlist_for_each_entry_rcu(pid, elem, + &pid_hash[pid_hashfn(nr)], pid_chain) { if (pid->nr == nr) return pid; } return NULL; } -void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid) +int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) { - atomic_inc(&pid->count); - list_add_tail(&link->pid_chain, &pid->task_list); - link->pidptr = pid; -} + struct pid_link *link; + struct pid *pid; -int fastcall attach_pid(task_t *task, enum pid_type type, int nr) -{ - struct pid *pid = find_pid(type, nr); - - if (pid) - atomic_inc(&pid->count); - else { - pid = &task->pids[type].pid; - pid->nr = nr; - atomic_set(&pid->count, 1); - INIT_LIST_HEAD(&pid->task_list); - pid->task = task; - get_task_struct(task); - list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]); - } - list_add_tail(&task->pids[type].pid_chain, &pid->task_list); - task->pids[type].pidptr = pid; + WARN_ON(!task->pid); /* to be removed soon */ + WARN_ON(!nr); /* to be removed soon */ + + link = &task->pids[type]; + link->pid = pid = find_pid(nr); + hlist_add_head_rcu(&link->node, &pid->tasks[type]); return 0; } -static inline int __detach_pid(task_t *task, enum pid_type type) +void fastcall detach_pid(struct task_struct *task, enum pid_type type) { - struct pid_link *link = task->pids + type; - struct pid *pid = link->pidptr; - int nr; + struct pid_link *link; + struct pid *pid; + int tmp; + + link = &task->pids[type]; + pid = link->pid; - list_del(&link->pid_chain); - if (!atomic_dec_and_test(&pid->count)) - return 0; + hlist_del_rcu(&link->node); + link->pid = NULL; - nr = pid->nr; - list_del(&pid->hash_chain); - put_task_struct(pid->task); + for (tmp = PIDTYPE_MAX; --tmp >= 0; ) + if (!hlist_empty(&pid->tasks[tmp])) + 
+			return;
 
-	return nr;
+	free_pid(pid);
 }
 
-static void _detach_pid(task_t *task, enum pid_type type)
+struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 {
-	__detach_pid(task, type);
+	struct task_struct *result = NULL;
+	if (pid) {
+		struct hlist_node *first;
+		first = rcu_dereference(pid->tasks[type].first);
+		if (first)
+			result = hlist_entry(first, struct task_struct, pids[(type)].node);
+		if (result && (pid->nr != 1) &&
+			!vx_check(vx_task_xid(result), VX_WATCH|VX_ADMIN|VX_IDENT)) {
+			vxwprintk((type == PIDTYPE_PID) && (current->xid),
+				"pid_task(%d,%d): task %p[#%u,%u] did lookup %p[#%u,%u]",
+				pid->nr, type, current, vx_current_xid(), current->pid,
+				result, vx_task_xid(result), result->pid);
+			result = NULL;
+		}
+	}
+	return result;
 }
 
-void fastcall detach_pid(task_t *task, enum pid_type type)
+/*
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ */
+struct task_struct *find_task_by_pid_type(int type, int nr)
 {
-	int nr = __detach_pid(task, type);
+	if (type == PIDTYPE_PID)
+		nr = vx_rmap_pid(nr);
+	else if (type == PIDTYPE_REALPID)
+		type = PIDTYPE_PID;
+	return pid_task(find_pid(nr), type);
+}
 
-	if (!nr)
-		return;
+EXPORT_SYMBOL(find_task_by_pid_type);
 
-	for (type = 0; type < PIDTYPE_MAX; ++type)
-		if (find_pid(type, nr))
-			return;
-	free_pidmap(nr);
+struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+{
+	struct task_struct *result;
+	rcu_read_lock();
+	result = pid_task(pid, type);
+	if (result)
+		get_task_struct(result);
+	rcu_read_unlock();
+	return result;
 }
 
-task_t *find_task_by_pid(int nr)
+struct pid *find_get_pid(pid_t nr)
 {
-	struct pid *pid = find_pid(PIDTYPE_PID,
-		vx_rmap_tgid(current->vx_info, nr));
-
-	if (!pid)
-		return NULL;
-	return pid_task(pid->task_list.next, PIDTYPE_PID);
-}
+	struct pid *pid;
 
-EXPORT_SYMBOL(find_task_by_pid);
+	rcu_read_lock();
+	pid = get_pid(find_pid(nr));
+	rcu_read_unlock();
 
-/*
- * This function switches the PIDs if a non-leader thread calls
- * sys_execve() - this must be done without releasing the PID.
- * (which a detach_pid() would eventually do.)
- */ -void switch_exec_pids(task_t *leader, task_t *thread) -{ - _detach_pid(leader, PIDTYPE_PID); - _detach_pid(leader, PIDTYPE_TGID); - _detach_pid(leader, PIDTYPE_PGID); - _detach_pid(leader, PIDTYPE_SID); - - _detach_pid(thread, PIDTYPE_PID); - _detach_pid(thread, PIDTYPE_TGID); - - leader->pid = leader->tgid = thread->pid; - thread->pid = thread->tgid; - - attach_pid(thread, PIDTYPE_PID, thread->pid); - attach_pid(thread, PIDTYPE_TGID, thread->tgid); - attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp); - attach_pid(thread, PIDTYPE_SID, thread->signal->session); - list_add_tail(&thread->tasks, &init_task.tasks); - - attach_pid(leader, PIDTYPE_PID, leader->pid); - attach_pid(leader, PIDTYPE_TGID, leader->tgid); - attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp); - attach_pid(leader, PIDTYPE_SID, leader->signal->session); + return pid; } /* @@ -272,39 +316,32 @@ void switch_exec_pids(task_t *leader, task_t *thread) */ void __init pidhash_init(void) { - int i, j, pidhash_size; - unsigned long megabytes = max_pfn >> (20 - PAGE_SHIFT); + int i, pidhash_size; + unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT); pidhash_shift = max(4, fls(megabytes * 4)); pidhash_shift = min(12, pidhash_shift); pidhash_size = 1 << pidhash_shift; - printk("PID hash table entries: %d (order %d: %Zd bytes)\n", + printk("PID hash table entries: %d (order: %d, %Zd bytes)\n", pidhash_size, pidhash_shift, - pidhash_size * sizeof(struct list_head)); - - for (i = 0; i < PIDTYPE_MAX; i++) { - pid_hash[i] = alloc_bootmem(pidhash_size * - sizeof(struct list_head)); - if (!pid_hash[i]) - panic("Could not alloc pidhash!\n"); - for (j = 0; j < pidhash_size; j++) - INIT_LIST_HEAD(&pid_hash[i][j]); - } + pidhash_size * sizeof(struct hlist_head)); + + pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); + if (!pid_hash) + panic("Could not alloc pidhash!\n"); + for (i = 0; i < pidhash_size; i++) + INIT_HLIST_HEAD(&pid_hash[i]); } void __init pidmap_init(void) { - int i; - pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL); + /* Reserve PID 0. We never call free_pidmap(0) */ set_bit(0, pidmap_array->page); atomic_dec(&pidmap_array->nr_free); - /* - * Allocate PID 0, and hash it via all PID types: - */ - - for (i = 0; i < PIDTYPE_MAX; i++) - attach_pid(current, i, 0); + pid_cachep = kmem_cache_create("pid", sizeof(struct pid), + __alignof__(struct pid), + SLAB_PANIC, NULL, NULL); }