/*
* Generic pidhash and scalable, time-bounded PID allocator
*
- * (C) 2002 William Irwin, IBM
- * (C) 2002 Ingo Molnar, Red Hat
+ * (C) 2002-2003 William Irwin, IBM
+ * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2004 Ingo Molnar, Red Hat
*
* pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
+#include <linux/pid_namespace.h>
+#include <linux/vs_pid.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
-static struct list_head *pid_hash[PIDTYPE_MAX];
+static struct hlist_head *pid_hash;
static int pidhash_shift;
+static struct kmem_cache *pid_cachep;
int pid_max = PID_MAX_DEFAULT;
-int last_pid;
#define RESERVED_PIDS 300
-#define PIDMAP_ENTRIES (PID_MAX_LIMIT/PAGE_SIZE/8)
+int pid_max_min = RESERVED_PIDS + 1;
+int pid_max_max = PID_MAX_LIMIT;
+
#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+static inline int mk_pid(struct pid_namespace *pid_ns,
+ struct pidmap *map, int off)
+{
+ return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
+}
+
+#define find_next_offset(map, off) \
+ find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
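/*
 * Worked example (illustrative, not from this patch): with 4 KiB pages,
 * BITS_PER_PAGE is 32768, so bit 5 of the second bitmap block
 * (map == &pid_ns->pidmap[1]) yields mk_pid() == 1*32768 + 5 == 32773.
 * free_pidmap() inverts the split with pid / BITS_PER_PAGE and
 * pid & BITS_PER_PAGE_MASK.
 */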
+
/*
* PID-map pages start out as NULL, they get allocated upon
* first use and are never deallocated. This way a low pid_max
* value does not cause lots of bitmaps to be allocated, but
 * the scheme still scales up to 4 million PIDs at runtime.
*/
-typedef struct pidmap {
- atomic_t nr_free;
- void *page;
-} pidmap_t;
-
-static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
- { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
+struct pid_namespace init_pid_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .pidmap = {
+ [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
+ },
+ .last_pid = 0,
+ .child_reaper = &init_task
+};
-static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;
+/*
+ * Note: disable interrupts while the pidmap_lock is held as an
+ * interrupt might come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does
+ * spin_lock(&pidmap_lock) followed by an interrupt routine that does
+ * read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no
+ * irq handlers that take it, we can leave the interrupts enabled.
+ * For now it is easier to be safe than to prove it can't happen.
+ */
-static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
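/*
 * Illustrative interleaving of the deadlock described above (a sketch,
 * assuming pidmap_lock were taken without disabling interrupts):
 *
 *	CPU 0                               CPU 1
 *	write_lock_irq(&tasklist_lock);
 *	                                    spin_lock(&pidmap_lock);
 *	detach_pid()
 *	  free_pid()
 *	    spin_lock(&pidmap_lock);        <hardware interrupt>
 *	                                      read_lock(&tasklist_lock);
 *
 * CPU 0 spins on pidmap_lock, which CPU 1 cannot release because its
 * interrupt handler spins on the write-held tasklist_lock. Taking
 * pidmap_lock with interrupts disabled keeps the irq from nesting
 * inside the critical section, breaking the cycle.
 */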
-fastcall void free_pidmap(int pid)
+static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
- pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
+ struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
int offset = pid & BITS_PER_PAGE_MASK;
clear_bit(offset, map->page);
atomic_inc(&map->nr_free);
}
-/*
- * Here we search for the next map that has free bits left.
- * Normally the next map has free PIDs.
- */
-static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
+static int alloc_pidmap(struct pid_namespace *pid_ns)
{
- while (--*max_steps) {
- if (++map == map_limit)
- map = pidmap_array;
+ int i, offset, max_scan, pid, last = pid_ns->last_pid;
+ struct pidmap *map;
+
+ pid = last + 1;
+ if (pid >= pid_max)
+ pid = RESERVED_PIDS;
+ offset = pid & BITS_PER_PAGE_MASK;
+ map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
+ max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+ for (i = 0; i <= max_scan; ++i) {
if (unlikely(!map->page)) {
- unsigned long page = get_zeroed_page(GFP_KERNEL);
+ void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
/*
* Free the page if someone raced with us
* installing it:
*/
- spin_lock(&pidmap_lock);
+ spin_lock_irq(&pidmap_lock);
if (map->page)
- free_page(page);
+ kfree(page);
else
- map->page = (void *)page;
- spin_unlock(&pidmap_lock);
-
- if (!map->page)
+ map->page = page;
+ spin_unlock_irq(&pidmap_lock);
+ if (unlikely(!map->page))
break;
}
- if (atomic_read(&map->nr_free))
- return map;
+ if (likely(atomic_read(&map->nr_free))) {
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ atomic_dec(&map->nr_free);
+ pid_ns->last_pid = pid;
+ return pid;
+ }
+ offset = find_next_offset(map, offset);
+ pid = mk_pid(pid_ns, map, offset);
+ /*
+ * find_next_offset() found a bit, the pid from it
+ * is in-bounds, and if we fell back to the last
+ * bitmap block and the final block was the same
+ * as the starting point, pid is before last_pid.
+ */
+ } while (offset < BITS_PER_PAGE && pid < pid_max &&
+ (i != max_scan || pid < last ||
+ !((last+1) & BITS_PER_PAGE_MASK)));
+ }
+ if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
+ ++map;
+ offset = 0;
+ } else {
+ map = &pid_ns->pidmap[0];
+ offset = RESERVED_PIDS;
+ if (unlikely(last == offset))
+ break;
+ }
+ pid = mk_pid(pid_ns, map, offset);
}
- return NULL;
+ return -1;
}
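/*
 * Minimal caller sketch (illustrative, not code from this patch;
 * alloc_pidmap() may sleep because the bitmap page comes from
 * kzalloc(GFP_KERNEL)):
 *
 *	int nr = alloc_pidmap(&init_pid_ns);
 *	if (nr < 0)
 *		goto fail;			PID space exhausted
 *	...
 *	free_pidmap(&init_pid_ns, nr);		at pid death
 */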
-int alloc_pidmap(void)
+static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
- int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
- pidmap_t *map;
-
- pid = last_pid + 1;
- if (pid >= pid_max)
- pid = RESERVED_PIDS;
-
- offset = pid & BITS_PER_PAGE_MASK;
- map = pidmap_array + pid / BITS_PER_PAGE;
-
- if (likely(map->page && !test_and_set_bit(offset, map->page))) {
- /*
- * There is a small window for last_pid updates to race,
- * but in that case the next allocation will go into the
- * slowpath and that fixes things up.
- */
-return_pid:
- atomic_dec(&map->nr_free);
- last_pid = pid;
- return pid;
- }
-
- if (!offset || !atomic_read(&map->nr_free)) {
-next_map:
- map = next_free_map(map, &max_steps);
- if (!map)
- goto failure;
- offset = 0;
+ int offset;
+ struct pidmap *map, *end;
+
+ offset = (last + 1) & BITS_PER_PAGE_MASK;
+ map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
+ end = &pid_ns->pidmap[PIDMAP_ENTRIES];
+ for (; map < end; map++, offset = 0) {
+ if (unlikely(!map->page))
+ continue;
+ offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
+ if (offset < BITS_PER_PAGE)
+ return mk_pid(pid_ns, map, offset);
}
- /*
- * Find the next zero bit:
- */
-scan_more:
- offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset);
- if (offset >= BITS_PER_PAGE)
- goto next_map;
- if (test_and_set_bit(offset, map->page))
- goto scan_more;
-
- /* we got the PID: */
- pid = (map - pidmap_array) * BITS_PER_PAGE + offset;
- goto return_pid;
-
-failure:
return -1;
}
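/*
 * next_pidmap() returns the smallest allocated pid strictly greater
 * than @last, or -1 once the bitmaps are exhausted. Illustrative
 * values: with pids {1, 300, 301} set, next_pidmap(ns, 1) == 300 and
 * next_pidmap(ns, 301) == -1.
 */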
-fastcall struct pid *find_pid(enum pid_type type, int nr)
+fastcall void put_pid(struct pid *pid)
+{
+ if (!pid)
+ return;
+ if ((atomic_read(&pid->count) == 1) ||
+ atomic_dec_and_test(&pid->count))
+ kmem_cache_free(pid_cachep, pid);
+}
+EXPORT_SYMBOL_GPL(put_pid);
+
+static void delayed_put_pid(struct rcu_head *rhp)
+{
+ struct pid *pid = container_of(rhp, struct pid, rcu);
+ put_pid(pid);
+}
+
+fastcall void free_pid(struct pid *pid)
+{
+ /* We can be called with write_lock_irq(&tasklist_lock) held */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pidmap_lock, flags);
+ hlist_del_rcu(&pid->pid_chain);
+ spin_unlock_irqrestore(&pidmap_lock, flags);
+
+ free_pidmap(&init_pid_ns, pid->nr);
+ call_rcu(&pid->rcu, delayed_put_pid);
+}
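/*
 * Why the RCU grace period matters here (an illustrative sketch): a
 * concurrent lookup may still be traversing the hash chain when
 * free_pid() unhashes the entry:
 *
 *	rcu_read_lock();
 *	pid = find_pid(nr);		may observe the just-unhashed pid
 *	if (pid)
 *		... dereference pid ...
 *	rcu_read_unlock();
 *
 * delayed_put_pid() therefore drops the final reference only after all
 * such readers have left their RCU read-side critical sections.
 */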
+
+struct pid *alloc_pid(void)
{
- struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
struct pid *pid;
+ enum pid_type type;
+ int nr = -1;
+
+ pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
+ if (!pid)
+ goto out;
+
+ nr = alloc_pidmap(current->nsproxy->pid_ns);
+ if (nr < 0)
+ goto out_free;
+
+ atomic_set(&pid->count, 1);
+ pid->nr = nr;
+ for (type = 0; type < PIDTYPE_MAX; ++type)
+ INIT_HLIST_HEAD(&pid->tasks[type]);
+
+ spin_lock_irq(&pidmap_lock);
+ hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
+ spin_unlock_irq(&pidmap_lock);
+
+out:
+ return pid;
+
+out_free:
+ kmem_cache_free(pid_cachep, pid);
+ pid = NULL;
+ goto out;
+}
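/*
 * Expected caller pattern, do_fork()-style (a simplified sketch, not
 * code from this patch):
 *
 *	struct pid *pid = alloc_pid();
 *	if (!pid)
 *		return -EAGAIN;
 *	nr = pid->nr;
 *	... create the task and attach it to nr ...
 *	free_pid(pid);			on the failure path
 */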
- __list_for_each(elem, bucket) {
- pid = list_entry(elem, struct pid, hash_chain);
+struct pid * fastcall find_pid(int nr)
+{
+ struct hlist_node *elem;
+ struct pid *pid;
+
+ hlist_for_each_entry_rcu(pid, elem,
+ &pid_hash[pid_hashfn(nr)], pid_chain) {
if (pid->nr == nr)
return pid;
}
return NULL;
}
+EXPORT_SYMBOL_GPL(find_pid);
-void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
- atomic_inc(&pid->count);
- list_add_tail(&link->pid_chain, &pid->task_list);
- link->pidptr = pid;
-}
+ struct pid_link *link;
+ struct pid *pid;
-int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
-{
- struct pid *pid = find_pid(type, nr);
-
- if (pid)
- atomic_inc(&pid->count);
- else {
- pid = &task->pids[type].pid;
- pid->nr = nr;
- atomic_set(&pid->count, 1);
- INIT_LIST_HEAD(&pid->task_list);
- pid->task = task;
- get_task_struct(task);
- list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]);
- }
- list_add_tail(&task->pids[type].pid_chain, &pid->task_list);
- task->pids[type].pidptr = pid;
+ link = &task->pids[type];
+ link->pid = pid = find_pid(nr);
+ hlist_add_head_rcu(&link->node, &pid->tasks[type]);
return 0;
}
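/*
 * Illustrative fork-time usage (simplified from what copy_process()
 * does; not code from this patch):
 *
 *	attach_pid(p, PIDTYPE_PID, p->pid);
 *	attach_pid(p, PIDTYPE_TGID, p->tgid);
 *	attach_pid(p, PIDTYPE_PGID, process_group(p));
 *	attach_pid(p, PIDTYPE_SID, p->signal->session);
 */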
-static inline int __detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
- struct pid_link *link = task->pids + type;
- struct pid *pid = link->pidptr;
- int nr;
+ struct pid_link *link;
+ struct pid *pid;
+ int tmp;
- list_del(&link->pid_chain);
- if (!atomic_dec_and_test(&pid->count))
- return 0;
+ link = &task->pids[type];
+ pid = link->pid;
+
+ hlist_del_rcu(&link->node);
+ link->pid = NULL;
- nr = pid->nr;
- list_del(&pid->hash_chain);
- put_task_struct(pid->task);
+ for (tmp = PIDTYPE_MAX; --tmp >= 0; )
+ if (!hlist_empty(&pid->tasks[tmp]))
+ return;
+
+ free_pid(pid);
+}
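/*
 * Example of the reuse check above (illustrative): when a session
 * leader exits, detaching its PIDTYPE_PID link must not free the
 * struct pid while other tasks in the session still chain on
 * PIDTYPE_SID for the same nr; only the last detach frees it.
 */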
- return nr;
+/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
+void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+ enum pid_type type)
+{
+ new->pids[type].pid = old->pids[type].pid;
+ hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
+ old->pids[type].pid = NULL;
}
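/*
 * Illustratively, transfer_pid() stands in for the naive sequence
 *
 *	detach_pid(old, type);
 *	attach_pid(new, type, nr);
 *
 * but performs it as a single hlist_replace_rcu(), so the struct pid
 * is never unhashed (or, if old were its last user, freed) in between.
 */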
-static void _detach_pid(task_t *task, enum pid_type type)
+struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
- __detach_pid(task, type);
+ struct task_struct *result = NULL;
+
+ if (type == PIDTYPE_REALPID)
+ type = PIDTYPE_PID;
+ if (pid) {
+ struct hlist_node *first;
+ first = rcu_dereference(pid->tasks[type].first);
+ if (first)
+ result = hlist_entry(first, struct task_struct, pids[(type)].node);
+ }
+ return result;
}
-void fastcall detach_pid(task_t *task, enum pid_type type)
+/*
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ */
+struct task_struct *find_task_by_pid_type(int type, int nr)
{
- int nr = __detach_pid(task, type);
+ struct task_struct *task;
- if (!nr)
- return;
+ if (type == PIDTYPE_PID)
+ nr = vx_rmap_pid(nr);
- for (type = 0; type < PIDTYPE_MAX; ++type)
- if (find_pid(type, nr))
- return;
- free_pidmap(nr);
+ task = pid_task(find_pid(nr), type);
+ if (task && (type != PIDTYPE_REALPID) &&
+ /* maybe VS_WATCH_P in the future? */
+ !vx_check(task->xid, VS_WATCH|VS_IDENT))
+ return NULL;
+ return task;
}
-task_t *find_task_by_pid(int nr)
+EXPORT_SYMBOL(find_task_by_pid_type);
+
+struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
- struct pid *pid = find_pid(PIDTYPE_PID, nr);
+ struct pid *pid;
+ rcu_read_lock();
+ pid = get_pid(task->pids[type].pid);
+ rcu_read_unlock();
+ return pid;
+}
- if (!pid)
- return NULL;
- return pid_task(pid->task_list.next, PIDTYPE_PID);
+struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+{
+ struct task_struct *result;
+ rcu_read_lock();
+ result = pid_task(pid, type);
+ if (result)
+ get_task_struct(result);
+ rcu_read_unlock();
+ return result;
}
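/*
 * Usage sketch (illustrative): the caller owns a task reference on
 * return and must drop it:
 *
 *	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *	if (task) {
 *		... use task outside rcu_read_lock() ...
 *		put_task_struct(task);
 *	}
 */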
-EXPORT_SYMBOL(find_task_by_pid);
+struct pid *find_get_pid(pid_t nr)
+{
+ struct pid *pid;
+
+ rcu_read_lock();
+ pid = get_pid(find_pid(nr));
+ rcu_read_unlock();
+
+ return pid;
+}
+EXPORT_SYMBOL_GPL(find_get_pid);
/*
- * This function switches the PIDs if a non-leader thread calls
- * sys_execve() - this must be done without releasing the PID.
- * (which a detach_pid() would eventually do.)
+ * Used by proc to find the first pid that is greater than or equal to nr.
+ *
+ * If there is a pid at nr, this function is exactly the same as find_pid.
*/
-void switch_exec_pids(task_t *leader, task_t *thread)
+struct pid *find_ge_pid(int nr)
{
- _detach_pid(leader, PIDTYPE_PID);
- _detach_pid(leader, PIDTYPE_TGID);
- _detach_pid(leader, PIDTYPE_PGID);
- _detach_pid(leader, PIDTYPE_SID);
-
- _detach_pid(thread, PIDTYPE_PID);
- _detach_pid(thread, PIDTYPE_TGID);
-
- leader->pid = leader->tgid = thread->pid;
- thread->pid = thread->tgid;
-
- attach_pid(thread, PIDTYPE_PID, thread->pid);
- attach_pid(thread, PIDTYPE_TGID, thread->tgid);
- attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp);
- attach_pid(thread, PIDTYPE_SID, thread->signal->session);
- list_add_tail(&thread->tasks, &init_task.tasks);
-
- attach_pid(leader, PIDTYPE_PID, leader->pid);
- attach_pid(leader, PIDTYPE_TGID, leader->tgid);
- attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp);
- attach_pid(leader, PIDTYPE_SID, leader->signal->session);
+ struct pid *pid;
+
+ do {
+ pid = find_pid(nr);
+ if (pid)
+ break;
+ nr = next_pidmap(current->nsproxy->pid_ns, nr);
+ } while (nr > 0);
+
+ return pid;
+}
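/*
 * Illustrative /proc-style walk over all live pids (a sketch, not code
 * from this patch):
 *
 *	struct pid *pid;
 *	for (pid = find_ge_pid(1); pid; pid = find_ge_pid(pid->nr + 1))
 *		... emit one entry per pid ...
 */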
+
+int copy_pid_ns(int flags, struct task_struct *tsk)
+{
+ struct pid_namespace *old_ns = tsk->nsproxy->pid_ns;
+ int err = 0;
+
+ if (!old_ns)
+ return 0;
+
+ get_pid_ns(old_ns);
+ return err;
+}
+
+void free_pid_ns(struct kref *kref)
+{
+ struct pid_namespace *ns;
+
+ ns = container_of(kref, struct pid_namespace, kref);
+ kfree(ns);
}
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
void __init pidhash_init(void)
{
- int i, j, pidhash_size;
- unsigned long megabytes = max_pfn >> (20 - PAGE_SHIFT);
+ int i, pidhash_size;
+ unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
pidhash_shift = max(4, fls(megabytes * 4));
pidhash_shift = min(12, pidhash_shift);
pidhash_size = 1 << pidhash_shift;
- printk("PID hash table entries: %d (order %d: %Zd bytes)\n",
+ printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
pidhash_size, pidhash_shift,
- pidhash_size * sizeof(struct list_head));
-
- for (i = 0; i < PIDTYPE_MAX; i++) {
- pid_hash[i] = alloc_bootmem(pidhash_size *
- sizeof(struct list_head));
- if (!pid_hash[i])
- panic("Could not alloc pidhash!\n");
- for (j = 0; j < pidhash_size; j++)
- INIT_LIST_HEAD(&pid_hash[i][j]);
- }
+ pidhash_size * sizeof(struct hlist_head));
+
+ pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
+ if (!pid_hash)
+ panic("Could not alloc pidhash!\n");
+ for (i = 0; i < pidhash_size; i++)
+ INIT_HLIST_HEAD(&pid_hash[i]);
}
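/*
 * Worked sizing example (illustrative): with 256 MB worth of kernel
 * pages, megabytes == 256, fls(256 * 4) == 11, so pidhash_shift is
 * clamped to 11 and the table gets 2048 hlist heads (8 KiB with
 * 4-byte pointers). The max()/min() pair bounds the table between
 * 16 and 4096 entries.
 */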
void __init pidmap_init(void)
{
- int i;
-
- pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
- set_bit(0, pidmap_array->page);
- atomic_dec(&pidmap_array->nr_free);
-
- /*
- * Allocate PID 0, and hash it via all PID types:
- */
-
- for (i = 0; i < PIDTYPE_MAX; i++)
- attach_pid(current, i, 0);
+ init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ /* Reserve PID 0. We never call free_pidmap(0) */
+ set_bit(0, init_pid_ns.pidmap[0].page);
+ atomic_dec(&init_pid_ns.pidmap[0].nr_free);
+
+ pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
+ __alignof__(struct pid),
+ SLAB_PANIC, NULL, NULL);
}