/*
* Generic pidhash and scalable, time-bounded PID allocator
*
- * (C) 2002 William Irwin, IBM
- * (C) 2002 Ingo Molnar, Red Hat
+ * (C) 2002-2003 William Irwin, IBM
+ * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2004 Ingo Molnar, Red Hat
*
* pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 */
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
+#include <linux/vs_cvirt.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
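+/* one hash table per PID type; each bucket chains struct pid entries */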
-static struct list_head *pid_hash[PIDTYPE_MAX];
+static struct hlist_head *pid_hash[PIDTYPE_MAX];
static int pidhash_shift;
int pid_max = PID_MAX_DEFAULT;
#define RESERVED_PIDS 300
-#define PIDMAP_ENTRIES (PID_MAX_LIMIT/PAGE_SIZE/8)
+int pid_max_min = RESERVED_PIDS + 1;
+int pid_max_max = PID_MAX_LIMIT;
+
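+/* number of pidmap pages, rounded up so all PID_MAX_LIMIT bits fit */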
+#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
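+/* mk_pid() turns a (bitmap page, bit offset) pair back into a PID value */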
+#define mk_pid(map, off) (((map) - pidmap_array)*BITS_PER_PAGE + (off))
+#define find_next_offset(map, off) \
+ find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
{ [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
-static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;
-
-static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
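+
+/* release @pid: clear its bit and credit it back to its bitmap page */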
fastcall void free_pidmap(int pid)
{
	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
-/*
- * Here we search for the next map that has free bits left.
- * Normally the next map has free PIDs.
- */
-static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
+int alloc_pidmap(void)
{
- while (--*max_steps) {
- if (++map == map_limit)
- map = pidmap_array;
+ int i, offset, max_scan, pid, last = last_pid;
+ pidmap_t *map;
+
+ pid = last + 1;
+ if (pid >= pid_max)
+ pid = RESERVED_PIDS;
+ offset = pid & BITS_PER_PAGE_MASK;
+ map = &pidmap_array[pid/BITS_PER_PAGE];
+ max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
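+	/*
+	 * Make at most one full pass over the bitmap; a starting page
+	 * entered mid-way (offset != 0) gets one extra visit after the
+	 * scan wraps around.
+	 */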
+ for (i = 0; i <= max_scan; ++i) {
if (unlikely(!map->page)) {
unsigned long page = get_zeroed_page(GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock(&pidmap_lock);
			if (map->page)
				free_page(page);
else
map->page = (void *)page;
spin_unlock(&pidmap_lock);
-
- if (!map->page)
+ if (unlikely(!map->page))
break;
}
- if (atomic_read(&map->nr_free))
- return map;
- }
- return NULL;
-}
-
-int alloc_pidmap(void)
-{
- int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
- pidmap_t *map;
-
- pid = last_pid + 1;
- if (pid >= pid_max)
- pid = RESERVED_PIDS;
-
- offset = pid & BITS_PER_PAGE_MASK;
- map = pidmap_array + pid / BITS_PER_PAGE;
-
- if (likely(map->page && !test_and_set_bit(offset, map->page))) {
- /*
- * There is a small window for last_pid updates to race,
- * but in that case the next allocation will go into the
- * slowpath and that fixes things up.
- */
-return_pid:
- atomic_dec(&map->nr_free);
- last_pid = pid;
- return pid;
- }
-
- if (!offset || !atomic_read(&map->nr_free)) {
-next_map:
- map = next_free_map(map, &max_steps);
- if (!map)
- goto failure;
- offset = 0;
+ if (likely(atomic_read(&map->nr_free))) {
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ atomic_dec(&map->nr_free);
+ last_pid = pid;
+ return pid;
+ }
+ offset = find_next_offset(map, offset);
+ pid = mk_pid(map, offset);
+ /*
+ * find_next_offset() found a bit, the pid from it
+ * is in-bounds, and if we fell back to the last
+ * bitmap block and the final block was the same
+ * as the starting point, pid is before last_pid.
+ */
+ } while (offset < BITS_PER_PAGE && pid < pid_max &&
+ (i != max_scan || pid < last ||
+ !((last+1) & BITS_PER_PAGE_MASK)));
+ }
+ if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
+ ++map;
+ offset = 0;
+ } else {
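+			/* wrapped around: resume at the first page, skipping reserved PIDs */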
+ map = &pidmap_array[0];
+ offset = RESERVED_PIDS;
+ if (unlikely(last == offset))
+ break;
+ }
+ pid = mk_pid(map, offset);
}
- /*
- * Find the next zero bit:
- */
-scan_more:
- offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset);
- if (offset >= BITS_PER_PAGE)
- goto next_map;
- if (test_and_set_bit(offset, map->page))
- goto scan_more;
-
- /* we got the PID: */
- pid = (map - pidmap_array) * BITS_PER_PAGE + offset;
- goto return_pid;
-
-failure:
return -1;
}
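+
+/* look up the struct pid hashed for (@type, @nr) via an RCU-safe hlist walk */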
-fastcall struct pid *find_pid(enum pid_type type, int nr)
+struct pid * fastcall find_pid(enum pid_type type, int nr)
{
- struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
+ struct hlist_node *elem;
struct pid *pid;
- __list_for_each(elem, bucket) {
- pid = list_entry(elem, struct pid, hash_chain);
+ hlist_for_each_entry_rcu(pid, elem,
+ &pid_hash[type][pid_hashfn(nr)], pid_chain) {
if (pid->nr == nr)
return pid;
}
return NULL;
}
-void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
-{
- atomic_inc(&pid->count);
- list_add_tail(&link->pid_chain, &pid->task_list);
- link->pidptr = pid;
-}
-
int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
{
- struct pid *pid = find_pid(type, nr);
-
- if (pid)
- atomic_inc(&pid->count);
- else {
- pid = &task->pids[type].pid;
- pid->nr = nr;
- atomic_set(&pid->count, 1);
- INIT_LIST_HEAD(&pid->task_list);
- pid->task = task;
- get_task_struct(task);
- list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]);
+ struct pid *pid, *task_pid;
+
+ task_pid = &task->pids[type];
+ pid = find_pid(type, nr);
+ task_pid->nr = nr;
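+	/*
+	 * First task to use this nr: hash its struct pid.  Otherwise
+	 * chain it onto the pid_list of the already-hashed entry.
+	 */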
+ if (pid == NULL) {
+ INIT_LIST_HEAD(&task_pid->pid_list);
+ hlist_add_head_rcu(&task_pid->pid_chain,
+ &pid_hash[type][pid_hashfn(nr)]);
+ } else {
+ INIT_HLIST_NODE(&task_pid->pid_chain);
+ list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
}
- list_add_tail(&task->pids[type].pid_chain, &pid->task_list);
- task->pids[type].pidptr = pid;
return 0;
}
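+
+/*
+ * Unhash @task's @type pid.  Returns the PID number when this was the
+ * last task of this @type attached to it, otherwise 0.
+ */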
-static inline int __detach_pid(task_t *task, enum pid_type type)
+static fastcall int __detach_pid(task_t *task, enum pid_type type)
{
- struct pid_link *link = task->pids + type;
- struct pid *pid = link->pidptr;
- int nr;
-
- list_del(&link->pid_chain);
- if (!atomic_dec_and_test(&pid->count))
- return 0;
+ struct pid *pid, *pid_next;
+ int nr = 0;
+
+ pid = &task->pids[type];
+ if (!hlist_unhashed(&pid->pid_chain)) {
+ if (list_empty(&pid->pid_list)) {
+ nr = pid->nr;
+ hlist_del_rcu(&pid->pid_chain);
+ } else {
+ pid_next = list_entry(pid->pid_list.next,
+ struct pid, pid_list);
+ /* insert next pid from pid_list to hash */
+ hlist_replace_rcu(&pid->pid_chain,
+ &pid_next->pid_chain);
+ }
+ }
- nr = pid->nr;
- list_del(&pid->hash_chain);
- put_task_struct(pid->task);
+ list_del_rcu(&pid->pid_list);
+ pid->nr = 0;
return nr;
}
-static void _detach_pid(task_t *task, enum pid_type type)
-{
- __detach_pid(task, type);
-}
-
void fastcall detach_pid(task_t *task, enum pid_type type)
{
- int nr = __detach_pid(task, type);
+	int tmp, nr;
+
+	nr = __detach_pid(task, type);
if (!nr)
return;
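+	/* only recycle the bitmap slot once no pid type still uses this nr */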
- for (type = 0; type < PIDTYPE_MAX; ++type)
- if (find_pid(type, nr))
+ for (tmp = PIDTYPE_MAX; --tmp >= 0; )
+ if (tmp != type && find_pid(tmp, nr))
return;
+
free_pidmap(nr);
}
-task_t *find_task_by_pid(int nr)
+task_t *find_task_by_pid_type(int type, int nr)
{
- struct pid *pid = find_pid(PIDTYPE_PID,
- vx_rmap_tgid(current->vx_info, nr));
+ struct pid *pid;
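+
+	/* PIDTYPE_REALPID bypasses the vserver remapping applied to PIDTYPE_PID */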
+ if (type == PIDTYPE_REALPID)
+ type = PIDTYPE_PID;
+ else if (type == PIDTYPE_PID)
+ nr = vx_rmap_pid(nr);
+
+ pid = find_pid(type, nr);
if (!pid)
return NULL;
- return pid_task(pid->task_list.next, PIDTYPE_PID);
+
+ return pid_task(&pid->pid_list, type);
}
-EXPORT_SYMBOL(find_task_by_pid);
+EXPORT_SYMBOL(find_task_by_pid_type);
/*
 * This function switches the PIDs if a non-leader thread calls
 * sys_execve() - this must be done without releasing the PID.
*/
void switch_exec_pids(task_t *leader, task_t *thread)
{
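+	/* detach the old IDs from both tasks before swapping them */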
- _detach_pid(leader, PIDTYPE_PID);
- _detach_pid(leader, PIDTYPE_TGID);
- _detach_pid(leader, PIDTYPE_PGID);
- _detach_pid(leader, PIDTYPE_SID);
+ __detach_pid(leader, PIDTYPE_PID);
+ __detach_pid(leader, PIDTYPE_TGID);
+ __detach_pid(leader, PIDTYPE_PGID);
+ __detach_pid(leader, PIDTYPE_SID);
- _detach_pid(thread, PIDTYPE_PID);
- _detach_pid(thread, PIDTYPE_TGID);
+ __detach_pid(thread, PIDTYPE_PID);
+ __detach_pid(thread, PIDTYPE_TGID);
leader->pid = leader->tgid = thread->pid;
thread->pid = thread->tgid;
void __init pidhash_init(void)
{
int i, j, pidhash_size;
- unsigned long megabytes = max_pfn >> (20 - PAGE_SHIFT);
+ unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
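+	/* scale the hash with available memory: 2^4 to 2^12 buckets per type */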
pidhash_shift = max(4, fls(megabytes * 4));
pidhash_shift = min(12, pidhash_shift);
pidhash_size = 1 << pidhash_shift;
- printk("PID hash table entries: %d (order %d: %Zd bytes)\n",
+ printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
pidhash_size, pidhash_shift,
- pidhash_size * sizeof(struct list_head));
+ PIDTYPE_MAX * pidhash_size * sizeof(struct hlist_head));
for (i = 0; i < PIDTYPE_MAX; i++) {
pid_hash[i] = alloc_bootmem(pidhash_size *
- sizeof(struct list_head));
+ sizeof(*(pid_hash[i])));
if (!pid_hash[i])
panic("Could not alloc pidhash!\n");
for (j = 0; j < pidhash_size; j++)
- INIT_LIST_HEAD(&pid_hash[i][j]);
+ INIT_HLIST_HEAD(&pid_hash[i][j]);
}
}