#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
+#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
free_task(tsk);
}
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- wait->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue);
-
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue_tail(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue_exclusive);
-
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- __remove_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(remove_wait_queue);
-
-
-/*
- * Note: we use "set_current_state()" _after_ the wait-queue add,
- * because we need a memory barrier there on SMP, so that any
- * wake-function that tests for the wait-queue being active
- * will be guaranteed to see waitqueue addition _or_ subsequent
- * tests in this thread will see the wakeup having taken place.
- *
- * The spin_unlock() itself is semi-permeable and only protects
- * one way (it only protects stuff inside the critical region and
- * stops them from bleeding out - it would still allow subsequent
- * loads to move into the critical region).
- */
-void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
- unsigned long flags;
-
- wait->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list))
- __add_wait_queue(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait);
-
-void fastcall
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
- unsigned long flags;
-
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list))
- __add_wait_queue_tail(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait_exclusive);
-
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
-{
- unsigned long flags;
-
- __set_current_state(TASK_RUNNING);
- /*
- * We can check for list emptiness outside the lock
- * IFF:
- * - we use the "careful" check that verifies both
- * the next and prev pointers, so that there cannot
- * be any half-pending updates in progress on other
- * CPU's that we haven't seen yet (and that might
- * still change the stack area).
- * and
- * - all other users take the lock (ie we can only
- * have _one_ other CPU that looks at or modifies
- * the list).
- */
- if (!list_empty_careful(&wait->task_list)) {
- spin_lock_irqsave(&q->lock, flags);
- list_del_init(&wait->task_list);
- spin_unlock_irqrestore(&q->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(finish_wait);
-
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
- int ret = default_wake_function(wait, mode, sync, key);
-
- if (ret)
- list_del_init(&wait->task_list);
- return ret;
-}
-
-EXPORT_SYMBOL(autoremove_wake_function);
-
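/*
 * For reference, a minimal sketch of the canonical caller pattern for the
 * wait-queue helpers removed above.  The queue head "example_wq" and the
 * predicate "example_condition()" are hypothetical names used only for
 * illustration; the ordering (queue the entry first, then set the task
 * state) follows the barrier comment above prepare_to_wait().
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
extern int example_condition(void);	/* hypothetical predicate */

static void example_sleep_until_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue first, then set state, then re-test the condition */
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_condition())
			break;
		schedule();
	}
	finish_wait(&example_wq, &wait);
}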
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
* value: the thread structures can take up at most half
* of memory.
*/
- max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
+ max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
+
/*
* we need to allow at least 20 threads to boot a system
*/
if(max_threads < 20)
max_threads = 20;
- init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
- init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
+ init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
+ init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}
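/*
 * A worked example of the rewritten max_threads formula, assuming typical
 * x86 values of a 4 KiB PAGE_SIZE and an 8 KiB THREAD_SIZE: a machine with
 * 131072 pages (512 MiB) gets max_threads = 131072 / (8 * 8192 / 4096)
 * = 131072 / 16 = 8192, the same result as the old
 * "mempages / (THREAD_SIZE/PAGE_SIZE) / 8" form, just without the double
 * division.
 */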
static struct task_struct *dup_task_struct(struct task_struct *orig)
rb_parent = NULL;
pprev = &mm->mmap;
- /*
- * Add it to the mmlist after the parent.
- * Doing it this way means that we can order the list,
- * and fork() won't mess up the ordering significantly.
- * Add it first so that swapoff can see any swap entries.
- */
- spin_lock(&mmlist_lock);
- list_add(&mm->mmlist, &current->mm->mmlist);
- mmlist_nr++;
- spin_unlock(&mmlist_lock);
-
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
struct file *file;
#endif /* CONFIG_MMU */
spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-int mmlist_nr;
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
+ INIT_LIST_HEAD(&mm->mmlist);
mm->core_waiters = 0;
- mm->page_table_lock = SPIN_LOCK_UNLOCKED;
- mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
+ mm->nr_ptes = 0;
+ spin_lock_init(&mm->page_table_lock);
+ rwlock_init(&mm->ioctx_list_lock);
mm->ioctx_list = NULL;
mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
*/
void mmput(struct mm_struct *mm)
{
- if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
- list_del(&mm->mmlist);
- mmlist_nr--;
- spin_unlock(&mmlist_lock);
+ if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
exit_mmap(mm);
+ if (!list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_del(&mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
put_swap_token(mm);
mmdrop(mm);
}
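/*
 * With the unconditional list_add() at dup_mmap() time gone (see the hunk
 * above), an mm only appears on the mmlist if some later user has put it
 * there, which is why mmput() now checks list_empty() before unlinking.
 * A hedged sketch of how such a user might add an mm lazily under the same
 * lock; the function name and the use of init_mm.mmlist as the list anchor
 * are assumptions here, not taken from this patch:
 */
static void example_add_mm_to_mmlist(struct mm_struct *mm)
{
	spin_lock(&mmlist_lock);
	if (list_empty(&mm->mmlist))
		list_add(&mm->mmlist, &init_mm.mmlist);
	spin_unlock(&mmlist_lock);
}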
/**
* get_task_mm - acquire a reference to the task's mm
*
- * Returns %NULL if the task has no mm. Checks if the use count
- * of the mm is non-zero and if so returns a reference to it, after
+ * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
+ * (meaning this kernel workthread has transiently adopted a user mm
+ * with use_mm, to do its AIO).  Otherwise returns a reference to it, after
* bumping up the use count. User must release the mm via mmput()
* after use. Typically used by /proc and ptrace.
- *
- * If the use count is zero, it means that this mm is going away,
- * so return %NULL. This only happens in the case of an AIO daemon
- * which has temporarily adopted an mm (see use_mm), in the course
- * of its final mmput, before exit_aio has completed.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
task_lock(task);
mm = task->mm;
if (mm) {
- spin_lock(&mmlist_lock);
- if (!atomic_read(&mm->mm_users))
+ if (task->flags & PF_BORROWED_MM)
mm = NULL;
else
atomic_inc(&mm->mm_users);
- spin_unlock(&mmlist_lock);
}
task_unlock(task);
return mm;
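/*
 * A minimal sketch of the intended get_task_mm()/mmput() pairing, as used
 * by /proc-style readers; the function name and the body of the critical
 * section are hypothetical:
 */
static int example_inspect_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return -EINVAL;		/* kernel thread, or borrowed mm */

	down_read(&mm->mmap_sem);
	/* ... walk mm->mmap or read mm counters here ... */
	up_read(&mm->mmap_sem);

	mmput(mm);
	return 0;
}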
/* We don't need to lock fs - think why ;-) */
if (fs) {
atomic_set(&fs->count, 1);
- fs->lock = RW_LOCK_UNLOCKED;
+ rwlock_init(&fs->lock);
fs->umask = old->umask;
read_lock(&old->lock);
fs->rootmnt = mntget(old->rootmnt);
atomic_set(&newf->count, 1);
- newf->file_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&newf->file_lock);
newf->next_fd = 0;
newf->max_fds = NR_OPEN_DEFAULT;
newf->max_fdset = __FD_SETSIZE;
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
- if (f)
+ if (f) {
get_file(f);
+ } else {
+ /*
+ * The fd may be claimed in the fd bitmap but not yet
+ * instantiated in the files array if a sibling thread
+ * is partway through open(). So make sure that this
+ * fd is available to the new process.
+ */
+ FD_CLR(open_files - i, newf->open_fds);
+ }
*new_fds++ = f;
}
spin_unlock(&oldf->file_lock);
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
+ atomic_inc(&current->signal->live);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
+ atomic_set(&sig->live, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
+ sig->stop_state = 0;
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+ task_lock(current->group_leader);
+ memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
+ task_unlock(current->group_leader);
+
return 0;
}
goto bad_fork_cleanup_vm;
if (atomic_read(&p->user->processes) >=
- p->rlim[RLIMIT_NPROC].rlim_cur) {
+ p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
goto bad_fork_cleanup_vm;
}
#endif
+ p->tgid = p->pid;
+ if (clone_flags & CLONE_THREAD)
+ p->tgid = current->tgid;
+
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
- if ((retval = copy_namespace(clone_flags, p)))
+ if ((retval = copy_keys(clone_flags, p)))
goto bad_fork_cleanup_mm;
+ if ((retval = copy_namespace(clone_flags, p)))
+ goto bad_fork_cleanup_keys;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_namespace;
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
- p->tgid = p->pid;
p->group_leader = p;
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
retval = -EAGAIN;
goto bad_fork_cleanup_namespace;
}
- p->tgid = current->tgid;
p->group_leader = current->group_leader;
if (current->signal->group_stop_count > 0) {
bad_fork_cleanup_namespace:
exit_namespace(p);
+bad_fork_cleanup_keys:
+ exit_keys(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);