* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
-#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
-#include <linux/ptrace.h>
+#include <linux/rcupdate.h>
+#include <linux/tracehook.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
+#include <linux/cn_proc.h>
+#include <linux/delayacct.h>
+#include <linux/taskstats_kern.h>
+#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
#include <linux/vs_memory.h>
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
- __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
-
-EXPORT_SYMBOL(tasklist_lock);
+__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
int nr_processes(void)
{
static kmem_cache_t *task_struct_cachep;
#endif
+/* SLAB cache for signal_struct structures (tsk->signal) */
+static kmem_cache_t *signal_cachep;
+
+/* SLAB cache for sighand_struct structures (tsk->sighand) */
+kmem_cache_t *sighand_cachep;
+
+/* SLAB cache for files_struct structures (tsk->files) */
+kmem_cache_t *files_cachep;
+
+/* SLAB cache for fs_struct structures (tsk->fs) */
+kmem_cache_t *fs_cachep;
+
+/* SLAB cache for vm_area_struct structures */
+kmem_cache_t *vm_area_cachep;
+
+/* SLAB cache for mm_struct structures (tsk->mm) */
+static kmem_cache_t *mm_cachep;
+
void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ rt_mutex_debug_task_free(tsk);
clr_vx_info(&tsk->vx_info);
clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
- if (unlikely(tsk->audit_context))
- audit_free(tsk);
security_task_free(tsk);
free_uid(tsk->user);
put_group_info(tsk->group_info);
+ delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
+ init_task.signal->rlim[RLIMIT_SIGPENDING] =
+ init_task.signal->rlim[RLIMIT_NPROC];
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
return NULL;
}
- *ti = *orig->thread_info;
*tsk = *orig;
tsk->thread_info = ti;
- ti->task = tsk;
+ setup_thread_stack(tsk, orig);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
+ atomic_set(&tsk->fs_excl, 0);
+ tsk->btrace_seq = 0;
+ tsk->splice_pipe = NULL;
return tsk;
}
#ifdef CONFIG_MMU
-static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
+static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
- struct vm_area_struct * mpnt, *tmp, **pprev;
+ struct vm_area_struct *mpnt, *tmp, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
- flush_cache_mm(current->mm);
+ flush_cache_mm(oldmm);
+ /*
+ * Not linked in yet - no deadlock potential:
+ */
+ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
+ mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- mm->rss = 0;
- mm->anon_rss = 0;
+ __set_mm_counter(mm, file_rss, 0);
+ __set_mm_counter(mm, anon_rss, 0);
cpus_clear(mm->cpu_vm_mask);
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
- for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
- __vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
- -vma_pages(mpnt));
+ long pages = vma_pages(mpnt);
+ vx_vmpages_sub(mm, pages);
+ vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
+ -pages);
continue;
}
charge = 0;
}
/*
- * Link in the new vma and copy the page table entries:
- * link in first so that swapoff can see swap entries,
- * and try_to_unmap_one's find_vma find the new vma.
+ * Link in the new vma and copy the page table entries.
*/
- spin_lock(&mm->page_table_lock);
*pprev = tmp;
pprev = &tmp->vm_next;
rb_parent = &tmp->vm_rb;
mm->map_count++;
- retval = copy_page_range(mm, current->mm, tmp);
- spin_unlock(&mm->page_table_lock);
+ retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
if (retval)
goto out;
}
+#ifdef arch_dup_mmap
+ arch_dup_mmap(mm, oldmm);
+#endif
retval = 0;
-
out:
- flush_tlb_mm(current->mm);
+ up_write(&mm->mmap_sem);
+ flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
fail_nomem_policy:
spin_lock_init(&mm->page_table_lock);
rwlock_init(&mm->ioctx_list_lock);
mm->ioctx_list = NULL;
- mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = ~0UL;
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
*/
void mmput(struct mm_struct *mm)
{
+ might_sleep();
+
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
exit_mmap(mm);
}
}
+/*
+ * Allocate a new mm structure and copy contents from the
+ * mm structure of the passed in task structure.
+ */
+static struct mm_struct *dup_mm(struct task_struct *tsk)
+{
+ struct mm_struct *mm, *oldmm = current->mm;
+ int err;
+
+ if (!oldmm)
+ return NULL;
+
+ mm = allocate_mm();
+ if (!mm)
+ goto fail_nomem;
+
+ memcpy(mm, oldmm, sizeof(*mm));
+ mm->mm_vx_info = NULL;
+
+ if (!mm_init(mm))
+ goto fail_nomem;
+
+ if (init_new_context(tsk, mm))
+ goto fail_nocontext;
+
+ err = dup_mmap(mm, oldmm);
+ if (err)
+ goto free_pt;
+
+ mm->hiwater_rss = get_mm_rss(mm);
+ mm->hiwater_vm = mm->total_vm;
+
+ return mm;
+
+free_pt:
+ mmput(mm);
+
+fail_nomem:
+ return NULL;
+
+fail_nocontext:
+ /*
+ * If init_new_context() failed, we cannot use mmput() to free the mm
+ * because it calls destroy_context()
+ */
+ clr_vx_info(&mm->mm_vx_info);
+ mm_free_pgd(mm);
+ free_mm(mm);
+ return NULL;
+}
+
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
struct mm_struct * mm, *oldmm;
if (clone_flags & CLONE_VM) {
atomic_inc(&oldmm->mm_users);
mm = oldmm;
- /*
- * There are cases where the PTL is held to ensure no
- * new threads start up in user mode using an mm, which
- * allows optimizing out ipis; the tlb_gather_mmu code
- * is an example.
- */
- spin_unlock_wait(&oldmm->page_table_lock);
goto good_mm;
}
retval = -ENOMEM;
- mm = allocate_mm();
+ mm = dup_mm(tsk);
if (!mm)
goto fail_nomem;
- /* Copy the current MM stuff.. */
- memcpy(mm, oldmm, sizeof(*mm));
- mm->mm_vx_info = NULL;
- if (!mm_init(mm))
- goto fail_nomem;
-
- if (init_new_context(tsk,mm))
- goto fail_nocontext;
-
- retval = dup_mmap(mm, oldmm);
- if (retval)
- goto free_pt;
-
- mm->hiwater_rss = mm->rss;
- mm->hiwater_vm = mm->total_vm;
-
good_mm:
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
-free_pt:
- mmput(mm);
fail_nomem:
return retval;
-
-fail_nocontext:
- /*
- * If init_new_context() failed, we cannot use mmput() to free the mm
- * because it calls destroy_context()
- */
- clr_vx_info(&mm->mm_vx_info);
- mm_free_pgd(mm);
- free_mm(mm);
- return retval;
}
static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
return 0;
}
-static int count_open_files(struct files_struct *files, int size)
+static int count_open_files(struct fdtable *fdt)
{
+ int size = fdt->max_fdset;
int i;
/* Find the last open fd */
for (i = size/(8*sizeof(long)); i > 0; ) {
- if (files->open_fds->fds_bits[--i])
+ if (fdt->open_fds->fds_bits[--i])
break;
}
i = (i+1) * 8 * sizeof(long);
return i;
}
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+static struct files_struct *alloc_files(void)
{
- struct files_struct *oldf, *newf;
- struct file **old_fds, **new_fds;
- int open_files, size, i, error = 0, expand;
-
- /*
- * A background process may not have any files ...
- */
- oldf = current->files;
- if (!oldf)
- goto out;
+ struct files_struct *newf;
+ struct fdtable *fdt;
- if (clone_flags & CLONE_FILES) {
- atomic_inc(&oldf->count);
- goto out;
- }
-
- /*
- * Note: we may be using current for both targets (See exec.c)
- * This works because we cache current->files (old) as oldf. Don't
- * break this.
- */
- tsk->files = NULL;
- error = -ENOMEM;
newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
- if (!newf)
+ if (!newf)
goto out;
atomic_set(&newf->count, 1);
spin_lock_init(&newf->file_lock);
- newf->next_fd = 0;
- newf->max_fds = NR_OPEN_DEFAULT;
- newf->max_fdset = __FD_SETSIZE;
- newf->close_on_exec = &newf->close_on_exec_init;
- newf->open_fds = &newf->open_fds_init;
- newf->fd = &newf->fd_array[0];
+ newf->next_fd = 0;
+ fdt = &newf->fdtab;
+ fdt->max_fds = NR_OPEN_DEFAULT;
+ fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
+ fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
+ fdt->open_fds = (fd_set *)&newf->open_fds_init;
+ fdt->fd = &newf->fd_array[0];
+ INIT_RCU_HEAD(&fdt->rcu);
+ fdt->free_files = NULL;
+ fdt->next = NULL;
+ rcu_assign_pointer(newf->fdt, fdt);
+out:
+ return newf;
+}
- spin_lock(&oldf->file_lock);
+/*
+ * Allocate a new files structure and copy contents from the
+ * passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
+ */
+static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
+{
+ struct files_struct *newf;
+ struct file **old_fds, **new_fds;
+ int open_files, size, i, expand;
+ struct fdtable *old_fdt, *new_fdt;
- open_files = count_open_files(oldf, oldf->max_fdset);
+ *errorp = -ENOMEM;
+ newf = alloc_files();
+ if (!newf)
+ goto out;
+
+ spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
+ new_fdt = files_fdtable(newf);
+ size = old_fdt->max_fdset;
+ open_files = count_open_files(old_fdt);
expand = 0;
/*
* Check whether we need to allocate a larger fd array or fd set.
* Note: we're not a clone task, so the open count won't change.
*/
- if (open_files > newf->max_fdset) {
- newf->max_fdset = 0;
+ if (open_files > new_fdt->max_fdset) {
+ new_fdt->max_fdset = 0;
expand = 1;
}
- if (open_files > newf->max_fds) {
- newf->max_fds = 0;
+ if (open_files > new_fdt->max_fds) {
+ new_fdt->max_fds = 0;
expand = 1;
}
if (expand) {
spin_unlock(&oldf->file_lock);
spin_lock(&newf->file_lock);
- error = expand_files(newf, open_files-1);
+ *errorp = expand_files(newf, open_files-1);
spin_unlock(&newf->file_lock);
- if (error < 0)
+ if (*errorp < 0)
goto out_release;
+ new_fdt = files_fdtable(newf);
+ /*
+ * Reacquire the oldf lock and a pointer to its fd table;
+ * it may have a new, bigger fd table by now. We need
+ * the latest pointer.
+ */
spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
}
- old_fds = oldf->fd;
- new_fds = newf->fd;
+ old_fds = old_fdt->fd;
+ new_fds = new_fdt->fd;
- memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
- memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);
+ memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
+ memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
if (f) {
get_file(f);
- /* FIXME sum it first for avail check and performance */
+ /* FIXME: sum it first for check and performance */
vx_openfd_inc(open_files - i);
} else {
/*
* is partway through open(). So make sure that this
* fd is available to the new process.
*/
- FD_CLR(open_files - i, newf->open_fds);
+ FD_CLR(open_files - i, new_fdt->open_fds);
}
- *new_fds++ = f;
+ rcu_assign_pointer(*new_fds++, f);
}
spin_unlock(&oldf->file_lock);
/* compute the remainder to be cleared */
- size = (newf->max_fds - open_files) * sizeof(struct file *);
+ size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
/* This is long word aligned thus could use a optimized version */
memset(new_fds, 0, size);
- if (newf->max_fdset > open_files) {
- int left = (newf->max_fdset-open_files)/8;
+ if (new_fdt->max_fdset > open_files) {
+ int left = (new_fdt->max_fdset-open_files)/8;
int start = open_files / (8 * sizeof(unsigned long));
- memset(&newf->open_fds->fds_bits[start], 0, left);
- memset(&newf->close_on_exec->fds_bits[start], 0, left);
+ memset(&new_fdt->open_fds->fds_bits[start], 0, left);
+ memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
}
- tsk->files = newf;
- error = 0;
out:
- return error;
+ return newf;
out_release:
- free_fdset (newf->close_on_exec, newf->max_fdset);
- free_fdset (newf->open_fds, newf->max_fdset);
- free_fd_array(newf->fd, newf->max_fds);
+ free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
+ free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
+ free_fd_array(new_fdt->fd, new_fdt->max_fds);
kmem_cache_free(files_cachep, newf);
- goto out;
+ return NULL;
+}
+
+static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+{
+ struct files_struct *oldf, *newf;
+ int error = 0;
+
+ /*
+ * A background process may not have any files ...
+ */
+ oldf = current->files;
+ if (!oldf)
+ goto out;
+
+ if (clone_flags & CLONE_FILES) {
+ atomic_inc(&oldf->count);
+ goto out;
+ }
+
+ /*
+ * Note: we may be using current for both targets (See exec.c)
+ * This works because we cache current->files (old) as oldf. Don't
+ * break this.
+ */
+ tsk->files = NULL;
+ newf = dup_fd(oldf, &error);
+ if (!newf)
+ goto out;
+
+ tsk->files = newf;
+ error = 0;
+out:
+ return error;
}
/*
struct files_struct *files = current->files;
int rc;
- if(!files)
- BUG();
+ BUG_ON(!files);
/* This can race but the race causes us to copy when we don't
need to and drop the copy */
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- tsk->sighand = sig;
+ rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
- spin_lock_init(&sig->siglock);
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
+void __cleanup_sighand(struct sighand_struct *sighand)
+{
+ if (atomic_dec_and_test(&sighand->count))
+ kmem_cache_free(sighand_cachep, sighand);
+}
+
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
+ int ret;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live);
+ taskstats_tgid_alloc(current->signal);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
+
+ ret = copy_thread_group_keys(tsk);
+ if (ret < 0) {
+ kmem_cache_free(signal_cachep, sig);
+ return ret;
+ }
+
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
- sig->tty = current->signal->tty;
- sig->pgrp = process_group(current);
- sig->session = current->signal->session;
+ hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
+ sig->it_real_incr.tv64 = 0;
+ sig->real_timer.function = it_real_fn;
+ sig->tsk = tsk;
+
+ sig->it_virt_expires = cputime_zero;
+ sig->it_virt_incr = cputime_zero;
+ sig->it_prof_expires = cputime_zero;
+ sig->it_prof_incr = cputime_zero;
+
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = 0;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+ sig->sched_time = 0;
+ INIT_LIST_HEAD(&sig->cpu_timers[0]);
+ INIT_LIST_HEAD(&sig->cpu_timers[1]);
+ INIT_LIST_HEAD(&sig->cpu_timers[2]);
+ taskstats_tgid_init(sig);
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
+ if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+ /*
+ * New sole thread in the process gets an expiry time
+ * of the whole CPU time limit.
+ */
+ tsk->it_prof_expires =
+ secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+ }
+ acct_init_pacct(&sig->pacct);
+
return 0;
}
+void __cleanup_signal(struct signal_struct *sig)
+{
+ exit_thread_group_keys(sig);
+ taskstats_tgid_free(sig);
+ kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+
+ atomic_dec(&sig->live);
+
+ if (atomic_dec_and_test(&sig->count))
+ __cleanup_signal(sig);
+}
+
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
- new_flags &= ~PF_SUPERPRIV;
+ new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
new_flags |= PF_FORKNOEXEC;
- if (!(clone_flags & CLONE_PTRACE))
- p->ptrace = 0;
+ new_flags |= PF_STARTING;
p->flags = new_flags;
}
return current->pid;
}
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+ spin_lock_init(&p->pi_lock);
+ plist_head_init(&p->pi_waiters, &p->pi_lock);
+ p->pi_blocked_on = NULL;
+#endif
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static task_t *copy_process(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr,
- int pid)
+static struct task_struct *copy_process(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+ int __user *parent_tidptr,
+ int __user *child_tidptr,
+ int pid)
{
int retval;
struct task_struct *p = NULL;
struct vx_info *vxi;
+ struct nx_info *nxi;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
if (!p)
goto fork_out;
+ rt_mutex_init_task(p);
+
+ p->tux_info = NULL;
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+#endif
init_vx_info(&p->vx_info, current->vx_info);
- p->nx_info = NULL;
- set_nx_info(&p->nx_info, current->nx_info);
+ init_nx_info(&p->nx_info, current->nx_info);
/* check vserver memory */
if (p->mm && !(clone_flags & CLONE_VM)) {
if (vx_vmpages_avail(p->mm, p->mm->total_vm))
- vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
+ vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm);
else
goto bad_fork_free;
}
if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
- if (!vx_rsspages_avail(p->mm, p->mm->rss))
+ if (!vx_rsspages_avail(p->mm, get_mm_counter(p->mm, file_rss)))
goto bad_fork_cleanup_vm;
}
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
- if (!try_module_get(p->thread_info->exec_domain->module))
+ if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
+ delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
p->pid = pid;
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
- goto bad_fork_cleanup;
-
- p->proc_dentry = NULL;
+ goto bad_fork_cleanup_delays_binfmt;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
- spin_lock_init(&p->proc_lock);
+#ifdef CONFIG_PTRACE
+ INIT_LIST_HEAD(&p->ptracees);
+#endif
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
- p->it_real_value = 0;
- p->it_real_incr = 0;
- p->it_virt_value = cputime_zero;
- p->it_virt_incr = cputime_zero;
- p->it_prof_value = cputime_zero;
- p->it_prof_incr = cputime_zero;
- init_timer(&p->real_timer);
- p->real_timer.data = (unsigned long) p;
-
p->utime = cputime_zero;
p->stime = cputime_zero;
+ p->sched_time = 0;
p->rchar = 0; /* I/O counter: bytes read */
p->wchar = 0; /* I/O counter: bytes written */
p->syscr = 0; /* I/O counter: read syscalls */
p->syscw = 0; /* I/O counter: write syscalls */
acct_clear_integrals(p);
+ p->it_virt_expires = cputime_zero;
+ p->it_prof_expires = cputime_zero;
+ p->it_sched_expires = 0;
+ INIT_LIST_HEAD(&p->cpu_timers[0]);
+ INIT_LIST_HEAD(&p->cpu_timers[1]);
+ INIT_LIST_HEAD(&p->cpu_timers[2]);
+
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->security = NULL;
p->io_context = NULL;
p->io_wait = NULL;
p->audit_context = NULL;
+ cpuset_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_copy(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
- goto bad_fork_cleanup;
+ goto bad_fork_cleanup_cpuset;
}
+ mpol_fix_fork_child_flag(p);
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ p->irq_events = 0;
+ p->hardirqs_enabled = 0;
+ p->hardirq_enable_ip = 0;
+ p->hardirq_enable_event = 0;
+ p->hardirq_disable_ip = _THIS_IP_;
+ p->hardirq_disable_event = 0;
+ p->softirqs_enabled = 1;
+ p->softirq_enable_ip = _THIS_IP_;
+ p->softirq_enable_event = 0;
+ p->softirq_disable_ip = 0;
+ p->softirq_disable_event = 0;
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+#endif
+#ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+ p->lockdep_recursion = 0;
+#endif
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ p->blocked_on = NULL; /* not blocked yet */
#endif
p->tgid = p->pid;
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+ p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ p->compat_robust_list = NULL;
+#endif
+ INIT_LIST_HEAD(&p->pi_state_list);
+ p->pi_state_cache = NULL;
+
+ /*
+ * sigaltstack should be cleared when sharing the same VM
+ */
+ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
+ p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing should be turned off in the child regardless
* of CLONE_PTRACE.
*/
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
+#ifdef TIF_SYSCALL_EMU
+ clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
+#endif
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
p->pdeath_signal = 0;
p->exit_state = 0;
- /* Perform scheduler related setup */
- sched_fork(p);
-
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
- INIT_LIST_HEAD(&p->ptrace_children);
- INIT_LIST_HEAD(&p->ptrace_list);
+ INIT_LIST_HEAD(&p->thread_group);
+
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ sched_fork(p, clone_flags);
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/*
- * The task hasn't been attached yet, so cpus_allowed mask cannot
- * have changed. The cpus_allowed mask of the parent may have
- * changed after it was copied first time, and it may then move to
- * another CPU - so we re-copy it here and set the child's CPU to
- * the parent's CPU. This avoids alot of nasty races.
+ * The task hasn't been attached yet, so its cpus_allowed mask will
+ * not be changed, nor will its assigned CPU.
+ *
+ * The cpus_allowed mask of the parent may have changed after it was
+ * copied first time - so re-copy it here, then check the child's CPU
+ * to ensure it is on a valid CPU (and if not, just force it back to
+ * parent's CPU). This avoids a lot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
- set_task_cpu(p, smp_processor_id());
+ if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
+ !cpu_online(task_cpu(p))))
+ set_task_cpu(p, smp_processor_id());
+
+ /* CLONE_PARENT re-uses the old parent */
+ if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
+ p->parent = current->parent;
+ else
+ p->parent = current;
+
+ spin_lock(&current->sighand->siglock);
/*
- * Check for pending SIGKILL! The new thread should not be allowed
- * to slip out of an OOM kill. (or normal SIGKILL.)
- */
- if (sigismember(&current->pending.signal, SIGKILL)) {
+ * Process group and session signals need to be delivered to just the
+ * parent before the fork or both the parent and the child after the
+ * fork. Restart if a signal comes in before we add the new process to
+ * its process group.
+ * A fatal signal pending means that current will exit, so the new
+ * thread can't slip out of an OOM kill (or normal SIGKILL).
+ */
+ recalc_sigpending();
+ if (signal_pending(current)) {
+ spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
- retval = -EINTR;
+ retval = -ERESTARTNOINTR;
goto bad_fork_cleanup_namespace;
}
- /* CLONE_PARENT re-uses the old parent */
- if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
- p->real_parent = current->real_parent;
- else
- p->real_parent = current;
- p->parent = p->real_parent;
-
if (clone_flags & CLONE_THREAD) {
- spin_lock(&current->sighand->siglock);
- /*
- * Important: if an exit-all has been started then
- * do not create this new thread - the whole thread
- * group is supposed to exit anyway.
- */
- if (current->signal->flags & SIGNAL_GROUP_EXIT) {
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
- retval = -EAGAIN;
- goto bad_fork_cleanup_namespace;
- }
p->group_leader = current->group_leader;
-
- if (current->signal->group_stop_count > 0) {
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+
+ if (!cputime_eq(current->signal->it_virt_expires,
+ cputime_zero) ||
+ !cputime_eq(current->signal->it_prof_expires,
+ cputime_zero) ||
+ current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
+ !list_empty(&current->signal->cpu_timers[0]) ||
+ !list_empty(&current->signal->cpu_timers[1]) ||
+ !list_empty(&current->signal->cpu_timers[2])) {
/*
- * There is an all-stop in progress for the group.
- * We ourselves will stop as soon as we check signals.
- * Make the new thread part of that group stop too.
+ * Have child wake up on its first tick to check
+ * for process CPU timers.
*/
- current->signal->group_stop_count++;
- set_tsk_thread_flag(p, TIF_SIGPENDING);
+ p->it_prof_expires = jiffies_to_cputime(1);
}
-
- spin_unlock(&current->sighand->siglock);
}
- SET_LINKS(p);
- if (unlikely(p->ptrace & PT_PTRACED))
- __ptrace_link(p, current->parent);
+ /*
+ * inherit ioprio
+ */
+ p->ioprio = current->ioprio;
+
+ if (likely(p->pid)) {
+ add_parent(p);
+ tracehook_init_task(p);
- attach_pid(p, PIDTYPE_PID, p->pid);
- attach_pid(p, PIDTYPE_TGID, p->tgid);
- if (thread_group_leader(p)) {
- attach_pid(p, PIDTYPE_PGID, process_group(p));
- attach_pid(p, PIDTYPE_SID, p->signal->session);
- if (p->pid)
+ if (thread_group_leader(p)) {
+ p->signal->tty = current->signal->tty;
+ p->signal->pgrp = process_group(current);
+ p->signal->session = current->signal->session;
+ attach_pid(p, PIDTYPE_PGID, process_group(p));
+ attach_pid(p, PIDTYPE_SID, p->signal->session);
+
+ list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
+ }
+ attach_pid(p, PIDTYPE_PID, p->pid);
+ nr_threads++;
}
- nr_threads++;
total_forks++;
+ spin_unlock(&current->sighand->siglock);
/* p is copy of current */
vxi = p->vx_info;
atomic_inc(&vxi->cvirt.total_forks);
vx_nproc_inc(p);
}
+ nxi = p->nx_info;
+ if (nxi)
+ claim_nx_info(nxi, p);
write_unlock_irq(&tasklist_lock);
- retval = 0;
-
-fork_out:
- if (retval)
- return ERR_PTR(retval);
+ proc_fork_connector(p);
return p;
bad_fork_cleanup_namespace:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
- exit_signal(p);
+ cleanup_signal(p);
bad_fork_cleanup_sighand:
- exit_sighand(p);
+ __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_free(p->mempolicy);
+bad_fork_cleanup_cpuset:
#endif
-bad_fork_cleanup:
+ cpuset_exit(p);
+bad_fork_cleanup_delays_binfmt:
+ delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
- module_put(p->thread_info->exec_domain->module);
+ module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
free_uid(p->user);
bad_fork_cleanup_vm:
if (p->mm && !(clone_flags & CLONE_VM))
- vx_pages_sub(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
+ vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm);
bad_fork_free:
free_task(p);
- goto fork_out;
+fork_out:
+ return ERR_PTR(retval);
}
struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
return regs;
}
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
{
- task_t *task;
+ struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
if (!task)
return ERR_PTR(-ENOMEM);
init_idle(task, cpu);
- unhash_process(task);
- return task;
-}
-
-static inline int fork_traceflag (unsigned clone_flags)
-{
- if (clone_flags & CLONE_UNTRACED)
- return 0;
- else if (clone_flags & CLONE_VFORK) {
- if (current->ptrace & PT_TRACE_VFORK)
- return PTRACE_EVENT_VFORK;
- } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
- if (current->ptrace & PT_TRACE_CLONE)
- return PTRACE_EVENT_CLONE;
- } else if (current->ptrace & PT_TRACE_FORK)
- return PTRACE_EVENT_FORK;
- return 0;
+ return task;
}
/*
int __user *child_tidptr)
{
struct task_struct *p;
- int trace = 0;
- long pid = alloc_pidmap();
+ struct pid *pid = alloc_pid();
+ long nr;
- if (pid < 0)
+ if (!pid)
return -EAGAIN;
- if (unlikely(current->ptrace)) {
- trace = fork_traceflag (clone_flags);
- if (trace)
- clone_flags |= CLONE_PTRACE;
- }
+ nr = pid->nr;
- p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
+ p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
init_completion(&vfork);
}
- if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+ tracehook_report_clone(clone_flags, p);
+
+ p->flags &= ~PF_STARTING;
+
+ if (clone_flags & CLONE_STOPPED) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
+ p->state = TASK_STOPPED;
}
-
- if (!(clone_flags & CLONE_STOPPED))
- wake_up_new_task(p, clone_flags);
else
- p->state = TASK_STOPPED;
+ wake_up_new_task(p, clone_flags);
- if (unlikely (trace)) {
- current->ptrace_message = pid;
- ptrace_notify ((trace << 8) | SIGTRAP);
- }
+ tracehook_report_clone_complete(clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
wait_for_completion(&vfork);
- if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
- ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
+ tracehook_report_vfork_done(p, nr);
}
} else {
- free_pidmap(pid);
- pid = PTR_ERR(p);
+ free_pid(pid);
+ nr = PTR_ERR(p);
}
- return pid;
+ return nr;
}
-/* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
-
-/* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
-
-/* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
-
-/* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+#ifndef ARCH_MIN_MMSTRUCT_ALIGN
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
-/* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+{
+ struct sighand_struct *sighand = data;
-/* SLAB cache for mm_struct structures (tsk->mm) */
-kmem_cache_t *mm_cachep;
+ if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR)
+ spin_lock_init(&sighand->siglock);
+}
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
+ sighand_ctor, NULL);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
sizeof(struct vm_area_struct), 0,
SLAB_PANIC, NULL, NULL);
mm_cachep = kmem_cache_create("mm_struct",
- sizeof(struct mm_struct), 0,
+ sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
+
+
+/*
+ * Check constraints on flags passed to the unshare system call and
+ * force unsharing of additional process context as appropriate.
+ */
+static inline void check_unshare_flags(unsigned long *flags_ptr)
+{
+ /*
+ * If unsharing a thread from a thread group, must also
+ * unshare vm.
+ */
+ if (*flags_ptr & CLONE_THREAD)
+ *flags_ptr |= CLONE_VM;
+
+ /*
+ * If unsharing vm, must also unshare signal handlers.
+ */
+ if (*flags_ptr & CLONE_VM)
+ *flags_ptr |= CLONE_SIGHAND;
+
+ /*
+ * If unsharing signal handlers and the task was created
+ * using CLONE_THREAD, then must unshare the thread
+ */
+ if ((*flags_ptr & CLONE_SIGHAND) &&
+ (atomic_read(&current->signal->count) > 1))
+ *flags_ptr |= CLONE_THREAD;
+
+ /*
+ * If unsharing namespace, must also unshare filesystem information.
+ */
+ if (*flags_ptr & CLONE_NEWNS)
+ *flags_ptr |= CLONE_FS;
+}
+
+/*
+ * Unsharing of tasks created with CLONE_THREAD is not supported yet
+ */
+static int unshare_thread(unsigned long unshare_flags)
+{
+ if (unshare_flags & CLONE_THREAD)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Unshare the filesystem structure if it is being shared
+ */
+static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+{
+ struct fs_struct *fs = current->fs;
+
+ if ((unshare_flags & CLONE_FS) &&
+ (fs && atomic_read(&fs->count) > 1)) {
+ *new_fsp = __copy_fs_struct(current->fs);
+ if (!*new_fsp)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Unshare the namespace structure if it is being shared
+ */
+static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
+{
+ struct namespace *ns = current->namespace;
+
+ if ((unshare_flags & CLONE_NEWNS) &&
+ (ns && atomic_read(&ns->count) > 1)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ *new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
+ if (!*new_nsp)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
+ * supported yet
+ */
+static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
+{
+ struct sighand_struct *sigh = current->sighand;
+
+ if ((unshare_flags & CLONE_SIGHAND) &&
+ (sigh && atomic_read(&sigh->count) > 1))
+ return -EINVAL;
+ else
+ return 0;
+}
+
+/*
+ * Unshare vm if it is being shared
+ */
+static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
+{
+ struct mm_struct *mm = current->mm;
+
+ if ((unshare_flags & CLONE_VM) &&
+ (mm && atomic_read(&mm->mm_users) > 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Unshare file descriptor table if it is being shared
+ */
+static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+{
+ struct files_struct *fd = current->files;
+ int error = 0;
+
+ if ((unshare_flags & CLONE_FILES) &&
+ (fd && atomic_read(&fd->count) > 1)) {
+ *new_fdp = dup_fd(fd, &error);
+ if (!*new_fdp)
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
+ * supported yet
+ */
+static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
+{
+ if (unshare_flags & CLONE_SYSVSEM)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * unshare allows a process to 'unshare' part of the process
+ * context which was originally shared using clone. copy_*
+ * functions used by do_fork() cannot be used here directly
+ * because they modify an inactive task_struct that is being
+ * constructed. Here we are modifying the current, active,
+ * task_struct.
+ */
+asmlinkage long sys_unshare(unsigned long unshare_flags)
+{
+ int err = 0;
+ struct fs_struct *fs, *new_fs = NULL;
+ struct namespace *ns, *new_ns = NULL;
+ struct sighand_struct *sigh, *new_sigh = NULL;
+ struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
+ struct files_struct *fd, *new_fd = NULL;
+ struct sem_undo_list *new_ulist = NULL;
+
+ check_unshare_flags(&unshare_flags);
+
+ /* Return -EINVAL for all unsupported flags */
+ err = -EINVAL;
+ if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+ CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
+ goto bad_unshare_out;
+
+ if ((err = unshare_thread(unshare_flags)))
+ goto bad_unshare_out;
+ if ((err = unshare_fs(unshare_flags, &new_fs)))
+ goto bad_unshare_cleanup_thread;
+ if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
+ goto bad_unshare_cleanup_fs;
+ if ((err = unshare_sighand(unshare_flags, &new_sigh)))
+ goto bad_unshare_cleanup_ns;
+ if ((err = unshare_vm(unshare_flags, &new_mm)))
+ goto bad_unshare_cleanup_sigh;
+ if ((err = unshare_fd(unshare_flags, &new_fd)))
+ goto bad_unshare_cleanup_vm;
+ if ((err = unshare_semundo(unshare_flags, &new_ulist)))
+ goto bad_unshare_cleanup_fd;
+
+ if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {
+
+ task_lock(current);
+
+ if (new_fs) {
+ fs = current->fs;
+ current->fs = new_fs;
+ new_fs = fs;
+ }
+
+ if (new_ns) {
+ ns = current->namespace;
+ current->namespace = new_ns;
+ new_ns = ns;
+ }
+
+ if (new_sigh) {
+ sigh = current->sighand;
+ rcu_assign_pointer(current->sighand, new_sigh);
+ new_sigh = sigh;
+ }
+
+ if (new_mm) {
+ mm = current->mm;
+ active_mm = current->active_mm;
+ current->mm = new_mm;
+ current->active_mm = new_mm;
+ activate_mm(active_mm, new_mm);
+ new_mm = mm;
+ }
+
+ if (new_fd) {
+ fd = current->files;
+ current->files = new_fd;
+ new_fd = fd;
+ }
+
+ task_unlock(current);
+ }
+
+bad_unshare_cleanup_fd:
+ if (new_fd)
+ put_files_struct(new_fd);
+
+bad_unshare_cleanup_vm:
+ if (new_mm)
+ mmput(new_mm);
+
+bad_unshare_cleanup_sigh:
+ if (new_sigh)
+ if (atomic_dec_and_test(&new_sigh->count))
+ kmem_cache_free(sighand_cachep, new_sigh);
+
+bad_unshare_cleanup_ns:
+ if (new_ns)
+ put_namespace(new_ns);
+
+bad_unshare_cleanup_fs:
+ if (new_fs)
+ put_fs_struct(new_fs);
+
+bad_unshare_cleanup_thread:
+bad_unshare_out:
+ return err;
+}