* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
-#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
+#include <linux/delayacct.h>
+#include <linux/taskstats_kern.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
- __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
-
-EXPORT_SYMBOL(tasklist_lock);
+__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
int nr_processes(void)
{
void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
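+ /* Debug check: the dying task must not still hold rt_mutex PI state. */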
+ rt_mutex_debug_task_free(tsk);
clr_vx_info(&tsk->vx_info);
clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
security_task_free(tsk);
free_uid(tsk->user);
put_group_info(tsk->group_info);
+ delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
down_write(&oldmm->mmap_sem);
flush_cache_mm(oldmm);
- down_write(&mm->mmap_sem);
+ /*
+ * Not linked in yet - no deadlock potential:
+ */
+ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm->locked_vm = 0;
mm->mmap = NULL;
if (retval)
goto out;
}
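+ /* If the architecture defines arch_dup_mmap(), let it copy arch-private mm state. */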
+#ifdef arch_dup_mmap
+ arch_dup_mmap(mm, oldmm);
+#endif
retval = 0;
out:
up_write(&mm->mmap_sem);
*/
void mmput(struct mm_struct *mm)
{
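+ /* Dropping the last reference sleeps in exit_aio()/exit_mmap() below. */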
+ might_sleep();
+
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
exit_mmap(mm);
/*
* Allocate a new files structure and copy contents from the
* passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
*/
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
int open_files, size, i, expand;
struct fdtable *old_fdt, *new_fdt;
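+ /* Default to -ENOMEM; as noted above, *errorp is meaningful only on NULL return. */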
+ *errorp = -ENOMEM;
newf = alloc_files();
if (!newf)
goto out;
* break this.
*/
tsk->files = NULL;
- error = -ENOMEM;
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live);
+ taskstats_tgid_alloc(current->signal);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
+ taskstats_tgid_init(sig);
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
tsk->it_prof_expires =
secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
}
+ acct_init_pacct(&sig->pacct);
return 0;
}
void __cleanup_signal(struct signal_struct *sig)
{
exit_thread_group_keys(sig);
+ taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
new_flags |= PF_FORKNOEXEC;
- if (!(clone_flags & CLONE_PTRACE))
- p->ptrace = 0;
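+ /* Mark the child as not yet started; do_fork() clears this before wake-up. */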
+ new_flags |= PF_STARTING;
p->flags = new_flags;
}
return current->pid;
}
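+/*
+ * Set up the priority-inheritance bookkeeping used by rt_mutexes:
+ * the pi_lock, the priority-sorted list of waiters blocked on locks
+ * this task holds, and the marker for the lock it is blocked on.
+ */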
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+ spin_lock_init(&p->pi_lock);
+ plist_head_init(&p->pi_waiters, &p->pi_lock);
+ p->pi_blocked_on = NULL;
+#endif
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static task_t *copy_process(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr,
- int pid)
+static struct task_struct *copy_process(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+ int __user *parent_tidptr,
+ int __user *child_tidptr,
+ int pid)
{
int retval;
struct task_struct *p = NULL;
p = dup_task_struct(current);
if (!p)
goto fork_out;
+
+ rt_mutex_init_task(p);
+
p->tux_info = NULL;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+#endif
init_vx_info(&p->vx_info, current->vx_info);
init_nx_info(&p->nx_info, current->nx_info);
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
+ delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
p->pid = pid;
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
- goto bad_fork_cleanup;
-
- p->proc_dentry = NULL;
+ goto bad_fork_cleanup_delays_binfmt;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
- spin_lock_init(&p->proc_lock);
+#ifdef CONFIG_PTRACE
+ INIT_LIST_HEAD(&p->ptracees);
+#endif
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
}
mpol_fix_fork_child_flag(p);
#endif
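+ /* The child starts life with a clean irq-flags tracing state. */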
+#ifdef CONFIG_TRACE_IRQFLAGS
+ p->irq_events = 0;
+ p->hardirqs_enabled = 0;
+ p->hardirq_enable_ip = 0;
+ p->hardirq_enable_event = 0;
+ p->hardirq_disable_ip = _THIS_IP_;
+ p->hardirq_disable_event = 0;
+ p->softirqs_enabled = 1;
+ p->softirq_enable_ip = _THIS_IP_;
+ p->softirq_enable_event = 0;
+ p->softirq_disable_ip = 0;
+ p->softirq_disable_event = 0;
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+#endif
+#ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+ p->lockdep_recursion = 0;
+#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
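+ /* Futex priority-inheritance state; the cache entry is allocated on demand. */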
+ INIT_LIST_HEAD(&p->pi_state_list);
+ p->pi_state_cache = NULL;
+
/*
* sigaltstack should be cleared when sharing the same VM
*/
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
- INIT_LIST_HEAD(&p->ptrace_children);
- INIT_LIST_HEAD(&p->ptrace_list);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
- p->real_parent = current->real_parent;
+ p->parent = current->parent;
else
- p->real_parent = current;
- p->parent = p->real_parent;
+ p->parent = current;
spin_lock(&current->sighand->siglock);
}
if (clone_flags & CLONE_THREAD) {
- /*
- * Important: if an exit-all has been started then
- * do not create this new thread - the whole thread
- * group is supposed to exit anyway.
- */
- if (current->signal->flags & SIGNAL_GROUP_EXIT) {
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
- retval = -EAGAIN;
- goto bad_fork_cleanup_namespace;
- }
-
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
if (likely(p->pid)) {
add_parent(p);
- if (unlikely(p->ptrace & PT_PTRACED))
- __ptrace_link(p, current->parent);
+ tracehook_init_task(p);
if (thread_group_leader(p)) {
p->signal->tty = current->signal->tty;
bad_fork_cleanup_cpuset:
#endif
cpuset_exit(p);
-bad_fork_cleanup:
+bad_fork_cleanup_delays_binfmt:
+ delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
return regs;
}
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
{
- task_t *task;
+ struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
return task;
}
-static inline int fork_traceflag (unsigned clone_flags)
-{
- if (clone_flags & CLONE_UNTRACED)
- return 0;
- else if (clone_flags & CLONE_VFORK) {
- if (current->ptrace & PT_TRACE_VFORK)
- return PTRACE_EVENT_VFORK;
- } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
- if (current->ptrace & PT_TRACE_CLONE)
- return PTRACE_EVENT_CLONE;
- } else if (current->ptrace & PT_TRACE_FORK)
- return PTRACE_EVENT_FORK;
-
- return 0;
-}
-
/*
* Ok, this is the main fork-routine.
*
int __user *child_tidptr)
{
struct task_struct *p;
- int trace = 0;
struct pid *pid = alloc_pid();
long nr;
if (!pid)
return -EAGAIN;
nr = pid->nr;
- if (unlikely(current->ptrace)) {
- trace = fork_traceflag (clone_flags);
- if (trace)
- clone_flags |= CLONE_PTRACE;
- }
p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
/*
init_completion(&vfork);
}
- if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+ tracehook_report_clone(clone_flags, p);
+
+ p->flags &= ~PF_STARTING;
+
+ if (clone_flags & CLONE_STOPPED) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
+ p->state = TASK_STOPPED;
}
-
- if (!(clone_flags & CLONE_STOPPED))
- wake_up_new_task(p, clone_flags);
else
- p->state = TASK_STOPPED;
+ wake_up_new_task(p, clone_flags);
- if (unlikely (trace)) {
- current->ptrace_message = nr;
- ptrace_notify ((trace << 8) | SIGTRAP);
- }
+ tracehook_report_clone_complete(clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
wait_for_completion(&vfork);
- if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
- ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
+ tracehook_report_vfork_done(p, nr);
}
} else {
free_pid(pid);