#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
static kmem_cache_t *task_struct_cachep;
#endif
+/*
+ * NOTE(review): these slab-cache declarations are being *moved* to the top of
+ * the file by this patch; the old copies near the bottom of fork.c are the
+ * '-' lines deleted further down in this diff.  Intentional semantic change:
+ * mm_cachep gains 'static' (it was previously externally visible), so no code
+ * outside fork.c may reference it after this patch -- verify no out-of-file
+ * users remain.
+ */
+/* SLAB cache for signal_struct structures (tsk->signal) */
+kmem_cache_t *signal_cachep;
+
+/* SLAB cache for sighand_struct structures (tsk->sighand) */
+kmem_cache_t *sighand_cachep;
+
+/* SLAB cache for files_struct structures (tsk->files) */
+kmem_cache_t *files_cachep;
+
+/* SLAB cache for fs_struct structures (tsk->fs) */
+kmem_cache_t *fs_cachep;
+
+/* SLAB cache for vm_area_struct structures */
+kmem_cache_t *vm_area_cachep;
+
+/* SLAB cache for mm_struct structures (tsk->mm) -- now private to this file */
+static kmem_cache_t *mm_cachep;
+
/*
 * NOTE(review): this span is a collapsed diff hunk, not one real function.
 * free_task() in mainline only releases the thread_info and task_struct; the
 * RLIMIT_NPROC/RLIMIT_SIGPENDING assignments below belong to fork_init()
 * (they run against init_task at boot), so the elided context between the
 * hunks has been lost -- TODO confirm against the full kernel/fork.c before
 * applying.
 */
void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
/* New in this patch: seed the pending-signal limit from the process limit. */
+	init_task.signal->rlim[RLIMIT_SIGPENDING] =
+		init_task.signal->rlim[RLIMIT_NPROC];
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
mm->map_count = 0;
- mm->rss = 0;
- mm->anon_rss = 0;
+ __set_mm_counter(mm, rss, 0);
+ __set_mm_counter(mm, anon_rss, 0);
cpus_clear(mm->cpu_vm_mask);
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
if (retval)
goto free_pt;
- mm->hiwater_rss = mm->rss;
+ mm->hiwater_rss = get_mm_counter(mm,rss);
mm->hiwater_vm = mm->total_vm;
good_mm:
struct file *f = *old_fds++;
if (f) {
get_file(f);
- /* FIXME sum it first for avail check and performance */
+ /* FIXME sum it first for check and performance */
vx_openfd_inc(open_files - i);
} else {
/*
/*
 * copy_signal() -- set up tsk->signal for a task being forked.
 *
 * Visible behavior in this hunk: threads created with CLONE_THREAD share the
 * parent's signal_struct (refcount bump); otherwise a fresh signal_struct is
 * initialized: refcounts, wait queues, shared pending set, POSIX timer lists,
 * the per-process real/virtual/profiling interval timers (new in this patch),
 * accounting counters, and an rlim copy taken under the group leader's
 * task_lock.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): diff context is elided between the CLONE_THREAD branch and
 * the '!sig' check -- the 'return 0;' for the shared case and the
 * kmem_cache_alloc(signal_cachep, ...) that produces 'sig' are not visible
 * here; confirm against the full file.
 */
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
+	int ret;
if (clone_flags & CLONE_THREAD) {
/*
 * BUG(review): "¤t" below is mojibake for "&current" -- the HTML
 * entity &curren; was mis-decoded somewhere in transit.  This patch
 * cannot compile until every "¤t" is restored to "&current".
 */
atomic_inc(¤t->signal->count);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
+
+	/* Thread-group keyring must be cloned before publishing sig; undo the
+	 * allocation on failure so the caller sees a clean -errno. */
+	ret = copy_thread_group_keys(tsk);
+	if (ret < 0) {
+		kmem_cache_free(signal_cachep, sig);
+		return ret;
+	}
+
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
+	/* ITIMER_REAL now lives in signal_struct (process-wide), not the task;
+	 * NOTE(review): real_timer fields are set before init_timer() -- verify
+	 * init_timer() does not clobber .function/.data on this kernel. */
+	sig->it_real_value = sig->it_real_incr = 0;
+	sig->real_timer.function = it_real_fn;
+	sig->real_timer.data = (unsigned long) tsk;
+	init_timer(&sig->real_timer);
+
+	/* Process-wide ITIMER_VIRTUAL/ITIMER_PROF start disabled. */
+	sig->it_virt_expires = cputime_zero;
+	sig->it_virt_incr = cputime_zero;
+	sig->it_prof_expires = cputime_zero;
+	sig->it_prof_incr = cputime_zero;
+
sig->tty = current->signal->tty;
sig->pgrp = process_group(current);
sig->session = current->signal->session;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
+	sig->sched_time = 0;
+	/* Three lists, one per POSIX CPU clock (PROF, VIRT, SCHED). */
+	INIT_LIST_HEAD(&sig->cpu_timers[0]);
+	INIT_LIST_HEAD(&sig->cpu_timers[1]);
+	INIT_LIST_HEAD(&sig->cpu_timers[2]);
/* Snapshot parent's rlimits atomically w.r.t. concurrent setrlimit. */
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
+	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+		/*
+		 * New sole thread in the process gets an expiry time
+		 * of the whole CPU time limit.
+		 */
+		tsk->it_prof_expires =
+			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+	}
+
return 0;
}
goto bad_fork_free;
}
if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
- if (!vx_rsspages_avail(p->mm, p->mm->rss))
+ if (!vx_rsspages_avail(p->mm, get_mm_counter(p->mm, rss)))
goto bad_fork_cleanup_vm;
}
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
- p->it_real_value = 0;
- p->it_real_incr = 0;
- p->it_virt_value = cputime_zero;
- p->it_virt_incr = cputime_zero;
- p->it_prof_value = cputime_zero;
- p->it_prof_incr = cputime_zero;
- init_timer(&p->real_timer);
- p->real_timer.data = (unsigned long) p;
-
p->utime = cputime_zero;
p->stime = cputime_zero;
+ p->sched_time = 0;
p->rchar = 0; /* I/O counter: bytes read */
p->wchar = 0; /* I/O counter: bytes written */
p->syscr = 0; /* I/O counter: read syscalls */
p->syscw = 0; /* I/O counter: write syscalls */
acct_clear_integrals(p);
+ p->it_virt_expires = cputime_zero;
+ p->it_prof_expires = cputime_zero;
+ p->it_sched_expires = 0;
+ INIT_LIST_HEAD(&p->cpu_timers[0]);
+ INIT_LIST_HEAD(&p->cpu_timers[1]);
+ INIT_LIST_HEAD(&p->cpu_timers[2]);
+
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->security = NULL;
set_tsk_thread_flag(p, TIF_SIGPENDING);
}
+ if (!cputime_eq(current->signal->it_virt_expires,
+ cputime_zero) ||
+ !cputime_eq(current->signal->it_prof_expires,
+ cputime_zero) ||
+ current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
+	    !list_empty(&current->signal->cpu_timers[0]) ||
+	    !list_empty(&current->signal->cpu_timers[1]) ||
+	    !list_empty(&current->signal->cpu_timers[2])) {
+ /*
+ * Have child wake up on its first tick to check
+ * for process CPU timers.
+ */
+ p->it_prof_expires = jiffies_to_cputime(1);
+ }
+
spin_unlock(&current->sighand->siglock);
}
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
+ cpuset_fork(p);
+
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
return pid;
}
-/* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
-
-/* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
-
-/* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
-
-/* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
-
-/* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
-
-/* SLAB cache for mm_struct structures (tsk->mm) */
-kmem_cache_t *mm_cachep;
-
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",