#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/rmap.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
+#include <linux/ckrm.h>
+#include <linux/ckrm_tsk.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/*
 * free_task(): final teardown of a task_struct once the last reference
 * is dropped — frees the kernel-stack/thread_info and then the struct
 * itself.
 *
 * NOTE(review): this is a patch fragment; lines prefixed '+' are the
 * vserver additions.  They emit a debug trace and release this task's
 * references to its vserver context (vx_info) and network context
 * (nx_info) *before* free_task_struct(), so the contexts cannot outlive
 * or dangle from a freed task.  clr_vx_info/clr_nx_info presumably drop
 * a refcount and NULL the pointer — confirm against vs_network.h.
 */
static void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ vxdprintk("freeing up task %p\n", tsk);
+ clr_vx_info(&tsk->vx_info);
+ clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
}
tsk->thread_info = ti;
ti->task = tsk;
+ ckrm_cb_newtask(tsk);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
return tsk;
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+#ifdef __HAVE_ARCH_MMAP_TOP
+ mm->mmap_top = mmap_top();
+#endif
+ set_vx_info(&mm->mm_vx_info, current->vx_info);
return mm;
}
free_mm(mm);
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
+ clr_vx_info(&mm->mm_vx_info);
free_mm(mm);
}
/* Copy the current MM stuff.. */
memcpy(mm, oldmm, sizeof(*mm));
+ mm->mm_vx_info = NULL;
if (!mm_init(mm))
goto fail_nomem;
{
int retval;
struct task_struct *p = NULL;
+ struct vx_info *vxi;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
goto fork_out;
retval = -ENOMEM;
+
p = dup_task_struct(current);
if (!p)
goto fork_out;
+ p->vx_info = NULL;
+ set_vx_info(&p->vx_info, current->vx_info);
+ p->nx_info = NULL;
+ set_nx_info(&p->nx_info, current->nx_info);
+
+ /* check vserver memory */
+ if (p->mm && !(clone_flags & CLONE_VM)) {
+ if (vx_vmpages_avail(p->mm, p->mm->total_vm))
+ vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
+ else
+ goto bad_fork_free;
+ }
+ if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
+ if (!vx_rsspages_avail(p->mm, p->mm->rss))
+ goto bad_fork_free;
+ }
+
retval = -EAGAIN;
+ if (!vx_nproc_avail(1))
+ goto bad_fork_free;
+
if (atomic_read(&p->user->processes) >=
p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
+ init_delays(p);
p->did_exec = 0;
copy_flags(clone_flags, p);
if (clone_flags & CLONE_IDLETASK)
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
nr_threads++;
+ vxi = current->vx_info;
+ if (vxi) {
+ atomic_inc(&vxi->cacct.nr_threads);
+ // atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
+ }
+ vx_nproc_inc();
write_unlock_irq(&tasklist_lock);
retval = 0;
clone_flags |= CLONE_PTRACE;
}
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
+ if (numtasks_get_ref(current->taskclass, 0) == 0) {
+ return -ENOMEM;
+ }
+#endif
+
p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
/*
* Do this prior waking up the new thread - the thread pointer
if (!IS_ERR(p)) {
struct completion vfork;
+ ckrm_cb_fork(p);
+
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
* COW overhead when the child exec()s afterwards.
*/
set_need_resched();
+ } else {
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
+ numtasks_put_ref(current->taskclass);
+#endif
}
return pid;
}