#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
+#include <linux/vinline.h>
+#include <linux/ninline.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/*
 * free_task() - final teardown of a dead task's memory.
 *
 * NOTE(review): this span is part of a unified-diff fragment; the
 * lines prefixed with '+' are patch additions (Linux-VServer style
 * context accounting), so the text is patch content, not directly
 * compilable C.  The additions release the task's references to its
 * vserver context (vx_info) and network context (nx_info) before the
 * thread_info and task_struct themselves are freed — presumably to
 * avoid leaking a context refcount per exited task; confirm against
 * the vinline.h/ninline.h helpers included above.
 */
static void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ clr_vx_info(&tsk->vx_info); /* patch addition: drop vserver context ref */
+ clr_nx_info(&tsk->nx_info); /* patch addition: drop network context ref */
free_task_struct(tsk);
}
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+ set_vx_info(&mm->mm_vx_info, current->vx_info);
return mm;
}
free_mm(mm);
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
+ clr_vx_info(&mm->mm_vx_info);
free_mm(mm);
}
/* Copy the current MM stuff.. */
memcpy(mm, oldmm, sizeof(*mm));
+ mm->mm_vx_info = NULL;
if (!mm_init(mm))
goto fail_nomem;
{
int retval;
struct task_struct *p = NULL;
+ struct vx_info *vxi;
+ struct nx_info *nxi;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
goto fork_out;
retval = -ENOMEM;
+
p = dup_task_struct(current);
if (!p)
goto fork_out;
+ vxi = get_vx_info(current->vx_info);
+ nxi = get_nx_info(current->nx_info);
+
+ /* check vserver memory */
+ if (p->mm && !(clone_flags & CLONE_VM)) {
+ if (vx_vmpages_avail(p->mm, p->mm->total_vm))
+ vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
+ else
+ goto bad_fork_free;
+ }
+ if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
+ if (!vx_rsspages_avail(p->mm, p->mm->rss))
+ goto bad_fork_free;
+ }
+
retval = -EAGAIN;
+ if (vxi && (atomic_read(&vxi->limit.res[RLIMIT_NPROC])
+ >= vxi->limit.rlim[RLIMIT_NPROC]))
+ goto bad_fork_free;
+
if (atomic_read(&p->user->processes) >=
p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
nr_threads++;
+ if (vxi) {
+ atomic_inc(&vxi->cacct.nr_threads);
+ atomic_inc(&vxi->limit.res[RLIMIT_NPROC]);
+ }
write_unlock_irq(&tasklist_lock);
retval = 0;