#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
-#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
-#include <linux/ckrm_events.h>
-#include <linux/ckrm_tsk.h>
-#include <linux/ckrm_mem_inline.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
#include <linux/vs_memory.h>
+#include <linux/ckrm.h>
+#include <linux/ckrm_tsk.h>
+#include <linux/ckrm_mem_inline.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
free_task(tsk);
}
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+{
+ unsigned long flags;
+
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(add_wait_queue);
+
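+/*
+ * Exclusive waiters are queued at the tail of the list: the wakeup
+ * path (__wake_up_common) walks the queue in order, wakes every
+ * non-exclusive waiter it meets, and stops once it has woken the
+ * requested number of WQ_FLAG_EXCLUSIVE tasks.
+ */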
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
+{
+ unsigned long flags;
+
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue_tail(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(add_wait_queue_exclusive);
+
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ __remove_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(remove_wait_queue);
+
+
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the critical region).
+ */
+void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+ unsigned long flags;
+
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue(q, wait);
+ /*
+ * don't alter the task state if this is just going to
+ * queue an async wait queue callback
+ */
+ if (is_sync_wait(wait))
+ set_current_state(state);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(prepare_to_wait);
+
+void fastcall
+prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+ unsigned long flags;
+
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue_tail(q, wait);
+ /*
+ * don't alter the task state if this is just going to
+ * queue an async wait queue callback
+ */
+ if (is_sync_wait(wait))
+ set_current_state(state);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(prepare_to_wait_exclusive);
+
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ __set_current_state(TASK_RUNNING);
+ /*
+ * We can check for list emptiness outside the lock
+ * IFF:
+ * - we use the "careful" check that verifies both
+ * the next and prev pointers, so that there cannot
+ * be any half-pending updates in progress on other
+ * CPU's that we haven't seen yet (and that might
+ * still change the stack area).
+ * and
+ * - all other users take the lock (ie we can only
+ * have _one_ other CPU that looks at or modifies
+ * the list).
+ */
+ if (!list_empty_careful(&wait->task_list)) {
+ spin_lock_irqsave(&q->lock, flags);
+ list_del_init(&wait->task_list);
+ spin_unlock_irqrestore(&q->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(finish_wait);
+
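+/*
+ * Wake function used by waiters declared with DEFINE_WAIT: on a
+ * successful wakeup the entry unlinks itself from the queue, which
+ * is what normally lets finish_wait() above skip taking the queue
+ * lock.
+ */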
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ int ret = default_wake_function(wait, mode, sync, key);
+
+ if (ret)
+ list_del_init(&wait->task_list);
+ return ret;
+}
+
+EXPORT_SYMBOL(autoremove_wake_function);
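+
+/*
+ * Typical caller pattern for the API added above (an illustrative
+ * sketch, not part of this patch; "q" is the caller's wait queue
+ * and "condition" stands for whatever it is waiting on):
+ *
+ *	DEFINE_WAIT(wait);
+ *
+ *	for (;;) {
+ *		prepare_to_wait(&q, &wait, TASK_UNINTERRUPTIBLE);
+ *		if (condition)
+ *			break;
+ *		schedule();
+ *	}
+ *	finish_wait(&q, &wait);
+ */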
+
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
* value: the thread structures can take up at most an
* eighth of memory.
*/
- max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
-
+ max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
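+ /*
+ * For example (illustrative numbers only): with 4K pages and 8K
+ * kernel stacks (THREAD_SIZE/PAGE_SIZE == 2), 128MB of core is
+ * 32768 pages, giving max_threads = 32768 / 2 / 8 = 2048.
+ */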
/*
* we need to allow at least 20 threads to boot a system
*/
if(max_threads < 20)
max_threads = 20;
- init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
- init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
+ init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
+ init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
ti->task = tsk;
ckrm_cb_newtask(tsk);
- ckrm_task_mm_init(tsk);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
+#ifdef CONFIG_CKRM_RES_MEM
+ INIT_LIST_HEAD(&tsk->mm_peers);
+#endif
return tsk;
}
rb_parent = NULL;
pprev = &mm->mmap;
+ /*
+ * Add it to the mmlist after the parent.
+ * Doing it this way means that we can order the list,
+ * and fork() won't mess up the ordering significantly.
+ * Add it first so that swapoff can see any swap entries.
+ */
+ spin_lock(&mmlist_lock);
+ list_add(&mm->mmlist, &current->mm->mmlist);
+ mmlist_nr++;
+ spin_unlock(&mmlist_lock);
+
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
struct file *file;
#endif /* CONFIG_MMU */
spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
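+/* number of mms on the mmlist; the list itself is scanned by swapoff */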
+int mmlist_nr;
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
- INIT_LIST_HEAD(&mm->mmlist);
mm->core_waiters = 0;
- mm->nr_ptes = 0;
- spin_lock_init(&mm->page_table_lock);
- rwlock_init(&mm->ioctx_list_lock);
+ mm->page_table_lock = SPIN_LOCK_UNLOCKED;
+ mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
mm->ioctx_list = NULL;
mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
- ckrm_mm_init(mm);
+#ifdef CONFIG_CKRM_RES_MEM
+ INIT_LIST_HEAD(&mm->tasklist);
+ mm->peertask_lock = SPIN_LOCK_UNLOCKED;
+#endif
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm);
- ckrm_mm_setclass(mm, ckrm_get_mem_class(current));
+#ifdef CONFIG_CKRM_RES_MEM
+ mm->memclass = GET_MEM_CLASS(current);
+ mem_class_get(mm->memclass);
+#endif
}
return mm;
}
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
- ckrm_mm_clearclass(mm);
clr_vx_info(&mm->mm_vx_info);
+#ifdef CONFIG_CKRM_RES_MEM
+ /* class can be null and mm's tasklist can be empty here */
+ if (mm->memclass) {
+ mem_class_put(mm->memclass);
+ mm->memclass = NULL;
+ }
+#endif
free_mm(mm);
}
*/
void mmput(struct mm_struct *mm)
{
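+ /*
+ * Drop the last reference under mmlist_lock: get_task_mm()
+ * re-checks mm_users under the same lock, so a racing reader
+ * cannot revive an mm whose teardown has already begun here.
+ */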
- if (atomic_dec_and_test(&mm->mm_users)) {
+ if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
+ list_del(&mm->mmlist);
+ mmlist_nr--;
+ spin_unlock(&mmlist_lock);
exit_aio(mm);
exit_mmap(mm);
- if (!list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- list_del(&mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
put_swap_token(mm);
mmdrop(mm);
}
/**
* get_task_mm - acquire a reference to the task's mm
*
- * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning
- * this kernel workthread has transiently adopted a user mm with use_mm,
- * to do its AIO) is not set and if so returns a reference to it, after
+ * Returns %NULL if the task has no mm. Checks if the use count
+ * of the mm is non-zero and if so returns a reference to it, after
* bumping up the use count. User must release the mm via mmput()
* after use. Typically used by /proc and ptrace.
+ *
+ * If the use count is zero, it means that this mm is going away,
+ * so return %NULL. This only happens in the case of an AIO daemon
+ * which has temporarily adopted an mm (see use_mm), in the course
+ * of its final mmput, before exit_aio has completed.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
task_lock(task);
mm = task->mm;
if (mm) {
- if (task->flags & PF_BORROWED_MM)
+ spin_lock(&mmlist_lock);
+ if (!atomic_read(&mm->mm_users))
mm = NULL;
else
atomic_inc(&mm->mm_users);
+ spin_unlock(&mmlist_lock);
}
task_unlock(task);
return mm;
goto free_pt;
good_mm:
- ckrm_mm_setclass(mm, oldmm->memclass);
tsk->mm = mm;
tsk->active_mm = mm;
ckrm_init_mm_to_task(mm, tsk);
/* We don't need to lock fs - think why ;-) */
if (fs) {
atomic_set(&fs->count, 1);
- rwlock_init(&fs->lock);
+ fs->lock = RW_LOCK_UNLOCKED;
fs->umask = old->umask;
read_lock(&old->lock);
fs->rootmnt = mntget(old->rootmnt);
atomic_set(&newf->count, 1);
- spin_lock_init(&newf->file_lock);
+ newf->file_lock = SPIN_LOCK_UNLOCKED;
newf->next_fd = 0;
newf->max_fds = NR_OPEN_DEFAULT;
newf->max_fdset = __FD_SETSIZE;
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
- if (f) {
+ if (f)
get_file(f);
- } else {
- /*
- * The fd may be claimed in the fd bitmap but not yet
- * instantiated in the files array if a sibling thread
- * is partway through open(). So make sure that this
- * fd is available to the new process.
- */
- FD_CLR(open_files - i, newf->open_fds);
- }
*new_fds++ = f;
}
spin_unlock(&oldf->file_lock);
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
- atomic_inc(¤t->signal->live);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
- atomic_set(&sig->live, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
- sig->stop_state = 0;
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
- task_lock(current->group_leader);
- memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
- task_unlock(current->group_leader);
-
return 0;
}
goto bad_fork_cleanup_vm;
}
- p->vx_info = NULL;
- set_vx_info(&p->vx_info, current->vx_info);
- p->nx_info = NULL;
- set_nx_info(&p->nx_info, current->nx_info);
-
- /* check vserver memory */
- if (p->mm && !(clone_flags & CLONE_VM)) {
- if (vx_vmpages_avail(p->mm, p->mm->total_vm))
- vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
- else
- goto bad_fork_free;
- }
- if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
- if (!vx_rsspages_avail(p->mm, p->mm->rss))
- goto bad_fork_cleanup_vm;
- }
-
retval = -EAGAIN;
if (!vx_nproc_avail(1))
goto bad_fork_cleanup_vm;
if (atomic_read(&p->user->processes) >=
- p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+ p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
goto bad_fork_cleanup_vm;
}
#endif
- p->tgid = p->pid;
- if (clone_flags & CLONE_THREAD)
- p->tgid = current->tgid;
-
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
- if ((retval = copy_keys(clone_flags, p)))
- goto bad_fork_cleanup_mm;
if ((retval = copy_namespace(clone_flags, p)))
- goto bad_fork_cleanup_keys;
+ goto bad_fork_cleanup_mm;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_namespace;
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
+ p->tgid = p->pid;
p->group_leader = p;
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
retval = -EAGAIN;
goto bad_fork_cleanup_namespace;
}
+ p->tgid = current->tgid;
p->group_leader = current->group_leader;
if (current->signal->group_stop_count > 0) {
bad_fork_cleanup_namespace:
exit_namespace(p);
-bad_fork_cleanup_keys:
- exit_keys(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
clone_flags |= CLONE_PTRACE;
}
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
if (numtasks_get_ref(current->taskclass, 0) == 0) {
return -ENOMEM;
}
- p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
+#endif
+ p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
}
} else {
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
numtasks_put_ref(current->taskclass);
+#endif
free_pidmap(pid);
pid = PTR_ERR(p);
}