Merge to VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / kernel / fork.c
1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/config.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/unistd.h>
18 #include <linux/smp_lock.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/completion.h>
22 #include <linux/namespace.h>
23 #include <linux/personality.h>
24 #include <linux/mempolicy.h>
25 #include <linux/sem.h>
26 #include <linux/file.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/fs.h>
30 #include <linux/cpu.h>
31 #include <linux/security.h>
32 #include <linux/swap.h>
33 #include <linux/syscalls.h>
34 #include <linux/jiffies.h>
35 #include <linux/futex.h>
36 #include <linux/ptrace.h>
37 #include <linux/mount.h>
38 #include <linux/audit.h>
39 #include <linux/rmap.h>
40 #include <linux/vs_network.h>
41 #include <linux/vs_limit.h>
42 #include <linux/vs_memory.h>
43 #include <linux/ckrm.h>
44 #include <linux/ckrm_tsk.h>
45
46 #include <asm/pgtable.h>
47 #include <asm/pgalloc.h>
48 #include <asm/uaccess.h>
49 #include <asm/mmu_context.h>
50 #include <asm/cacheflush.h>
51 #include <asm/tlbflush.h>
52
53 /* The idle threads do not count..
54  * Protected by write_lock_irq(&tasklist_lock)
55  */
56 int nr_threads;
57
58 int max_threads;
59 unsigned long total_forks;      /* Handle normal Linux uptimes. */
60
61 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
62
63 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
64
65 EXPORT_SYMBOL(tasklist_lock);
66
67 int nr_processes(void)
68 {
69         int cpu;
70         int total = 0;
71
72         for_each_online_cpu(cpu)
73                 total += per_cpu(process_counts, cpu);
74
75         return total;
76 }
77
78 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
79 # define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
80 # define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
81 static kmem_cache_t *task_struct_cachep;
82 #endif
83
84 static void free_task(struct task_struct *tsk)
85 {
86         free_thread_info(tsk->thread_info);
87         clr_vx_info(&tsk->vx_info);
88         clr_nx_info(&tsk->nx_info);
89         free_task_struct(tsk);
90 }
91
92 void __put_task_struct(struct task_struct *tsk)
93 {
94         WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
95         WARN_ON(atomic_read(&tsk->usage));
96         WARN_ON(tsk == current);
97
98         if (unlikely(tsk->audit_context))
99                 audit_free(tsk);
100         security_task_free(tsk);
101         free_uid(tsk->user);
102         put_group_info(tsk->group_info);
103         free_task(tsk);
104 }
105
106 void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
107 {
108         unsigned long flags;
109
110         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
111         spin_lock_irqsave(&q->lock, flags);
112         __add_wait_queue(q, wait);
113         spin_unlock_irqrestore(&q->lock, flags);
114 }
115
116 EXPORT_SYMBOL(add_wait_queue);
117
118 void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
119 {
120         unsigned long flags;
121
122         wait->flags |= WQ_FLAG_EXCLUSIVE;
123         spin_lock_irqsave(&q->lock, flags);
124         __add_wait_queue_tail(q, wait);
125         spin_unlock_irqrestore(&q->lock, flags);
126 }
127
128 EXPORT_SYMBOL(add_wait_queue_exclusive);
129
130 void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
131 {
132         unsigned long flags;
133
134         spin_lock_irqsave(&q->lock, flags);
135         __remove_wait_queue(q, wait);
136         spin_unlock_irqrestore(&q->lock, flags);
137 }
138
139 EXPORT_SYMBOL(remove_wait_queue);
140
141
142 /*
143  * Note: we use "set_current_state()" _after_ the wait-queue add,
144  * because we need a memory barrier there on SMP, so that any
145  * wake-function that tests for the wait-queue being active
146  * will be guaranteed to see waitqueue addition _or_ subsequent
147  * tests in this thread will see the wakeup having taken place.
148  *
149  * The spin_unlock() itself is semi-permeable and only protects
150  * one way (it only protects stuff inside the critical region and
151  * stops them from bleeding out - it would still allow subsequent
152  * loads to move into the critical region).
153  */
154 void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
155 {
156         unsigned long flags;
157
158         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
159         spin_lock_irqsave(&q->lock, flags);
160         if (list_empty(&wait->task_list))
161                 __add_wait_queue(q, wait);
162         set_current_state(state);
163         spin_unlock_irqrestore(&q->lock, flags);
164 }
165
166 EXPORT_SYMBOL(prepare_to_wait);
167
168 void fastcall
169 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
170 {
171         unsigned long flags;
172
173         wait->flags |= WQ_FLAG_EXCLUSIVE;
174         spin_lock_irqsave(&q->lock, flags);
175         if (list_empty(&wait->task_list))
176                 __add_wait_queue_tail(q, wait);
177         set_current_state(state);
178         spin_unlock_irqrestore(&q->lock, flags);
179 }
180
181 EXPORT_SYMBOL(prepare_to_wait_exclusive);
182
183 void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
184 {
185         unsigned long flags;
186
187         __set_current_state(TASK_RUNNING);
188         /*
189          * We can check for list emptiness outside the lock
190          * IFF:
191          *  - we use the "careful" check that verifies both
192          *    the next and prev pointers, so that there cannot
193          *    be any half-pending updates in progress on other
194          *    CPU's that we haven't seen yet (and that might
195  *    still change the stack area).
196          * and
197          *  - all other users take the lock (ie we can only
198          *    have _one_ other CPU that looks at or modifies
199          *    the list).
200          */
201         if (!list_empty_careful(&wait->task_list)) {
202                 spin_lock_irqsave(&q->lock, flags);
203                 list_del_init(&wait->task_list);
204                 spin_unlock_irqrestore(&q->lock, flags);
205         }
206 }
207
208 EXPORT_SYMBOL(finish_wait);
209
210 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
211 {
212         int ret = default_wake_function(wait, mode, sync, key);
213
214         if (ret)
215                 list_del_init(&wait->task_list);
216         return ret;
217 }
218
219 EXPORT_SYMBOL(autoremove_wake_function);
220
221 void __init fork_init(unsigned long mempages)
222 {
223 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
224 #ifndef ARCH_MIN_TASKALIGN
225 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
226 #endif
227         /* create a slab on which task_structs can be allocated */
228         task_struct_cachep =
229                 kmem_cache_create("task_struct", sizeof(struct task_struct),
230                         ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
231 #endif
232
233         /*
234          * The default maximum number of threads is set to a safe
235          * value: the thread structures can take up at most half
236          * of memory.
237          */
238         max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
239         /*
240          * we need to allow at least 20 threads to boot a system
241          */
242         if(max_threads < 20)
243                 max_threads = 20;
244
245         init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
246         init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
247 }
248
249 static struct task_struct *dup_task_struct(struct task_struct *orig)
250 {
251         struct task_struct *tsk;
252         struct thread_info *ti;
253
254         prepare_to_copy(orig);
255
256         tsk = alloc_task_struct();
257         if (!tsk)
258                 return NULL;
259
260         ti = alloc_thread_info(tsk);
261         if (!ti) {
262                 free_task_struct(tsk);
263                 return NULL;
264         }
265
266         *ti = *orig->thread_info;
267         *tsk = *orig;
268         tsk->thread_info = ti;
269         ti->task = tsk;
270
271         ckrm_cb_newtask(tsk);
272         /* One for us, one for whoever does the "release_task()" (usually parent) */
273         atomic_set(&tsk->usage,2);
274         return tsk;
275 }
276
277 #ifdef CONFIG_MMU
278 static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
279 {
280         struct vm_area_struct * mpnt, *tmp, **pprev;
281         struct rb_node **rb_link, *rb_parent;
282         int retval;
283         unsigned long charge;
284         struct mempolicy *pol;
285
286         down_write(&oldmm->mmap_sem);
287         flush_cache_mm(current->mm);
288         mm->locked_vm = 0;
289         mm->mmap = NULL;
290         mm->mmap_cache = NULL;
291         mm->free_area_cache = oldmm->mmap_base;
292         mm->map_count = 0;
293         mm->rss = 0;
294         cpus_clear(mm->cpu_vm_mask);
295         mm->mm_rb = RB_ROOT;
296         rb_link = &mm->mm_rb.rb_node;
297         rb_parent = NULL;
298         pprev = &mm->mmap;
299
300         /*
301          * Add it to the mmlist after the parent.
302          * Doing it this way means that we can order the list,
303          * and fork() won't mess up the ordering significantly.
304          * Add it first so that swapoff can see any swap entries.
305          */
306         spin_lock(&mmlist_lock);
307         list_add(&mm->mmlist, &current->mm->mmlist);
308         mmlist_nr++;
309         spin_unlock(&mmlist_lock);
310
311         for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
312                 struct file *file;
313
314                 if(mpnt->vm_flags & VM_DONTCOPY)
315                         continue;
316                 charge = 0;
317                 if (mpnt->vm_flags & VM_ACCOUNT) {
318                         unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
319                         if (security_vm_enough_memory(len))
320                                 goto fail_nomem;
321                         charge = len;
322                 }
323                 tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
324                 if (!tmp)
325                         goto fail_nomem;
326                 *tmp = *mpnt;
327                 pol = mpol_copy(vma_policy(mpnt));
328                 retval = PTR_ERR(pol);
329                 if (IS_ERR(pol))
330                         goto fail_nomem_policy;
331                 vma_set_policy(tmp, pol);
332                 tmp->vm_flags &= ~VM_LOCKED;
333                 tmp->vm_mm = mm;
334                 tmp->vm_next = NULL;
335                 anon_vma_link(tmp);
336                 vma_prio_tree_init(tmp);
337                 file = tmp->vm_file;
338                 if (file) {
339                         struct inode *inode = file->f_dentry->d_inode;
340                         get_file(file);
341                         if (tmp->vm_flags & VM_DENYWRITE)
342                                 atomic_dec(&inode->i_writecount);
343       
344                         /* insert tmp into the share list, just after mpnt */
345                         spin_lock(&file->f_mapping->i_mmap_lock);
346                         flush_dcache_mmap_lock(file->f_mapping);
347                         vma_prio_tree_add(tmp, mpnt);
348                         flush_dcache_mmap_unlock(file->f_mapping);
349                         spin_unlock(&file->f_mapping->i_mmap_lock);
350                 }
351
352                 /*
353                  * Link in the new vma and copy the page table entries:
354                  * link in first so that swapoff can see swap entries,
355  * and try_to_unmap_one's find_vma finds the new vma.
356                  */
357                 spin_lock(&mm->page_table_lock);
358                 *pprev = tmp;
359                 pprev = &tmp->vm_next;
360
361                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
362                 rb_link = &tmp->vm_rb.rb_right;
363                 rb_parent = &tmp->vm_rb;
364
365                 mm->map_count++;
366                 retval = copy_page_range(mm, current->mm, tmp);
367                 spin_unlock(&mm->page_table_lock);
368
369                 if (tmp->vm_ops && tmp->vm_ops->open)
370                         tmp->vm_ops->open(tmp);
371
372                 if (retval)
373                         goto out;
374         }
375         retval = 0;
376
377 out:
378         flush_tlb_mm(current->mm);
379         up_write(&oldmm->mmap_sem);
380         return retval;
381 fail_nomem_policy:
382         kmem_cache_free(vm_area_cachep, tmp);
383 fail_nomem:
384         retval = -ENOMEM;
385         vm_unacct_memory(charge);
386         goto out;
387 }
388
389 static inline int mm_alloc_pgd(struct mm_struct * mm)
390 {
391         mm->pgd = pgd_alloc(mm);
392         if (unlikely(!mm->pgd))
393                 return -ENOMEM;
394         return 0;
395 }
396
397 static inline void mm_free_pgd(struct mm_struct * mm)
398 {
399         pgd_free(mm->pgd);
400 }
401 #else
402 #define dup_mmap(mm, oldmm)     (0)
403 #define mm_alloc_pgd(mm)        (0)
404 #define mm_free_pgd(mm)
405 #endif /* CONFIG_MMU */
406
407 spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
408 int mmlist_nr;
409
410 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
411 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
412
413 #include <linux/init_task.h>
414
415 static struct mm_struct * mm_init(struct mm_struct * mm)
416 {
417         atomic_set(&mm->mm_users, 1);
418         atomic_set(&mm->mm_count, 1);
419         init_rwsem(&mm->mmap_sem);
420         mm->core_waiters = 0;
421         mm->page_table_lock = SPIN_LOCK_UNLOCKED;
422         mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
423         mm->ioctx_list = NULL;
424         mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
425         mm->free_area_cache = TASK_UNMAPPED_BASE;
426
427         if (likely(!mm_alloc_pgd(mm))) {
428                 mm->def_flags = 0;
429                 set_vx_info(&mm->mm_vx_info, current->vx_info);
430                 return mm;
431         }
432         free_mm(mm);
433         return NULL;
434 }
435
436 /*
437  * Allocate and initialize an mm_struct.
438  */
439 struct mm_struct * mm_alloc(void)
440 {
441         struct mm_struct * mm;
442
443         mm = allocate_mm();
444         if (mm) {
445                 memset(mm, 0, sizeof(*mm));
446                 mm = mm_init(mm);
447         }
448         return mm;
449 }
450
451 /*
452  * Called when the last reference to the mm
453  * is dropped: either by a lazy thread or by
454  * mmput. Free the page directory and the mm.
455  */
456 void fastcall __mmdrop(struct mm_struct *mm)
457 {
458         BUG_ON(mm == &init_mm);
459         mm_free_pgd(mm);
460         destroy_context(mm);
461         clr_vx_info(&mm->mm_vx_info);
462         free_mm(mm);
463 }
464
465 /*
466  * Decrement the use count and release all resources for an mm.
467  */
468 void mmput(struct mm_struct *mm)
469 {
470         if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
471                 list_del(&mm->mmlist);
472                 mmlist_nr--;
473                 spin_unlock(&mmlist_lock);
474                 exit_aio(mm);
475                 exit_mmap(mm);
476                 put_swap_token(mm);
477                 mmdrop(mm);
478         }
479 }
480
481 /*
482  * Checks if the use count of an mm is non-zero and if so
483  * returns a reference to it after bumping up the use count.
484  * If the use count is zero, it means this mm is going away,
485  * so return NULL.
486  */
487 struct mm_struct *mmgrab(struct mm_struct *mm)
488 {
489         spin_lock(&mmlist_lock);
490         if (!atomic_read(&mm->mm_users))
491                 mm = NULL;
492         else
493                 atomic_inc(&mm->mm_users);
494         spin_unlock(&mmlist_lock);
495         return mm;
496 }
497
498 /* Please note the differences between mmput and mm_release.
499  * mmput is called whenever we stop holding onto a mm_struct,
500  * error, success, whatever.
501  *
502  * mm_release is called after a mm_struct has been removed
503  * from the current process.
504  *
505  * This difference is important for error handling, when we
506  * only half set up a mm_struct for a new process and need to restore
507  * the old one.  Because we mmput the new mm_struct before
508  * restoring the old one. . .
509  * Eric Biederman 10 January 1998
510  */
511 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
512 {
513         struct completion *vfork_done = tsk->vfork_done;
514
515         /* Get rid of any cached register state */
516         deactivate_mm(tsk, mm);
517
518         /* notify parent sleeping on vfork() */
519         if (vfork_done) {
520                 tsk->vfork_done = NULL;
521                 complete(vfork_done);
522         }
523         if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
524                 u32 __user * tidptr = tsk->clear_child_tid;
525                 tsk->clear_child_tid = NULL;
526
527                 /*
528                  * We don't check the error code - if userspace has
529                  * not set up a proper pointer then tough luck.
530                  */
531                 put_user(0, tidptr);
532                 sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
533         }
534 }
535
536 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
537 {
538         struct mm_struct * mm, *oldmm;
539         int retval;
540
541         tsk->min_flt = tsk->maj_flt = 0;
542         tsk->cmin_flt = tsk->cmaj_flt = 0;
543         tsk->nvcsw = tsk->nivcsw = tsk->cnvcsw = tsk->cnivcsw = 0;
544
545         tsk->mm = NULL;
546         tsk->active_mm = NULL;
547
548         /*
549          * Are we cloning a kernel thread?
550          *
551  * We need to steal an active VM for that..
552          */
553         oldmm = current->mm;
554         if (!oldmm)
555                 return 0;
556
557         if (clone_flags & CLONE_VM) {
558                 atomic_inc(&oldmm->mm_users);
559                 mm = oldmm;
560                 /*
561                  * There are cases where the PTL is held to ensure no
562                  * new threads start up in user mode using an mm, which
563                  * allows optimizing out ipis; the tlb_gather_mmu code
564                  * is an example.
565                  */
566                 spin_unlock_wait(&oldmm->page_table_lock);
567                 goto good_mm;
568         }
569
570         retval = -ENOMEM;
571         mm = allocate_mm();
572         if (!mm)
573                 goto fail_nomem;
574
575         /* Copy the current MM stuff.. */
576         memcpy(mm, oldmm, sizeof(*mm));
577         mm->mm_vx_info = NULL;
578         if (!mm_init(mm))
579                 goto fail_nomem;
580
581         if (init_new_context(tsk,mm))
582                 goto fail_nocontext;
583
584         retval = dup_mmap(mm, oldmm);
585         if (retval)
586                 goto free_pt;
587
588 good_mm:
589         tsk->mm = mm;
590         tsk->active_mm = mm;
591         return 0;
592
593 free_pt:
594         mmput(mm);
595 fail_nomem:
596         return retval;
597
598 fail_nocontext:
599         /*
600          * If init_new_context() failed, we cannot use mmput() to free the mm
601          * because it calls destroy_context()
602          */
603         mm_free_pgd(mm);
604         free_mm(mm);
605         return retval;
606 }
607
608 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
609 {
610         struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
611         /* We don't need to lock fs - think why ;-) */
612         if (fs) {
613                 atomic_set(&fs->count, 1);
614                 fs->lock = RW_LOCK_UNLOCKED;
615                 fs->umask = old->umask;
616                 read_lock(&old->lock);
617                 fs->rootmnt = mntget(old->rootmnt);
618                 fs->root = dget(old->root);
619                 fs->pwdmnt = mntget(old->pwdmnt);
620                 fs->pwd = dget(old->pwd);
621                 if (old->altroot) {
622                         fs->altrootmnt = mntget(old->altrootmnt);
623                         fs->altroot = dget(old->altroot);
624                 } else {
625                         fs->altrootmnt = NULL;
626                         fs->altroot = NULL;
627                 }
628                 read_unlock(&old->lock);
629         }
630         return fs;
631 }
632
633 struct fs_struct *copy_fs_struct(struct fs_struct *old)
634 {
635         return __copy_fs_struct(old);
636 }
637
638 EXPORT_SYMBOL_GPL(copy_fs_struct);
639
640 static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
641 {
642         if (clone_flags & CLONE_FS) {
643                 atomic_inc(&current->fs->count);
644                 return 0;
645         }
646         tsk->fs = __copy_fs_struct(current->fs);
647         if (!tsk->fs)
648                 return -ENOMEM;
649         return 0;
650 }
651
652 static int count_open_files(struct files_struct *files, int size)
653 {
654         int i;
655
656         /* Find the last open fd */
657         for (i = size/(8*sizeof(long)); i > 0; ) {
658                 if (files->open_fds->fds_bits[--i])
659                         break;
660         }
661         i = (i+1) * 8 * sizeof(long);
662         return i;
663 }
664
665 static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
666 {
667         struct files_struct *oldf, *newf;
668         struct file **old_fds, **new_fds;
669         int open_files, nfds, size, i, error = 0;
670
671         /*
672          * A background process may not have any files ...
673          */
674         oldf = current->files;
675         if (!oldf)
676                 goto out;
677
678         if (clone_flags & CLONE_FILES) {
679                 atomic_inc(&oldf->count);
680                 goto out;
681         }
682
683         /*
684          * Note: we may be using current for both targets (See exec.c)
685          * This works because we cache current->files (old) as oldf. Don't
686          * break this.
687          */
688         tsk->files = NULL;
689         error = -ENOMEM;
690         newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
691         if (!newf) 
692                 goto out;
693
694         atomic_set(&newf->count, 1);
695
696         newf->file_lock     = SPIN_LOCK_UNLOCKED;
697         newf->next_fd       = 0;
698         newf->max_fds       = NR_OPEN_DEFAULT;
699         newf->max_fdset     = __FD_SETSIZE;
700         newf->close_on_exec = &newf->close_on_exec_init;
701         newf->open_fds      = &newf->open_fds_init;
702         newf->fd            = &newf->fd_array[0];
703
704         /* We don't yet have the oldf readlock, but even if the old
705            fdset gets grown now, we'll only copy up to "size" fds */
706         size = oldf->max_fdset;
707         if (size > __FD_SETSIZE) {
708                 newf->max_fdset = 0;
709                 spin_lock(&newf->file_lock);
710                 error = expand_fdset(newf, size-1);
711                 spin_unlock(&newf->file_lock);
712                 if (error)
713                         goto out_release;
714         }
715         spin_lock(&oldf->file_lock);
716
717         open_files = count_open_files(oldf, size);
718
719         /*
720          * Check whether we need to allocate a larger fd array.
721          * Note: we're not a clone task, so the open count won't
722          * change.
723          */
724         nfds = NR_OPEN_DEFAULT;
725         if (open_files > nfds) {
726                 spin_unlock(&oldf->file_lock);
727                 newf->max_fds = 0;
728                 spin_lock(&newf->file_lock);
729                 error = expand_fd_array(newf, open_files-1);
730                 spin_unlock(&newf->file_lock);
731                 if (error) 
732                         goto out_release;
733                 nfds = newf->max_fds;
734                 spin_lock(&oldf->file_lock);
735         }
736
737         old_fds = oldf->fd;
738         new_fds = newf->fd;
739
740         memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
741         memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);
742
743         for (i = open_files; i != 0; i--) {
744                 struct file *f = *old_fds++;
745                 if (f)
746                         get_file(f);
747                 *new_fds++ = f;
748         }
749         spin_unlock(&oldf->file_lock);
750
751         /* compute the remainder to be cleared */
752         size = (newf->max_fds - open_files) * sizeof(struct file *);
753
754         /* This is long word aligned thus could use an optimized version */
755         memset(new_fds, 0, size); 
756
757         if (newf->max_fdset > open_files) {
758                 int left = (newf->max_fdset-open_files)/8;
759                 int start = open_files / (8 * sizeof(unsigned long));
760
761                 memset(&newf->open_fds->fds_bits[start], 0, left);
762                 memset(&newf->close_on_exec->fds_bits[start], 0, left);
763         }
764
765         tsk->files = newf;
766         error = 0;
767 out:
768         return error;
769
770 out_release:
771         free_fdset (newf->close_on_exec, newf->max_fdset);
772         free_fdset (newf->open_fds, newf->max_fdset);
773         kmem_cache_free(files_cachep, newf);
774         goto out;
775 }
776
777 /*
778  *      Helper to unshare the files of the current task.
779  *      We don't want to expose copy_files internals to
780  *      the exec layer of the kernel.
781  */
782
783 int unshare_files(void)
784 {
785         struct files_struct *files  = current->files;
786         int rc;
787
788         if(!files)
789                 BUG();
790
791         /* This can race but the race causes us to copy when we don't
792            need to and drop the copy */
793         if(atomic_read(&files->count) == 1)
794         {
795                 atomic_inc(&files->count);
796                 return 0;
797         }
798         rc = copy_files(0, current);
799         if(rc)
800                 current->files = files;
801         return rc;
802 }
803
804 EXPORT_SYMBOL(unshare_files);
805
806 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
807 {
808         struct sighand_struct *sig;
809
810         if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
811                 atomic_inc(&current->sighand->count);
812                 return 0;
813         }
814         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
815         tsk->sighand = sig;
816         if (!sig)
817                 return -ENOMEM;
818         spin_lock_init(&sig->siglock);
819         atomic_set(&sig->count, 1);
820         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
821         return 0;
822 }
823
824 static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
825 {
826         struct signal_struct *sig;
827
828         if (clone_flags & CLONE_THREAD) {
829                 atomic_inc(&current->signal->count);
830                 return 0;
831         }
832         sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
833         tsk->signal = sig;
834         if (!sig)
835                 return -ENOMEM;
836         atomic_set(&sig->count, 1);
837         sig->group_exit = 0;
838         sig->group_exit_code = 0;
839         sig->group_exit_task = NULL;
840         sig->group_stop_count = 0;
841         sig->curr_target = NULL;
842         init_sigpending(&sig->shared_pending);
843         INIT_LIST_HEAD(&sig->posix_timers);
844
845         sig->tty = current->signal->tty;
846         sig->pgrp = process_group(current);
847         sig->session = current->signal->session;
848         sig->leader = 0;        /* session leadership doesn't inherit */
849         sig->tty_old_pgrp = 0;
850
851         return 0;
852 }
853
854 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
855 {
856         unsigned long new_flags = p->flags;
857
858         new_flags &= ~PF_SUPERPRIV;
859         new_flags |= PF_FORKNOEXEC;
860         if (!(clone_flags & CLONE_PTRACE))
861                 p->ptrace = 0;
862         p->flags = new_flags;
863 }
864
865 asmlinkage long sys_set_tid_address(int __user *tidptr)
866 {
867         current->clear_child_tid = tidptr;
868
869         return current->pid;
870 }
871
872 /*
873  * This creates a new process as a copy of the old one,
874  * but does not actually start it yet.
875  *
876  * It copies the registers, and all the appropriate
877  * parts of the process environment (as per the clone
878  * flags). The actual kick-off is left to the caller.
879  */
880 struct task_struct *copy_process(unsigned long clone_flags,
881                                  unsigned long stack_start,
882                                  struct pt_regs *regs,
883                                  unsigned long stack_size,
884                                  int __user *parent_tidptr,
885                                  int __user *child_tidptr)
886 {
887         int retval;
888         struct task_struct *p = NULL;
889         struct vx_info *vxi;
890
891         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
892                 return ERR_PTR(-EINVAL);
893
894         /*
895          * Thread groups must share signals as well, and detached threads
896          * can only be started up within the thread group.
897          */
898         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
899                 return ERR_PTR(-EINVAL);
900
901         /*
902          * Shared signal handlers imply shared VM. By way of the above,
903          * thread groups also imply shared VM. Blocking this case allows
904          * for various simplifications in other code.
905          */
906         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
907                 return ERR_PTR(-EINVAL);
908
909         retval = security_task_create(clone_flags);
910         if (retval)
911                 goto fork_out;
912
913         retval = -ENOMEM;
914         p = dup_task_struct(current);
915         if (!p)
916                 goto fork_out;
917         p->tux_info = NULL;
918
919         p->vx_info = NULL;
920         set_vx_info(&p->vx_info, current->vx_info);
921         p->nx_info = NULL;
922         set_nx_info(&p->nx_info, current->nx_info);
923
924         /* check vserver memory */
925         if (p->mm && !(clone_flags & CLONE_VM)) {
926                 if (vx_vmpages_avail(p->mm, p->mm->total_vm))
927                         vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
928                 else
929                         goto bad_fork_free;
930         }
931         if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
932                 if (!vx_rsspages_avail(p->mm, p->mm->rss))
933                         goto bad_fork_cleanup_vm;
934         }
935
936         retval = -EAGAIN;
937         if (!vx_nproc_avail(1))
938                 goto bad_fork_cleanup_vm;
939
940         if (atomic_read(&p->user->processes) >=
941                         p->rlim[RLIMIT_NPROC].rlim_cur) {
942                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
943                                 p->user != &root_user)
944                         goto bad_fork_cleanup_vm;
945         }
946
947         atomic_inc(&p->user->__count);
948         atomic_inc(&p->user->processes);
949         get_group_info(p->group_info);
950
951         /*
952          * If multiple threads are within copy_process(), then this check
953          * triggers too late. This doesn't hurt, the check is only there
954          * to stop root fork bombs.
955          */
956         if (nr_threads >= max_threads)
957                 goto bad_fork_cleanup_count;
958
959         if (!try_module_get(p->thread_info->exec_domain->module))
960                 goto bad_fork_cleanup_count;
961
962         if (p->binfmt && !try_module_get(p->binfmt->module))
963                 goto bad_fork_cleanup_put_domain;
964
965         init_delays(p);
966         p->did_exec = 0;
967         copy_flags(clone_flags, p);
968         if (clone_flags & CLONE_IDLETASK)
969                 p->pid = 0;
970         else {
971                 p->pid = alloc_pidmap();
972                 if (p->pid == -1)
973                         goto bad_fork_cleanup;
974         }
975         retval = -EFAULT;
976         if (clone_flags & CLONE_PARENT_SETTID)
977                 if (put_user(p->pid, parent_tidptr))
978                         goto bad_fork_cleanup;
979
980         p->proc_dentry = NULL;
981
982         INIT_LIST_HEAD(&p->children);
983         INIT_LIST_HEAD(&p->sibling);
984         init_waitqueue_head(&p->wait_chldexit);
985         p->vfork_done = NULL;
986         spin_lock_init(&p->alloc_lock);
987         spin_lock_init(&p->proc_lock);
988
989         clear_tsk_thread_flag(p, TIF_SIGPENDING);
990         init_sigpending(&p->pending);
991
992         p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
993         p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
994         init_timer(&p->real_timer);
995         p->real_timer.data = (unsigned long) p;
996
997         p->utime = p->stime = 0;
998         p->cutime = p->cstime = 0;
999         p->lock_depth = -1;             /* -1 = no lock */
1000         p->start_time = get_jiffies_64();
1001         p->security = NULL;
1002         p->io_context = NULL;
1003         p->audit_context = NULL;
1004 #ifdef CONFIG_NUMA
1005         p->mempolicy = mpol_copy(p->mempolicy);
1006         if (IS_ERR(p->mempolicy)) {
1007                 retval = PTR_ERR(p->mempolicy);
1008                 p->mempolicy = NULL;
1009                 goto bad_fork_cleanup;
1010         }
1011 #endif
1012
1013         if ((retval = security_task_alloc(p)))
1014                 goto bad_fork_cleanup_policy;
1015         if ((retval = audit_alloc(p)))
1016                 goto bad_fork_cleanup_security;
1017         /* copy all the process information */
1018         if ((retval = copy_semundo(clone_flags, p)))
1019                 goto bad_fork_cleanup_audit;
1020         if ((retval = copy_files(clone_flags, p)))
1021                 goto bad_fork_cleanup_semundo;
1022         if ((retval = copy_fs(clone_flags, p)))
1023                 goto bad_fork_cleanup_files;
1024         if ((retval = copy_sighand(clone_flags, p)))
1025                 goto bad_fork_cleanup_fs;
1026         if ((retval = copy_signal(clone_flags, p)))
1027                 goto bad_fork_cleanup_sighand;
1028         if ((retval = copy_mm(clone_flags, p)))
1029                 goto bad_fork_cleanup_signal;
1030         if ((retval = copy_namespace(clone_flags, p)))
1031                 goto bad_fork_cleanup_mm;
1032         retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
1033         if (retval)
1034                 goto bad_fork_cleanup_namespace;
1035
1036         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1037         /*
1038          * Clear TID on mm_release()?
1039          */
1040         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
1041
1042         /*
1043          * Syscall tracing should be turned off in the child regardless
1044          * of CLONE_PTRACE.
1045          */
1046         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1047
1048         /* Our parent execution domain becomes current domain
1049            These must match for thread signalling to apply */
1050            
1051         p->parent_exec_id = p->self_exec_id;
1052
1053         /* ok, now we should be set up.. */
1054         p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1055         p->pdeath_signal = 0;
1056
1057         /* Perform scheduler related setup */
1058         sched_fork(p);
1059
1060         /*
1061          * Ok, make it visible to the rest of the system.
1062          * We dont wake it up yet.
1063          */
1064         p->tgid = p->pid;
1065         p->group_leader = p;
1066         INIT_LIST_HEAD(&p->ptrace_children);
1067         INIT_LIST_HEAD(&p->ptrace_list);
1068
1069         /* Need tasklist lock for parent etc handling! */
1070         write_lock_irq(&tasklist_lock);
1071         /*
1072          * Check for pending SIGKILL! The new thread should not be allowed
1073          * to slip out of an OOM kill. (or normal SIGKILL.)
1074          */
1075         if (sigismember(&current->pending.signal, SIGKILL)) {
1076                 write_unlock_irq(&tasklist_lock);
1077                 retval = -EINTR;
1078                 goto bad_fork_cleanup_namespace;
1079         }
1080
1081         /* CLONE_PARENT re-uses the old parent */
1082         if (clone_flags & CLONE_PARENT)
1083                 p->real_parent = current->real_parent;
1084         else
1085                 p->real_parent = current;
1086         p->parent = p->real_parent;
1087
1088         if (clone_flags & CLONE_THREAD) {
1089                 spin_lock(&current->sighand->siglock);
1090                 /*
1091                  * Important: if an exit-all has been started then
1092                  * do not create this new thread - the whole thread
1093                  * group is supposed to exit anyway.
1094                  */
1095                 if (current->signal->group_exit) {
1096                         spin_unlock(&current->sighand->siglock);
1097                         write_unlock_irq(&tasklist_lock);
1098                         retval = -EAGAIN;
1099                         goto bad_fork_cleanup_namespace;
1100                 }
1101                 p->tgid = current->tgid;
1102                 p->group_leader = current->group_leader;
1103
1104                 if (current->signal->group_stop_count > 0) {
1105                         /*
1106                          * There is an all-stop in progress for the group.
1107                          * We ourselves will stop as soon as we check signals.
1108                          * Make the new thread part of that group stop too.
1109                          */
1110                         current->signal->group_stop_count++;
1111                         set_tsk_thread_flag(p, TIF_SIGPENDING);
1112                 }
1113
1114                 spin_unlock(&current->sighand->siglock);
1115         }
1116
1117         SET_LINKS(p);
1118         if (p->ptrace & PT_PTRACED)
1119                 __ptrace_link(p, current->parent);
1120
1121         attach_pid(p, PIDTYPE_PID, p->pid);
1122         if (thread_group_leader(p)) {
1123                 attach_pid(p, PIDTYPE_TGID, p->tgid);
1124                 attach_pid(p, PIDTYPE_PGID, process_group(p));
1125                 attach_pid(p, PIDTYPE_SID, p->signal->session);
1126                 if (p->pid)
1127                         __get_cpu_var(process_counts)++;
1128         } else
1129                 link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
1130
1131         nr_threads++;
1132         /* p is copy of current */
1133         vxi = p->vx_info;
1134         if (vxi) {
1135                 atomic_inc(&vxi->cacct.nr_threads);
1136                 atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
1137         }
1138         write_unlock_irq(&tasklist_lock);
1139         retval = 0;
1140
1141 fork_out:
1142         if (retval)
1143                 return ERR_PTR(retval);
1144         return p;
1145
1146 bad_fork_cleanup_namespace:
1147         exit_namespace(p);
1148 bad_fork_cleanup_mm:
1149         exit_mm(p);
1150         if (p->active_mm)
1151                 mmdrop(p->active_mm);
1152 bad_fork_cleanup_signal:
1153         exit_signal(p);
1154 bad_fork_cleanup_sighand:
1155         exit_sighand(p);
1156 bad_fork_cleanup_fs:
1157         exit_fs(p); /* blocking */
1158 bad_fork_cleanup_files:
1159         exit_files(p); /* blocking */
1160 bad_fork_cleanup_semundo:
1161         exit_sem(p);
1162 bad_fork_cleanup_audit:
1163         audit_free(p);
1164 bad_fork_cleanup_security:
1165         security_task_free(p);
1166 bad_fork_cleanup_policy:
1167 #ifdef CONFIG_NUMA
1168         mpol_free(p->mempolicy);
1169 #endif
1170 bad_fork_cleanup:
1171         if (p->pid > 0)
1172                 free_pidmap(p->pid);
1173         if (p->binfmt)
1174                 module_put(p->binfmt->module);
1175 bad_fork_cleanup_put_domain:
1176         module_put(p->thread_info->exec_domain->module);
1177 bad_fork_cleanup_count:
1178         put_group_info(p->group_info);
1179         atomic_dec(&p->user->processes);
1180         free_uid(p->user);
1181 bad_fork_cleanup_vm:
1182         if (p->mm && !(clone_flags & CLONE_VM))
1183                 vx_pages_sub(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
1184 bad_fork_free:
1185         free_task(p);
1186         goto fork_out;
1187 }
1188
1189 static inline int fork_traceflag (unsigned clone_flags)
1190 {
1191         if (clone_flags & (CLONE_UNTRACED | CLONE_IDLETASK))
1192                 return 0;
1193         else if (clone_flags & CLONE_VFORK) {
1194                 if (current->ptrace & PT_TRACE_VFORK)
1195                         return PTRACE_EVENT_VFORK;
1196         } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1197                 if (current->ptrace & PT_TRACE_CLONE)
1198                         return PTRACE_EVENT_CLONE;
1199         } else if (current->ptrace & PT_TRACE_FORK)
1200                 return PTRACE_EVENT_FORK;
1201
1202         return 0;
1203 }
1204
1205 /*
1206  *  Ok, this is the main fork-routine.
1207  *
1208  * It copies the process, and if successful kick-starts
1209  * it and waits for it to finish using the VM if required.
1210  */
1211 long do_fork(unsigned long clone_flags,
1212               unsigned long stack_start,
1213               struct pt_regs *regs,
1214               unsigned long stack_size,
1215               int __user *parent_tidptr,
1216               int __user *child_tidptr)
1217 {
1218         struct task_struct *p;
1219         int trace = 0;
1220         long pid;
1221
1222         if (unlikely(current->ptrace)) {
1223                 trace = fork_traceflag (clone_flags);
1224                 if (trace)
1225                         clone_flags |= CLONE_PTRACE;
1226         }
1227
1228 #ifdef CONFIG_CKRM_TYPE_TASKCLASS
1229         if (numtasks_get_ref(current->taskclass, 0) == 0) {
1230                 return -ENOMEM;
1231         }
1232 #endif
1233
1234         p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
1235         /*
1236          * Do this prior to waking up the new thread - the thread pointer
1237          * might become invalid after that point, if the thread exits quickly.
1238          */
1239         pid = IS_ERR(p) ? PTR_ERR(p) : p->pid;
1240
1241         if (!IS_ERR(p)) {
1242                 struct completion vfork;
1243
1244                 ckrm_cb_fork(p);
1245
1246                 if (clone_flags & CLONE_VFORK) {
1247                         p->vfork_done = &vfork;
1248                         init_completion(&vfork);
1249                 }
1250
1251                 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
1252                         /*
1253                          * We'll start up with an immediate SIGSTOP.
1254                          */
1255                         sigaddset(&p->pending.signal, SIGSTOP);
1256                         set_tsk_thread_flag(p, TIF_SIGPENDING);
1257                 }
1258
1259                 if (!(clone_flags & CLONE_STOPPED)) {
1260                         /*
1261                          * Do the wakeup last. On SMP we treat fork() and
1262                          * CLONE_VM separately, because fork() has already
1263                          * created cache footprint on this CPU (due to
1264                          * copying the pagetables), hence migration would
1265                          * probably be costly. Threads on the other hand
1266                          * have less traction to the current CPU, and if
1267                          * there's an imbalance then the scheduler can
1268                          * migrate this fresh thread now, before it
1269                          * accumulates a larger cache footprint:
1270                          */
1271                         if (clone_flags & CLONE_VM)
1272                                 wake_up_forked_thread(p);
1273                         else
1274                                 wake_up_forked_process(p);
1275                 } else {
1276                         int cpu = get_cpu();
1277
1278                         p->state = TASK_STOPPED;
1279                         if (cpu_is_offline(task_cpu(p)))
1280                                 set_task_cpu(p, cpu);
1281
1282                         put_cpu();
1283                 }
1284                 ++total_forks;
1285
1286                 if (unlikely (trace)) {
1287                         current->ptrace_message = pid;
1288                         ptrace_notify ((trace << 8) | SIGTRAP);
1289                 }
1290
1291                 if (clone_flags & CLONE_VFORK) {
1292                         wait_for_completion(&vfork);
1293                         if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
1294                                 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1295                 } else
1296                         /*
1297                          * Let the child process run first, to avoid most of the
1298                          * COW overhead when the child exec()s afterwards.
1299                          */
1300                         set_need_resched();
1301         } else {
1302 #ifdef CONFIG_CKRM_TYPE_TASKCLASS
1303                 numtasks_put_ref(current->taskclass);
1304 #endif
1305         }
1306         return pid;
1307 }
1308
1309 /* SLAB cache for signal_struct structures (tsk->signal) */
1310 kmem_cache_t *signal_cachep;
1311
1312 /* SLAB cache for sighand_struct structures (tsk->sighand) */
1313 kmem_cache_t *sighand_cachep;
1314
1315 /* SLAB cache for files_struct structures (tsk->files) */
1316 kmem_cache_t *files_cachep;
1317
1318 /* SLAB cache for fs_struct structures (tsk->fs) */
1319 kmem_cache_t *fs_cachep;
1320
1321 /* SLAB cache for vm_area_struct structures */
1322 kmem_cache_t *vm_area_cachep;
1323
1324 /* SLAB cache for mm_struct structures (tsk->mm) */
1325 kmem_cache_t *mm_cachep;
1326
1327 void __init proc_caches_init(void)
1328 {
1329         sighand_cachep = kmem_cache_create("sighand_cache",
1330                         sizeof(struct sighand_struct), 0,
1331                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1332         signal_cachep = kmem_cache_create("signal_cache",
1333                         sizeof(struct signal_struct), 0,
1334                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1335         files_cachep = kmem_cache_create("files_cache", 
1336                         sizeof(struct files_struct), 0,
1337                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1338         fs_cachep = kmem_cache_create("fs_cache", 
1339                         sizeof(struct fs_struct), 0,
1340                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1341         vm_area_cachep = kmem_cache_create("vm_area_struct",
1342                         sizeof(struct vm_area_struct), 0,
1343                         SLAB_PANIC, NULL, NULL);
1344         mm_cachep = kmem_cache_create("mm_struct",
1345                         sizeof(struct mm_struct), 0,
1346                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1347 }
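
do_fork() above is never called directly from userspace; the architecture-specific
syscall entry points wrap it. As a point of reference, the following is a minimal
sketch of what the i386 wrappers look like in this kernel generation (roughly as in
arch/i386/kernel/process.c); it is shown only for illustration and is not part of
kernel/fork.c.

/*
 * Illustrative sketch (assumed, not part of this file): i386 entry
 * points that funnel fork()/clone()/vfork() into do_fork().
 */
asmlinkage int sys_fork(struct pt_regs regs)
{
	/* plain fork(): child gets SIGCHLD on exit, keeps parent's stack pointer */
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags = regs.ebx;
	unsigned long newsp = regs.ecx;
	int __user *parent_tidptr = (int __user *)regs.edx;
	int __user *child_tidptr = (int __user *)regs.edi;

	/* a NULL child stack means "share the caller's current stack pointer" */
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

asmlinkage int sys_vfork(struct pt_regs regs)
{
	/* vfork(): share the VM and block the parent until mm_release() completes */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}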