/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/tracehook.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
#include <linux/vs_memory.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	rt_mutex_debug_task_free(tsk);
	clr_vx_info(&tsk->vx_info);
	clr_nx_info(&tsk->nx_info);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures (THREAD_SIZE bytes of kernel
	 * stack each) can take up at most one eighth of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

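/*
 * Worked example (editorial, assuming 4 KB pages and an 8 KB THREAD_SIZE,
 * i.e. two pages of kernel stack per thread): on a 512 MB machine
 * mempages is 131072, so max_threads = 131072 / (8 * 2) = 8192 and the
 * default RLIMIT_NPROC comes out at 4096.
 */
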
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->thread_info = ti;
	setup_thread_stack(tsk, orig);

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage,2);
	atomic_set(&tsk->fs_excl, 0);
	tsk->btrace_seq = 0;
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	__set_mm_counter(mm, file_rss, 0);
	__set_mm_counter(mm, anon_rss, 0);
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			vx_vmpages_sub(mm, pages);
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
#ifdef arch_dup_mmap
	arch_dup_mmap(mm, oldmm);
#endif
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

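/*
 * Editorial note: for private writable mappings, copy_page_range() above
 * write-protects the PTEs in both parent and child, so the two address
 * spaces share the underlying pages copy-on-write until one side faults
 * on a write.
 */
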
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		set_vx_info(&mm->mm_vx_info, current->vx_info);
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	clr_vx_info(&mm->mm_vx_info);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

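/*
 * Editorial note on the two reference counts: mm_users counts users of
 * the address space proper (threads, get_task_mm() callers) and is
 * dropped with mmput(); mm_count counts references to the mm_struct
 * itself (e.g. lazy-TLB kernel threads) and is dropped with mmdrop().
 * A sketch of the second kind of reference:
 *
 *	atomic_inc(&mm->mm_count);	// pin the struct, not the mappings
 *	...
 *	mmdrop(mm);			// frees the mm on the last reference
 */
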
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO).  Otherwise bumps up the use count and
 * returns a reference to the mm.  The caller must release the mm via
 * mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

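/*
 * Typical caller pattern (editorial sketch, not part of this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect the address space ...
 *		mmput(mm);
 *	}
 */
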
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

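/*
 * Editorial note: the clear_child_tid handshake above is what makes
 * pthread_join()-style synchronization work.  Threading libraries pass
 * CLONE_CHILD_CLEARTID (and call set_tid_address()) so that on thread
 * exit the kernel zeroes the TID word and issues a FUTEX_WAKE on it;
 * the joining thread simply futex-waits on that word.
 */
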
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm->mm_vx_info = NULL;

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	clr_vx_info(&mm->mm_vx_info);
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that.
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

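/*
 * Editorial note: when copy_mm() returns early because the parent is a
 * kernel thread (oldmm == NULL), the child keeps tsk->mm == NULL and
 * runs "lazy TLB": at context switch it borrows the active_mm of the
 * previous user task, pinned via mm_count rather than mm_users.
 */
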
static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fdset;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

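/*
 * Worked example (editorial): on a 64-bit machine with the highest open
 * fd at 70, the last non-zero bitmap word is fds_bits[1] (fds 64-127),
 * so the loop stops at i == 1 and count_open_files() returns
 * (1 + 1) * 8 * sizeof(long) == 128 -- i.e. the count is rounded up to
 * a whole bitmap word, not the exact number of open fds.
 */
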
static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->free_files = NULL;
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, expand;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire oldf's lock and re-read its fd table pointer:
		 * oldf may have grown a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
			/* FIXME: sum it first for check and performance */
			vx_openfd_inc(open_files - i);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

out:
	return newf;

out_release:
	free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race, but the race merely causes us to copy when we
	   don't need to, and then drop the copy. */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		taskstats_tgid_alloc(current->signal);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	taskstats_tgid_free(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
	spin_lock_init(&p->pi_lock);
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;
	struct vx_info *vxi;
	struct nx_info *nxi;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

	p->tux_info = NULL;

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	init_vx_info(&p->vx_info, current->vx_info);
	init_nx_info(&p->nx_info, current->nx_info);

	/* check vserver memory */
	if (p->mm && !(clone_flags & CLONE_VM)) {
		if (vx_vmpages_avail(p->mm, p->mm->total_vm))
			vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm);
		else
			goto bad_fork_free;
	}
	if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
		if (!vx_rsspages_avail(p->mm, get_mm_counter(p->mm, file_rss)))
			goto bad_fork_cleanup_vm;
	}

	retval = -EAGAIN;
	if (!vx_nproc_avail(1))
		goto bad_fork_cleanup_vm;

	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_cleanup_vm;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
#ifdef CONFIG_PTRACE
	INIT_LIST_HEAD(&p->ptracees);
#endif

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/*
	 * Our parent execution domain becomes current domain.
	 * These must match for thread signalling to apply.
	 */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->parent = current->parent;
	else
		p->parent = current;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespace;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	if (likely(p->pid)) {
		add_parent(p);
		tracehook_init_task(p);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);

	/* p is copy of current */
	vxi = p->vx_info;
	if (vxi) {
		claim_vx_info(vxi, p);
		atomic_inc(&vxi->cvirt.nr_threads);
		atomic_inc(&vxi->cvirt.total_forks);
		vx_nproc_inc(p);
	}
	nxi = p->nx_info;
	if (nxi)
		claim_nx_info(nxi, p);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_cleanup_vm:
	if (p->mm && !(clone_flags & CLONE_VM))
		vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __devinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!task)
		return ERR_PTR(-ENOMEM);
	init_idle(task, cpu);

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		tracehook_report_clone(clone_flags, p);

		p->flags &= ~PF_STARTING;

		if (clone_flags & CLONE_STOPPED) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			p->state = TASK_STOPPED;
		} else
			wake_up_new_task(p, clone_flags);

		tracehook_report_clone_complete(clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}

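/*
 * Editorial note: the classic entry points are thin wrappers around
 * do_fork().  Typical flag combinations (architecture wrappers vary):
 *
 *	fork()  -> do_fork(SIGCHLD, ...)
 *	vfork() -> do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...)
 *	NPTL pthread_create() -> clone() with CLONE_VM | CLONE_FS |
 *		CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
 *		CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 */
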
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then we must also unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

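/*
 * Worked example (editorial): unshare(CLONE_THREAD) arrives here as just
 * CLONE_THREAD, gains CLONE_VM from the first rule and CLONE_SIGHAND
 * from the second, so all three constraints are then checked together
 * by the unshare_*() helpers below.
 */
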
/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
	struct namespace *ns = current->namespace;

	if ((unshare_flags & CLONE_NEWNS) &&
	    (ns && atomic_read(&ns->count) > 1)) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		*new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
		if (!*new_nsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) &&
	    (sigh && atomic_read(&sigh->count) > 1))
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct namespace *ns, *new_ns = NULL;
	struct sighand_struct *sigh, *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_ns) {
			ns = current->namespace;
			current->namespace = new_ns;
			new_ns = ns;
		}

		if (new_sigh) {
			sigh = current->sighand;
			rcu_assign_pointer(current->sighand, new_sigh);
			new_sigh = sigh;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
	if (new_ns)
		put_namespace(new_ns);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
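
/*
 * Editorial notes on sys_unshare():
 *
 * On success the code above deliberately falls through the cleanup
 * labels: after the pointer swap the new_* variables hold the *old*
 * structures, which is exactly what must be released.
 *
 * Userspace usage sketch (illustrative):
 *
 *	#include <sched.h>
 *
 *	if (unshare(CLONE_NEWNS) == -1)	// private mount namespace;
 *		perror("unshare");	// needs CAP_SYS_ADMIN
 */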