 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/vs_base.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
        return 0;
}
#endif
/* spinlock for vfsmount related operations, in place of dcache_lock */
spinlock_t vfsmount_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
static struct list_head *mount_hashtable;
static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache;
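/*
 * Descriptive note (added by the editor, not a comment from the original
 * source): the helper below hashes a (parent mount, mountpoint dentry)
 * pair into an index for mount_hashtable.  Dividing the pointers by
 * L1_CACHE_BYTES discards the low bits that are identical for all
 * cache-line-aligned objects, and folding in (tmp >> hash_bits) mixes the
 * higher bits before the result is masked down to the table size.
 */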
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> hash_bits);
        return tmp & hash_mask;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
        struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
                memset(mnt, 0, sizeof(struct vfsmount));
                atomic_set(&mnt->mnt_count,1);
                INIT_LIST_HEAD(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
                if (name) {
                        int size = strlen(name)+1;
                        char *newname = kmalloc(size, GFP_KERNEL);
                        if (newname) {
                                memcpy(newname, name, size);
                                mnt->mnt_devname = newname;
                        }
                }
        }
        return mnt;
}
void free_vfsmnt(struct vfsmount *mnt)
{
        kfree(mnt->mnt_devname);
        kmem_cache_free(mnt_cache, mnt);
}
/*
 * Now, lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
        struct list_head * head = mount_hashtable + hash(mnt, dentry);
        struct list_head * tmp = head;
        struct vfsmount *p, *found = NULL;

        spin_lock(&vfsmount_lock);
        for (;;) {
                tmp = tmp->next;
                p = NULL;
                if (tmp == head)
                        break;
                p = list_entry(tmp, struct vfsmount, mnt_hash);
                if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
                        found = mntget(p);
                        break;
                }
        }
        spin_unlock(&vfsmount_lock);
        return found;
}

EXPORT_SYMBOL(lookup_mnt);
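/*
 * Illustrative caller pattern (an editor's sketch, not code from this
 * file): because lookup_mnt() returns with mnt_count already raised, a
 * path-walking caller is expected to drop its old reference and later
 * mntput() the child it was handed, roughly:
 *
 *      struct vfsmount *child = lookup_mnt(nd->mnt, nd->dentry);
 *      if (child) {
 *              mntput(nd->mnt);
 *              nd->mnt = child;
 *      }
 */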
static int check_mnt(struct vfsmount *mnt)
{
        spin_lock(&vfsmount_lock);
        while (mnt->mnt_parent != mnt)
                mnt = mnt->mnt_parent;
        spin_unlock(&vfsmount_lock);
        return mnt == current->namespace->root;
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
        old_nd->dentry = mnt->mnt_mountpoint;
        old_nd->mnt = mnt->mnt_parent;
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt_root;
        list_del_init(&mnt->mnt_child);
        list_del_init(&mnt->mnt_hash);
        old_nd->dentry->d_mounted--;
}
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
        mnt->mnt_parent = mntget(nd->mnt);
        mnt->mnt_mountpoint = dget(nd->dentry);
        list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
        list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
        nd->dentry->d_mounted++;
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
        struct list_head *next = p->mnt_mounts.next;
        if (next == &p->mnt_mounts) {
                while (1) {
                        if (p == root)
                                return NULL;
                        next = p->mnt_child.next;
                        if (next != &p->mnt_parent->mnt_mounts)
                                break;
                        p = p->mnt_parent;
                }
        }
        return list_entry(next, struct vfsmount, mnt_child);
}
static struct vfsmount *
clone_mnt(struct vfsmount *old, struct dentry *root)
{
        struct super_block *sb = old->mnt_sb;
        struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

        if (mnt) {
                mnt->mnt_flags = old->mnt_flags;
                atomic_inc(&sb->s_active);
                mnt->mnt_sb = sb;
                mnt->mnt_root = dget(root);
                mnt->mnt_mountpoint = mnt->mnt_root;
                mnt->mnt_parent = mnt;
        }
        return mnt;
}
void __mntput(struct vfsmount *mnt)
{
        struct super_block *sb = mnt->mnt_sb;
        dput(mnt->mnt_root);
        free_vfsmnt(mnt);
        deactivate_super(sb);
}

EXPORT_SYMBOL(__mntput);
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct namespace *n = m->private;
        struct list_head *p;
        loff_t l = *pos;

        down_read(&n->sem);
        list_for_each(p, &n->list)
                if (!l--)
                        return list_entry(p, struct vfsmount, mnt_list);
        return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct namespace *n = m->private;
        struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
        (*pos)++;
        return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
        struct namespace *n = m->private;
        up_read(&n->sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
        seq_escape(m, s, " \t\n\\");
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
        struct vfsmount *mnt = v;
        int err = 0;
        static struct proc_fs_info {
                int flag;
                char *str;
        } fs_info[] = {
                { MS_SYNCHRONOUS, ",sync" },
                { MS_DIRSYNC, ",dirsync" },
                { MS_MANDLOCK, ",mand" },
                { MS_NOATIME, ",noatime" },
                { MS_NODIRATIME, ",nodiratime" },
                { 0, NULL }
        };
        static struct proc_fs_info mnt_info[] = {
                { MNT_NOSUID, ",nosuid" },
                { MNT_NODEV, ",nodev" },
                { MNT_NOEXEC, ",noexec" },
                { 0, NULL }
        };
        struct proc_fs_info *fs_infop;

        if (vx_flags(VXF_HIDE_MOUNT, 0))
                return 0;

        mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
        seq_putc(m, ' ');
        seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
        seq_putc(m, ' ');
        mangle(m, mnt->mnt_sb->s_type->name);
        seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
        for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
                if (mnt->mnt_sb->s_flags & fs_infop->flag)
                        seq_puts(m, fs_infop->str);
        }
        for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
                if (mnt->mnt_flags & fs_infop->flag)
                        seq_puts(m, fs_infop->str);
        }
        if (mnt->mnt_sb->s_op->show_options)
                err = mnt->mnt_sb->s_op->show_options(m, mnt);
        seq_puts(m, " 0 0\n");
        return err;
}
struct seq_operations mounts_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_vfsmnt
};
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
        struct list_head *next;
        struct vfsmount *this_parent = mnt;
        int actual_refs;
        int minimum_refs;

        spin_lock(&vfsmount_lock);
        actual_refs = atomic_read(&mnt->mnt_count);
        minimum_refs = 2;
repeat:
        next = this_parent->mnt_mounts.next;
resume:
        while (next != &this_parent->mnt_mounts) {
                struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);

                next = next->next;

                actual_refs += atomic_read(&p->mnt_count);
                minimum_refs += 2;

                if (!list_empty(&p->mnt_mounts)) {
                        this_parent = p;
                        goto repeat;
                }
        }

        if (this_parent != mnt) {
                next = this_parent->mnt_child.next;
                this_parent = this_parent->mnt_parent;
                goto resume;
        }
        spin_unlock(&vfsmount_lock);

        if (actual_refs > minimum_refs)
                return 0;

        return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
        if (atomic_read(&mnt->mnt_count) > 2)
                return -EBUSY;
        return 0;
}

EXPORT_SYMBOL(may_umount);
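/*
 * Note on the "> 2" test above (an editor's sketch of the expected
 * reference counts, not a comment from the original file): an attached
 * but otherwise idle mount is accounted for by its own long-standing
 * reference plus the reference held by whoever is asking the question,
 * so mnt_count == 2.  Anything beyond that - an open file, a task's cwd
 * or root on the mount, or a child mount holding its mnt_parent
 * reference - pushes the count above 2 and the mount is reported busy.
 */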
static inline void __umount_tree(struct vfsmount *mnt, struct list_head *kill)
{
        while (!list_empty(kill)) {
                mnt = list_entry(kill->next, struct vfsmount, mnt_list);
                list_del_init(&mnt->mnt_list);
                if (mnt->mnt_parent == mnt) {
                        spin_unlock(&vfsmount_lock);
                } else {
                        struct nameidata old_nd;
                        detach_mnt(mnt, &old_nd);
                        spin_unlock(&vfsmount_lock);
                        path_release(&old_nd);
                }
                mntput(mnt);
                spin_lock(&vfsmount_lock);
        }
}
void umount_tree(struct vfsmount *mnt)
{
        struct vfsmount *p;
        LIST_HEAD(kill);

        for (p = mnt; p; p = next_mnt(p, mnt)) {
                list_del(&p->mnt_list);
                list_add(&p->mnt_list, &kill);
        }
        __umount_tree(mnt, &kill);
}
void umount_unused(struct vfsmount *mnt, struct fs_struct *fs)
{
        struct vfsmount *p;
        LIST_HEAD(kill);

        for (p = mnt; p; p = next_mnt(p, mnt)) {
                if (p == fs->rootmnt || p == fs->pwdmnt)
                        continue;
                list_del(&p->mnt_list);
                list_add(&p->mnt_list, &kill);
        }
        __umount_tree(mnt, &kill);
}
static int do_umount(struct vfsmount *mnt, int flags)
        struct super_block * sb = mnt->mnt_sb;

        retval = security_sb_umount(mnt, flags);

        /*
         * If we may have to abort operations to get out of this
         * mount, and they will themselves hold resources we must
         * allow the fs to do things. In the Unix tradition of
         * 'Gee that's tricky, let's do it in userspace' the umount_begin
         * might fail to complete on the first run through as other tasks
         * must return, and the like. That's for the mount program to worry
         * about for the moment.
         */
        if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
                sb->s_op->umount_begin(sb);

        /*
         * No sense to grab the lock for this test, but test itself looks
         * somewhat bogus. Suggestions for better replacement?
         * Ho-hum... In principle, we might treat that as umount + switch
         * to rootfs. GC would eventually take care of the old vfsmount.
         * Actually it makes sense, especially if rootfs would contain a
         * /reboot - static binary that would close all descriptors and
         * call reboot(2). Then init(8) could umount root and exec /reboot.
         */
        if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
                /*
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
                 */
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY)) {
                        retval = do_remount_sb(sb, MS_RDONLY, 0, 0);
                up_write(&sb->s_umount);

        down_write(&current->namespace->sem);
        spin_lock(&vfsmount_lock);

        if (atomic_read(&sb->s_active) == 1) {
                /* last instance - try to be smart */
                spin_unlock(&vfsmount_lock);
                security_sb_umount_close(mnt);
                spin_lock(&vfsmount_lock);

        if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
                if (!list_empty(&mnt->mnt_list))
        spin_unlock(&vfsmount_lock);

        security_sb_umount_busy(mnt);
        up_write(&current->namespace->sem);
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
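/*
 * For illustration only (a userspace sketch assumed by the editor, not
 * part of this file): the flags below arrive from the umount2(2) entry
 * point, e.g.
 *
 *      umount2("/mnt/cdrom", MNT_FORCE);   (ask the fs to abort pending ops)
 *      umount2("/mnt/nfs", MNT_DETACH);    (lazy unmount: detach now, free later)
 */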
asmlinkage long sys_umount(char __user * name, int flags)
        retval = __user_walk(name, LOOKUP_FOLLOW, &nd);

        if (nd.dentry != nd.mnt->mnt_root)
        if (!check_mnt(nd.mnt))

        if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SECURE_MOUNT))

        retval = do_umount(nd.mnt, flags);

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
        return sys_umount(name,0);
static int mount_is_safe(struct nameidata *nd)
        if (capable(CAP_SYS_ADMIN))
        if (vx_ccaps(VXC_SECURE_MOUNT))

        if (S_ISLNK(nd->dentry->d_inode->i_mode))
        if (nd->dentry->d_inode->i_mode & S_ISVTX) {
                if (current->uid != nd->dentry->d_inode->i_uid)
        if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
static int
lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
        if (d == NULL || d == d->d_parent)
static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
        struct vfsmount *res, *p, *q, *r, *s;

        res = q = clone_mnt(mnt, dentry);
        q->mnt_mountpoint = mnt->mnt_mountpoint;

        for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
                r = list_entry(h, struct vfsmount, mnt_child);
                if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))

                for (s = r; s; s = next_mnt(s, r)) {
                        while (p != s->mnt_parent) {

                        nd.dentry = p->mnt_mountpoint;
                        q = clone_mnt(p, p->mnt_root);

                        spin_lock(&vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        spin_unlock(&vfsmount_lock);

        spin_lock(&vfsmount_lock);
        spin_unlock(&vfsmount_lock);
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
        if (mnt->mnt_sb->s_flags & MS_NOUSER)

        if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
              S_ISDIR(mnt->mnt_root->d_inode->i_mode))

        down(&nd->dentry->d_inode->i_sem);
        if (IS_DEADDIR(nd->dentry->d_inode))

        err = security_sb_check_sb(mnt, nd);

        spin_lock(&vfsmount_lock);
        if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
                struct list_head head;

                list_add_tail(&head, &mnt->mnt_list);
                list_splice(&head, current->namespace->list.prev);

        spin_unlock(&vfsmount_lock);

        up(&nd->dentry->d_inode->i_sem);

        security_sb_post_addmount(mnt, nd);
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
        struct nameidata old_nd;
        struct vfsmount *mnt = NULL;
        int err = mount_is_safe(nd);

        if (!old_name || !*old_name)
        err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);

        down_write(&current->namespace->sem);

        if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
                mnt = copy_tree(old_nd.mnt, old_nd.dentry);
                mnt = clone_mnt(old_nd.mnt, old_nd.dentry);

        err = graft_tree(mnt, nd);

        spin_lock(&vfsmount_lock);
        spin_unlock(&vfsmount_lock);

        up_write(&current->namespace->sem);
        path_release(&old_nd);
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd,int flags,int mnt_flags,void *data)
        struct super_block * sb = nd->mnt->mnt_sb;

        if (!capable(CAP_SYS_ADMIN))

        if (!check_mnt(nd->mnt))

        if (nd->dentry != nd->mnt->mnt_root)

        down_write(&sb->s_umount);
        err = do_remount_sb(sb, flags, data, 0);
        nd->mnt->mnt_flags=mnt_flags;
        up_write(&sb->s_umount);

        security_sb_post_remount(nd->mnt, flags, data);
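/*
 * Illustration (editor's sketch of a typical caller, not from this file):
 * "mount -o remount,ro /" arrives here roughly as
 *
 *      do_remount(&nd, MS_RDONLY, 0, data_page);
 *
 * after do_mount() has stripped MS_REMOUNT.  The nd->dentry !=
 * nd->mnt->mnt_root check above rejects any path that is not the root
 * dentry of the mount being remounted.
 */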
static int do_move_mount(struct nameidata *nd, char *old_name)
        struct nameidata old_nd, parent_nd;

        if (!capable(CAP_SYS_ADMIN))
        if (!old_name || !*old_name)
        err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);

        down_write(&current->namespace->sem);
        while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))

        if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))

        down(&nd->dentry->d_inode->i_sem);
        if (IS_DEADDIR(nd->dentry->d_inode))

        spin_lock(&vfsmount_lock);
        if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))

        if (old_nd.dentry != old_nd.mnt->mnt_root)

        if (old_nd.mnt == old_nd.mnt->mnt_parent)

        if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
              S_ISDIR(old_nd.dentry->d_inode->i_mode))

        for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent)

        detach_mnt(old_nd.mnt, &parent_nd);
        attach_mnt(old_nd.mnt, nd);

        spin_unlock(&vfsmount_lock);

        up(&nd->dentry->d_inode->i_sem);

        up_write(&current->namespace->sem);

        path_release(&parent_nd);
        path_release(&old_nd);
static int do_add_mount(struct nameidata *nd, char *type, int flags,
                        int mnt_flags, char *name, void *data)
        struct vfsmount *mnt;

        if (!type || !memchr(type, 0, PAGE_SIZE))

        /* we need capabilities... */
        if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SECURE_MOUNT))

        mnt = do_kern_mount(type, flags, name, data);

        down_write(&current->namespace->sem);
        /* Something was mounted here while we slept */
        while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))

        if (!check_mnt(nd->mnt))

        /* Refuse the same filesystem on the same mount point */
        if (nd->mnt->mnt_sb == mnt->mnt_sb && nd->mnt->mnt_root == nd->dentry)

        if (S_ISLNK(mnt->mnt_root->d_inode->i_mode))

        mnt->mnt_flags = mnt_flags;
        err = graft_tree(mnt, nd);

        up_write(&current->namespace->sem);
int copy_mount_options (const void __user *data, unsigned long *where)
        if (!(page = __get_free_page(GFP_KERNEL)))

        /* We only care that *some* data at the address the user
         * gave us is valid. Just in case, we'll zero
         * the remainder of the page.
         */
        /* copy_from_user cannot cross TASK_SIZE ! */
        size = TASK_SIZE - (unsigned long)data;
        if (size > PAGE_SIZE)

        i = size - copy_from_user((void *)page, data, size);

        memset((char *)page + i, 0, PAGE_SIZE - i);
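/*
 * Worked example (editor's illustration, not from the original source):
 * suppose only the first 100 bytes at "data" are mapped and size ends up
 * as PAGE_SIZE.  copy_from_user() reports PAGE_SIZE - 100 bytes it could
 * not copy, so i becomes 100, and the memset() above clears the remaining
 * PAGE_SIZE - 100 bytes so the option page never carries stale data.
 */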
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
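/*
 * Illustration (editor's example, not from the original source): a legacy
 * caller passing MS_MGC_VAL | MS_RDONLY (0xc0ed0001) matches the
 * (flags & MS_MGC_MSK) == MS_MGC_VAL test below and has the magic top
 * half masked away, leaving plain MS_RDONLY for the rest of do_mount().
 */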
long do_mount(char * dev_name, char * dir_name, char *type_page,
                  unsigned long flags, void *data_page)
        if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
                flags &= ~MS_MGC_MSK;

        /* Basic sanity checks */
        if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
        if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))

        ((char *)data_page)[PAGE_SIZE - 1] = 0;

        /* Separate the per-mountpoint flags */
        if (flags & MS_NOSUID)
                mnt_flags |= MNT_NOSUID;
        if (flags & MS_NODEV)
                mnt_flags |= MNT_NODEV;
        if (flags & MS_NOEXEC)
                mnt_flags |= MNT_NOEXEC;
        flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);

        if (vx_ccaps(VXC_SECURE_MOUNT))
                mnt_flags |= MNT_NODEV;

        /* ... and get the mountpoint */
        retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);

        retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);

        if (flags & MS_REMOUNT)
                retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
                                    data_page);
        else if (flags & MS_BIND)
                retval = do_loopback(&nd, dev_name, flags & MS_REC);
        else if (flags & MS_MOVE)
                retval = do_move_mount(&nd, dev_name);
        else
                retval = do_add_mount(&nd, type_page, flags, mnt_flags,
                                      dev_name, data_page);
int copy_namespace(int flags, struct task_struct *tsk)
        struct namespace *namespace = tsk->namespace;
        struct namespace *new_ns;
        struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
        struct fs_struct *fs = tsk->fs;

        get_namespace(namespace);

        if (!(flags & CLONE_NEWNS))

        if (!capable(CAP_SYS_ADMIN)) {
                put_namespace(namespace);

        new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);

        atomic_set(&new_ns->count, 1);
        init_rwsem(&new_ns->sem);
        INIT_LIST_HEAD(&new_ns->list);

        down_write(&tsk->namespace->sem);
        /* First pass: copy the tree topology */
        new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
                up_write(&tsk->namespace->sem);

        spin_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
        spin_unlock(&vfsmount_lock);

        /* Second pass: switch the tsk->fs->* elements */
                struct vfsmount *p, *q;
                write_lock(&fs->lock);

                if (p == fs->rootmnt) {
                        fs->rootmnt = mntget(q);
                if (p == fs->pwdmnt) {
                        fs->pwdmnt = mntget(q);
                if (p == fs->altrootmnt) {
                        fs->altrootmnt = mntget(q);
                p = next_mnt(p, namespace->root);
                q = next_mnt(q, new_ns->root);

                write_unlock(&fs->lock);

        up_write(&tsk->namespace->sem);

        tsk->namespace = new_ns;

        put_namespace(namespace);

        put_namespace(namespace);
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
                          char __user * type, unsigned long flags,
                          void __user * data)
        unsigned long data_page;
        unsigned long type_page;
        unsigned long dev_page;

        retval = copy_mount_options (type, &type_page);

        dir_page = getname(dir_name);
        retval = PTR_ERR(dir_page);
        if (IS_ERR(dir_page))

        retval = copy_mount_options (dev_name, &dev_page);

        retval = copy_mount_options (data, &data_page);

        retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
                          flags, (void*)data_page);

        free_page(data_page);

        free_page(dev_page);

        free_page(type_page);
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
                 struct dentry *dentry)
{
        struct dentry *old_root;
        struct vfsmount *old_rootmnt;
        write_lock(&fs->lock);
        old_root = fs->root;
        old_rootmnt = fs->rootmnt;
        fs->rootmnt = mntget(mnt);
        fs->root = dget(dentry);
        write_unlock(&fs->lock);
        if (old_root) {
                dput(old_root);
                mntput(old_rootmnt);
        }
}

EXPORT_SYMBOL(set_fs_root);
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}

EXPORT_SYMBOL(set_fs_pwd);
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
        struct task_struct *g, *p;
        struct fs_struct *fs;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                        atomic_inc(&fs->count);

                        if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt)
                                set_fs_root(fs, new_nd->mnt, new_nd->dentry);
                        if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt)
                                set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);

        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
/*
 * Moves the current root to put_old, and sets root/cwd of all processes
 * which had them on the old root to new_root.
 *
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
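/*
 * Illustration (editor's sketch of the usual caller sequence, not from
 * this file): an initrd typically switches roots with
 *
 *      chdir("/new_root");
 *      pivot_root(".", "put_old");     (via syscall(2) if libc has no wrapper)
 *      chroot(".");
 *      chdir("/");
 *
 * after which the old root remains reachable under /put_old and can be
 * unmounted once nothing uses it.
 */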
asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
        struct vfsmount *tmp;
        struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;

        if (!capable(CAP_SYS_ADMIN))

        error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);

        if (!check_mnt(new_nd.mnt))

        error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);

        error = security_sb_pivotroot(&old_nd, &new_nd);
                path_release(&old_nd);

        read_lock(&current->fs->lock);
        user_nd.mnt = mntget(current->fs->rootmnt);
        user_nd.dentry = dget(current->fs->root);
        read_unlock(&current->fs->lock);
        down_write(&current->namespace->sem);
        down(&old_nd.dentry->d_inode->i_sem);

        if (!check_mnt(user_nd.mnt))

        if (IS_DEADDIR(new_nd.dentry->d_inode))
        if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
        if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))

        if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
                goto out2; /* loop */

        if (user_nd.mnt->mnt_root != user_nd.dentry)
        if (new_nd.mnt->mnt_root != new_nd.dentry)
                goto out2; /* not a mountpoint */

        tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
        spin_lock(&vfsmount_lock);
        if (tmp != new_nd.mnt) {
                if (tmp->mnt_parent == tmp)
                if (tmp->mnt_parent == new_nd.mnt)
                tmp = tmp->mnt_parent;
                if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
        } else if (!is_subdir(old_nd.dentry, new_nd.dentry))

        detach_mnt(new_nd.mnt, &parent_nd);
        detach_mnt(user_nd.mnt, &root_parent);
        attach_mnt(user_nd.mnt, &old_nd);
        attach_mnt(new_nd.mnt, &root_parent);
        spin_unlock(&vfsmount_lock);
        chroot_fs_refs(&user_nd, &new_nd);
        security_sb_post_pivotroot(&user_nd, &new_nd);

        path_release(&root_parent);
        path_release(&parent_nd);

        up(&old_nd.dentry->d_inode->i_sem);
        up_write(&current->namespace->sem);
        path_release(&user_nd);
        path_release(&old_nd);

        path_release(&new_nd);

        spin_unlock(&vfsmount_lock);
static void __init init_mount_tree(void)
        struct vfsmount *mnt;
        struct namespace *namespace;
        struct task_struct *g, *p;

        mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
        if (IS_ERR(mnt))
                panic("Can't create rootfs");
        namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
        if (!namespace)
                panic("Can't allocate initial namespace");
        atomic_set(&namespace->count, 1);
        INIT_LIST_HEAD(&namespace->list);
        init_rwsem(&namespace->sem);
        list_add(&mnt->mnt_list, &namespace->list);
        namespace->root = mnt;

        init_task.namespace = namespace;
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                get_namespace(namespace);
                p->namespace = namespace;
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
        set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
void __init mnt_init(unsigned long mempages)
        struct list_head *d;
        unsigned long order;
        unsigned int nr_hash;

        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

        order = 0;
        mount_hashtable = (struct list_head *)
                __get_free_pages(GFP_ATOMIC, order);

        if (!mount_hashtable)
                panic("Failed to allocate mount hash table\n");

        /*
         * Find the power-of-two list-heads that can fit into the allocation..
         * We don't guarantee that "sizeof(struct list_head)" is necessarily
         * a power-of-two.
         */
        nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
        hash_bits = 0;
        do {
                hash_bits++;
        } while ((nr_hash >> hash_bits) != 0);
        hash_bits--;

        /*
         * Re-calculate the actual number of entries and the mask
         * from the number of bits we can fit.
         */
        nr_hash = 1UL << hash_bits;
        hash_mask = nr_hash-1;

        printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
                        nr_hash, order, (PAGE_SIZE << order));
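        /*
         * Worked example (editor's illustration, assuming a 4096-byte page,
         * a 16-byte struct list_head and an order-0 allocation, none of
         * which is stated here): nr_hash starts as 4096 / 16 = 256, the
         * loop above settles on hash_bits = 8 (256 >> 8 == 1 while
         * 256 >> 9 == 0), so nr_hash is recomputed as 1 << 8 = 256 and
         * hash_mask becomes 255.
         */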
        /* And initialize the newly allocated array */
        d = mount_hashtable;
void __put_namespace(struct namespace *namespace)
{
        down_write(&namespace->sem);
        spin_lock(&vfsmount_lock);
        umount_tree(namespace->root);
        spin_unlock(&vfsmount_lock);
        up_write(&namespace->sem);
        kfree(namespace);
}