/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
	return 0;
}
#endif
/* spinlock for vfsmount related operations, in place of dcache_lock */
spinlock_t vfsmount_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct list_head *mount_hashtable;
static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache;
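
/*
 * Keys are hashed on the (parent mount, mountpoint dentry) pair.  Both
 * pointers come out of slab caches, so their low address bits carry
 * little information; dividing by L1_CACHE_BYTES discards them before
 * the sum is folded down to hash_bits bits.
 */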
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_fslink);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * Now, lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp;
	struct vfsmount *p, *found = NULL;

	spin_lock(&vfsmount_lock);
	list_for_each(tmp, head) {
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = mntget(p);
			break;
		}
	}
	spin_unlock(&vfsmount_lock);
	return found;
}

EXPORT_SYMBOL(lookup_mnt);
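
/*
 * Sanity check used by every mount/umount entry point: a task may only
 * operate on vfsmounts that belong to its own namespace.
 */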
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_namespace == current->namespace;
}
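
/*
 * detach_mnt() unhooks a mount from its parent and reports the old
 * location in @old_nd so the caller can drop those references later;
 * attach_mnt() below is its exact inverse.
 */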
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt->mnt_parent = mntget(nd->mnt);
	mnt->mnt_mountpoint = dget(nd->dentry);
	list_add(&mnt->mnt_hash, mount_hashtable + hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
	nd->dentry->d_mounted++;
}
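
/*
 * Depth-first successor of @p within the tree rooted at @root: descend
 * into the first child if there is one, otherwise climb until a sibling
 * is found.  Returns NULL once the whole tree has been visited.
 */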
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
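
/*
 * Create a new mount of the same superblock, rooted at @root.  @root need
 * not be old->mnt_root, which is what makes bind mounts of subtrees work.
 */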
static struct vfsmount *
clone_mnt(struct vfsmount *old, struct dentry *root)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;
		mnt->mnt_namespace = old->mnt_namespace;

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		spin_lock(&vfsmount_lock);
		if (!list_empty(&old->mnt_fslink))
			list_add(&mnt->mnt_fslink, &old->mnt_fslink);
		spin_unlock(&vfsmount_lock);
	}
	return mnt;
}
void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

EXPORT_SYMBOL(__mntput);
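
/*
 * /proc/mounts support: a seq_file iterator over the namespace's mount
 * list, held stable by taking namespace->sem for reading.
 */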
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&n->sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct namespace *n = m->private;
	up_read(&n->sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
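
/*
 * Emit one /proc/mounts line: device, mountpoint, filesystem type, then
 * the per-superblock and per-mountpoint flags, with whitespace in names
 * escaped by mangle() so the line stays parseable.
 */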
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ MS_NOATIME, ",noatime" },
		{ MS_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	struct list_head *next;
	struct vfsmount *this_parent = mnt;
	int actual_refs;
	int minimum_refs;

	spin_lock(&vfsmount_lock);
	actual_refs = atomic_read(&mnt->mnt_count);
	minimum_refs = 2;
repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);
		next = next->next;
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
		if (!list_empty(&p->mnt_mounts)) {
			this_parent = p;
			goto repeat;
		}
	}
	if (this_parent != mnt) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	spin_unlock(&vfsmount_lock);

	return actual_refs > minimum_refs ? 0 : 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->mnt_count) > 2)
		return -EBUSY;
	return 0;
}

EXPORT_SYMBOL(may_umount);
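
/*
 * Tear down the tree rooted at @mnt.  Called with vfsmount_lock held;
 * the lock is dropped around the blocking path_release() and mntput()
 * calls and re-taken before the next victim is picked off the kill list.
 */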
void umount_tree(struct vfsmount *mnt)
{
	struct vfsmount *p;
	LIST_HEAD(kill);

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_list);
		list_add(&p->mnt_list, &kill);
	}

	while (!list_empty(&kill)) {
		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
		list_del_init(&mnt->mnt_list);
		list_del_init(&mnt->mnt_fslink);
		if (mnt->mnt_parent == mnt) {
			spin_unlock(&vfsmount_lock);
		} else {
			struct nameidata old_nd;
			detach_mnt(mnt, &old_nd);
			spin_unlock(&vfsmount_lock);
			path_release(&old_nd);
		}
		mntput(mnt);
		spin_lock(&vfsmount_lock);
	}
}
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block * sb = mnt->mnt_sb;
	int retval;

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	lock_kernel();
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&current->namespace->sem);
	spin_lock(&vfsmount_lock);

	if (atomic_read(&sb->s_active) == 1) {
		/* last instance - try to be smart */
		spin_unlock(&vfsmount_lock);
		lock_kernel();
		DQUOT_OFF(sb);
		acct_auto_close(sb);
		unlock_kernel();
		security_sb_umount_close(mnt);
		spin_lock(&vfsmount_lock);
	}
	retval = -EBUSY;
	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&current->namespace->sem);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif
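
/*
 * For now only CAP_SYS_ADMIN may create bind mounts; the per-user checks
 * below are compiled out until unprivileged mounts are supported.
 */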
static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
		return -EPERM;
	return 0;
#endif
}
static int
lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}
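
/*
 * Duplicate the subtree rooted at @mnt/@dentry: clone the root first,
 * then clone every mount whose mountpoint lies below @dentry and attach
 * the clone at the corresponding place in the copy.
 */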
static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct list_head *h;
	struct nameidata nd;

	res = q = clone_mnt(mnt, dentry);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
		r = list_entry(h, struct vfsmount, mnt_child);
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		spin_lock(&vfsmount_lock);
		umount_tree(res);
		spin_unlock(&vfsmount_lock);
	}
	return NULL;
}
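
/*
 * Splice a (possibly cloned) mount into the namespace at @nd.  The
 * mountpoint's i_sem is held across the checks so the target cannot be
 * unlinked under us.
 */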
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	spin_lock(&vfsmount_lock);
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
		struct list_head head;

		attach_mnt(mnt, nd);
		list_add_tail(&head, &mnt->mnt_list);
		list_splice(&head, current->namespace->list.prev);
		mntget(mnt);
		err = 0;
	}
	spin_unlock(&vfsmount_lock);
out_unlock:
	up(&nd->dentry->d_inode->i_sem);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}
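
/*
 * MS_BIND: make the tree at old_name visible at the target as well, by
 * cloning it (recursively for MS_REC) and grafting the copy at @nd.
 */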
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	err = -EINVAL;
	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
		err = -ENOMEM;
		if (recurse)
			mnt = copy_tree(old_nd.mnt, old_nd.dentry);
		else
			mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
	}

	if (mnt) {
		/* stop bind mounts from expiring */
		spin_lock(&vfsmount_lock);
		list_del_init(&mnt->mnt_fslink);
		spin_unlock(&vfsmount_lock);

		err = graft_tree(mnt, nd);
		if (err) {
			spin_lock(&vfsmount_lock);
			umount_tree(mnt);
			spin_unlock(&vfsmount_lock);
		} else
			mntput(mnt);
	}

	up_write(&current->namespace->sem);
	path_release(&old_nd);
	return err;
}
/*
 * Change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block * sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}
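
/*
 * MS_MOVE: detach an existing mount (which must be the root of its own
 * mount, not some subdirectory of it) and re-attach it at @nd, refusing
 * moves that would put a mount underneath itself.
 */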
static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	spin_lock(&vfsmount_lock);
	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out2;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out2;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out2;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out2;

	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out2;
	err = 0;

	detach_mnt(old_nd.mnt, &parent_nd);
	attach_mnt(old_nd.mnt, nd);

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_fslink);
out2:
	spin_unlock(&vfsmount_lock);
out1:
	up(&nd->dentry->d_inode->i_sem);
out:
	up_write(&current->namespace->sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&current->namespace->sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	err = graft_tree(newmnt, nd);

	if (err == 0 && fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_fslink, fslist);
		spin_unlock(&vfsmount_lock);
	}

unlock:
	up_write(&current->namespace->sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct namespace *namespace;
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_fslink) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_fslink, &graveyard);
	}

	/*
	 * go through the vfsmounts we've just consigned to the graveyard to
	 * - check that they're still dead
	 * - delete the vfsmount from the appropriate namespace under lock
	 * - dispose of the corpse
	 */
	while (!list_empty(&graveyard)) {
		mnt = list_entry(graveyard.next, struct vfsmount, mnt_fslink);
		list_del_init(&mnt->mnt_fslink);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		namespace = mnt->mnt_namespace;
		if (!namespace || atomic_read(&namespace->count) <= 0)
			continue;
		get_namespace(namespace);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace->sem);
		spin_lock(&vfsmount_lock);

		/* check that it is still dead: the count should now be 2 - as
		 * contributed by the vfsmount parent and the mntget above */
		if (atomic_read(&mnt->mnt_count) == 2) {
			struct vfsmount *xdmnt;
			struct dentry *xdentry;

			/* delete from the namespace */
			list_del_init(&mnt->mnt_list);
			list_del_init(&mnt->mnt_child);
			list_del_init(&mnt->mnt_hash);
			mnt->mnt_mountpoint->d_mounted--;

			xdentry = mnt->mnt_mountpoint;
			mnt->mnt_mountpoint = mnt->mnt_root;
			xdmnt = mnt->mnt_parent;
			mnt->mnt_parent = mnt;

			spin_unlock(&vfsmount_lock);

			dput(xdentry);
			mntput(xdmnt);

			/* now lay it to rest if this was the last ref on the
			 * superblock */
			if (atomic_read(&mnt->mnt_sb->s_active) == 1) {
				/* last instance - try to be smart */
				lock_kernel();
				DQUOT_OFF(mnt->mnt_sb);
				acct_auto_close(mnt->mnt_sb);
				unlock_kernel();
			}
			mntput(mnt);
		} else {
			/* someone brought it back to life whilst we didn't
			 * have any locks held so return it to the expiration
			 * list */
			list_add_tail(&mnt->mnt_fslink, mounts);
			spin_unlock(&vfsmount_lock);
		}

		up_write(&namespace->sem);

		mntput(mnt);
		put_namespace(namespace);

		spin_lock(&vfsmount_lock);
	}

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
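
/*
 * Copy mount options from userspace into a fresh page.  Only bytes below
 * TASK_SIZE can be read; whatever cannot be copied is zero-filled so
 * callers always see a full, NUL-padded page.
 */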
int copy_mount_options(const void __user *data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char * dev_name, char * dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}
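
/*
 * CLONE_NEWNS: give the child its own copy of the mount tree, and point
 * its root, pwd and altroot at the corresponding mounts in that copy.
 */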
int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct fs_struct *fs = tsk->fs;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		put_namespace(namespace);
		return -EPERM;
	}

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		goto out;

	atomic_set(&new_ns->count, 1);
	init_rwsem(&new_ns->sem);
	INIT_LIST_HEAD(&new_ns->list);

	down_write(&tsk->namespace->sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
	if (!new_ns->root) {
		up_write(&tsk->namespace->sem);
		kfree(new_ns);
		goto out;
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/* Second pass: switch the tsk->fs->* elements */
	if (fs) {
		struct vfsmount *p, *q;
		write_lock(&fs->lock);

		p = namespace->root;
		q = new_ns->root;
		while (p) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
			p = next_mnt(p, namespace->root);
			q = next_mnt(q, new_ns->root);
		}
		write_unlock(&fs->lock);
	}
	up_write(&tsk->namespace->sem);

	tsk->namespace = new_ns;

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	put_namespace(namespace);
	return 0;

out:
	put_namespace(namespace);
	return -ENOMEM;
}
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;

	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);

	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}

EXPORT_SYMBOL(set_fs_root);
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}

EXPORT_SYMBOL(set_fs_pwd);
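
/*
 * After pivot_root(), walk every task and switch any root or pwd still
 * referring to the old root mount over to the new one.
 */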
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry &&
			    fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry &&
			    fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the current root to put_old, and sets root/cwd of all processes
 * which had them on the current root to new_root.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();

	error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&current->namespace->sem);
	down(&old_nd.dentry->d_inode->i_sem);
	error = -EINVAL;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	up(&old_nd.dentry->d_inode->i_sem);
	up_write(&current->namespace->sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	unlock_kernel();
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
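
/*
 * Boot-time setup: mount rootfs, build the initial namespace around it,
 * and hand a reference to every existing task.
 */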
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_rwsem(&namespace->sem);
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;
	mnt->mnt_namespace = namespace;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}
void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	order = 0;
	mount_hashtable = (struct list_head *)
		__get_free_pages(GFP_ATOMIC, order);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
	sysfs_init();
	init_rootfs();
	init_mount_tree();
}
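
/*
 * Final namespace teardown, called once the last reference is gone:
 * clear the back-pointers and unmount everything in the tree.
 */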
void __put_namespace(struct namespace *namespace)
{
	struct vfsmount *mnt;

	down_write(&namespace->sem);
	spin_lock(&vfsmount_lock);

	list_for_each_entry(mnt, &namespace->list, mnt_list) {
		mnt->mnt_namespace = NULL;
	}

	umount_tree(namespace->root);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace->sem);
	kfree(namespace);
}