#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
+#include <linux/vs_base.h>
+
#include <asm/uaccess.h>
+#include <asm/unistd.h>
extern int __init init_rootfs(void);
/* spinlock for vfsmount related operations, inplace of dcache_lock */
spinlock_t vfsmount_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+
static struct list_head *mount_hashtable;
static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache;
};
struct proc_fs_info *fs_infop;
+ if (vx_flags(VXF_HIDE_MOUNT, 0))
+ return 0;
+
mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
seq_putc(m, ' ');
seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
.show = show_vfsmnt
};
-/*
+/**
+ * may_umount_tree - check if a mount tree is busy
+ * @mnt: root of mount tree
+ *
+ * This is called to check if a tree of mounts has any
+ * open files, pwds, chroots or sub mounts that are
+ * busy.
+ *
+ * Returns 0 if the whole tree may be unmounted, -EBUSY otherwise.
+ */
+int may_umount_tree(struct vfsmount *mnt)
+{
+ struct list_head *next;
+ struct vfsmount *this_parent = mnt;
+ int actual_refs;
+ int minimum_refs;
+
+ /* Hold vfsmount_lock for the whole walk so neither the mount
+  * topology nor the per-mount refcounts can change under us. */
+ spin_lock(&vfsmount_lock);
+ actual_refs = atomic_read(&mnt->mnt_count);
+ /* Two references are expected on the root even when idle
+  * (NOTE(review): presumably the caller's pin plus the tree's own
+  * structural reference - confirm against the full file). */
+ minimum_refs = 2;
+repeat:
+ next = this_parent->mnt_mounts.next;
+resume:
+ /* Iterative depth-first traversal over mnt_mounts/mnt_child;
+  * the repeat/resume labels descend into and return from subtrees. */
+ while (next != &this_parent->mnt_mounts) {
+ struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);
+
+ next = next->next;
+
+ /* Each child likewise carries two expected references. */
+ actual_refs += atomic_read(&p->mnt_count);
+ minimum_refs += 2;
+
+ if (!list_empty(&p->mnt_mounts)) {
+ this_parent = p;
+ goto repeat;
+ }
+ }
+
+ /* Finished a subtree: pop back up and continue with our siblings. */
+ if (this_parent != mnt) {
+ next = this_parent->mnt_child.next;
+ this_parent = this_parent->mnt_parent;
+ goto resume;
+ }
+ spin_unlock(&vfsmount_lock);
+
+ /* Any reference beyond the structural minimum means an open file,
+  * pwd, chroot or similar user is pinning some mount in the tree. */
+ if (actual_refs > minimum_refs)
+ return -EBUSY;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(may_umount_tree);
+
+/**
+ * may_umount - check if a mount point is busy
+ * @mnt: root of mount
+ *
+ * This is called to check if a mount point has any
+ * open files, pwds, chroots or sub mounts. If the
+ * mount has sub mounts this will return busy
+ * regardless of whether the sub mounts are busy.
+ *
* Doesn't take quota and stuff into account. IOW, in some cases it will
* give false negatives. The main reason why it's here is that we need
* a non-destructive way to look for easily umountable filesystems.
EXPORT_SYMBOL(may_umount);
-void umount_tree(struct vfsmount *mnt)
+/*
+ * __umount_tree - detach and dispose of every mount queued on @kill.
+ * Common tail shared by umount_tree() and umount_unused().
+ * Runs under vfsmount_lock, which it drops (and, per the visible
+ * context, re-enters the loop holding again).
+ * NOTE(review): this hunk elides part of the loop body (the attached-
+ * mount branch) - verify against the full source before relying on
+ * these comments.
+ */
+static inline void __umount_tree(struct vfsmount *mnt, struct list_head *kill)
 {
-	struct vfsmount *p;
-	LIST_HEAD(kill);
-
-	for (p = mnt; p; p = next_mnt(p, mnt)) {
-		list_del(&p->mnt_list);
-		list_add(&p->mnt_list, &kill);
-	}
-
-	while (!list_empty(&kill)) {
-		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
+	while (!list_empty(kill)) {
+		mnt = list_entry(kill->next, struct vfsmount, mnt_list);
 		list_del_init(&mnt->mnt_list);
+		/* A mount that is its own parent is not attached anywhere,
+		 * so there is nothing to detach before releasing it. */
 		if (mnt->mnt_parent == mnt) {
 			spin_unlock(&vfsmount_lock);
 		}
 	}
+/*
+ * umount_tree - tear down the whole mount tree rooted at @mnt.
+ * Collects every mount in the subtree onto a private kill list,
+ * then disposes of them via __umount_tree().
+ * NOTE(review): callers appear to hold vfsmount_lock - confirm, the
+ * call sites are in elided parts of this patch.
+ */
+void umount_tree(struct vfsmount *mnt)
+{
+ struct vfsmount *p;
+ LIST_HEAD(kill);
+
+ /* next_mnt() enumerates the subtree; move each mount from the
+  * global mount list onto our local kill list. */
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ list_del(&p->mnt_list);
+ list_add(&p->mnt_list, &kill);
+ }
+ __umount_tree(mnt, &kill);
+}
+
+/*
+ * umount_unused - like umount_tree(), but keep the mounts that @fs
+ * still uses as its root or current working directory.
+ * VServer-specific helper (NOTE(review): caller context is not
+ * visible in this hunk - verify how @fs relates to @mnt's tree).
+ */
+void umount_unused(struct vfsmount *mnt, struct fs_struct *fs)
+{
+ struct vfsmount *p;
+ LIST_HEAD(kill);
+
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ /* Leave the root and cwd mounts of @fs attached. */
+ if (p == fs->rootmnt || p == fs->pwdmnt)
+ continue;
+ list_del(&p->mnt_list);
+ list_add(&p->mnt_list, &kill);
+ }
+ __umount_tree(mnt, &kill);
+}
+
static int do_umount(struct vfsmount *mnt, int flags)
{
struct super_block * sb = mnt->mnt_sb;
goto dput_and_out;
retval = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SECURE_MOUNT))
goto dput_and_out;
retval = do_umount(nd.mnt, flags);
return retval;
}
+#ifdef __ARCH_WANT_SYS_OLDUMOUNT
+
/*
* The 2.0 compatible umount. No flags.
*/
return sys_umount(name,0);
}
+#endif
+
static int mount_is_safe(struct nameidata *nd)
{
if (capable(CAP_SYS_ADMIN))
return 0;
+ if (vx_ccaps(VXC_SECURE_MOUNT))
+ return 0;
return -EPERM;
#ifdef notyet
if (S_ISLNK(nd->dentry->d_inode->i_mode))
return -EINVAL;
/* we need capabilities... */
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SECURE_MOUNT))
return -EPERM;
mnt = do_kern_mount(type, flags, name, data);
mnt_flags |= MNT_NOEXEC;
flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
+ if (vx_ccaps(VXC_SECURE_MOUNT))
+ mnt_flags |= MNT_NODEV;
+
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
if (retval)
int i;
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!mnt_cache)
- panic("Cannot create vfsmount cache");
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
order = 0;
mount_hashtable = (struct list_head *)