/*
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004-2007  Herbert Pötzl
 *
 *  V0.01  initial version
 *  V0.02  compat32 splitup
 *
 */
#include <linux/fs.h>
// #include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/vserver/switch.h>
#include <linux/vs_context.h>
#include <linux/vs_tag.h>
#include <linux/vs_dlimit.h>
#include <linux/vserver/dlimit_cmd.h>

#include <asm/errno.h>
#include <asm/uaccess.h>
* allocate an initialized dl_info struct
* doesn't make it visible (hash) */
-static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
+static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
{
struct dl_info *new = NULL;
-
- vxdprintk("alloc_dl_info(%p,%d)\n", sb, xid);
+
+ vxdprintk(VXD_CBIT(dlim, 5),
+ "alloc_dl_info(%p,%d)*", sb, tag);
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
return 0;
memset (new, 0, sizeof(struct dl_info));
- new->dl_xid = xid;
+ new->dl_tag = tag;
new->dl_sb = sb;
INIT_RCU_HEAD(&new->dl_rcu);
INIT_HLIST_NODE(&new->dl_hlist);
/* rest of init goes here */
- vxdprintk("alloc_dl_info(%p,%d) = %p\n", sb, xid, new);
+ vxdprintk(VXD_CBIT(dlim, 4),
+ "alloc_dl_info(%p,%d) = %p", sb, tag, new);
return new;
}
/*	__dealloc_dl_info()

	* poison and free a dl_info struct; must not be reachable
	  from the hash anymore (usecnt has to be zero) */

static void __dealloc_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 4),
		"dealloc_dl_info(%p)", dli);

	/* poison the list linkage so a stale reuse oopses loudly */
	dli->dl_hlist.next = LIST_POISON1;
	dli->dl_tag = -1;
	dli->dl_sb = 0;

	BUG_ON(atomic_read(&dli->dl_usecnt));

	/* NOTE(review): the function tail was missing from the mangled
	   patch; the kfree()/close reconstructed here — verify upstream */
	kfree(dli);
}
/* hash table for dl_info hash */
-#define DL_HASH_SIZE 13
+#define DL_HASH_SIZE 13
struct hlist_head dl_info_hash[DL_HASH_SIZE];
static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
-static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
+static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
{
- return ((xid ^ (unsigned int)sb) % DL_HASH_SIZE);
+ return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
}
/*	__hash_dl_info()

	* add a dl_info to the hash table, taking a reference via
	  get_dl_info(); callers hold dl_info_hash_lock (see
	  do_addrem_dlimit) */

static inline void __hash_dl_info(struct dl_info *dli)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(dlim, 6),
		"__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
	get_dl_info(dli);
	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
	hlist_add_head_rcu(&dli->dl_hlist, head);
}
/*	__unhash_dl_info()

	* remove a dl_info from the hash table and drop the hash
	  reference; callers hold dl_info_hash_lock (see
	  do_addrem_dlimit) */

static inline void __unhash_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 6),
		"__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);
}
-#define hlist_for_each_rcu(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1;}); \
- pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
-
-
/* __lookup_dl_info()
* requires the rcu_read_lock()
* doesn't increment the dl_refcnt */
-static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
+static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
{
- struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
+ struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
struct hlist_node *pos;
+ struct dl_info *dli;
- hlist_for_each_rcu(pos, head) {
- struct dl_info *dli =
- hlist_entry(pos, struct dl_info, dl_hlist);
+ hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
- if (dli->dl_xid == xid && dli->dl_sb == sb) {
+ if (dli->dl_tag == tag && dli->dl_sb == sb) {
return dli;
}
}
}
-struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
+struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
{
- struct dl_info *dli;
+ struct dl_info *dli;
rcu_read_lock();
- dli = get_dl_info(__lookup_dl_info(sb, xid));
+ dli = get_dl_info(__lookup_dl_info(sb, tag));
+ vxdprintk(VXD_CBIT(dlim, 7),
+ "locate_dl_info(%p,#%d) = %p", sb, tag, dli);
rcu_read_unlock();
- return dli;
+ return dli;
}
void rcu_free_dl_info(struct rcu_head *head)
struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
int usecnt, refcnt;
- BUG_ON(!dli);
+ BUG_ON(!dli || !head);
usecnt = atomic_read(&dli->dl_usecnt);
BUG_ON(usecnt < 0);
refcnt = atomic_read(&dli->dl_refcnt);
BUG_ON(refcnt < 0);
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "rcu_free_dl_info(%p)", dli);
if (!usecnt)
__dealloc_dl_info(dli);
else
-int vc_add_dlimit(uint32_t id, void __user *data)
+static int do_addrem_dlimit(uint32_t id, const char __user *name,
+ uint32_t flags, int add)
{
struct nameidata nd;
- struct vcmd_ctx_dlimit_base_v0 vc_data;
int ret;
- if (!vx_check(0, VX_ADMIN))
- return -ENOSYS;
- if (copy_from_user (&vc_data, data, sizeof(vc_data)))
- return -EFAULT;
-
- ret = user_path_walk_link(vc_data.name, &nd);
+ ret = user_path_walk_link(name, &nd);
if (!ret) {
struct super_block *sb;
struct dl_info *dli;
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
-
- dli = __alloc_dl_info(sb, id);
- spin_lock(&dl_info_hash_lock);
-
- ret = -EEXIST;
- if (__lookup_dl_info(sb, id))
- goto out_unlock;
- __hash_dl_info(dli);
- dli = NULL;
- ret = 0;
+ goto out_release;
+ if (add) {
+ dli = __alloc_dl_info(sb, id);
+ spin_lock(&dl_info_hash_lock);
+
+ ret = -EEXIST;
+ if (__lookup_dl_info(sb, id))
+ goto out_unlock;
+ __hash_dl_info(dli);
+ dli = NULL;
+ } else {
+ spin_lock(&dl_info_hash_lock);
+ dli = __lookup_dl_info(sb, id);
+
+ ret = -ESRCH;
+ if (!dli)
+ goto out_unlock;
+ __unhash_dl_info(dli);
+ }
+ ret = 0;
out_unlock:
- spin_unlock(&dl_info_hash_lock);
- if (dli)
+ spin_unlock(&dl_info_hash_lock);
+ if (add && dli)
__dealloc_dl_info(dli);
out_release:
path_release(&nd);
return ret;
}
+int vc_add_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+ if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
+}
/* vcmd entry: copy the base request from userspace and remove a limit */
int vc_rem_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
}
#ifdef	CONFIG_COMPAT

/* 32-bit-compat vcmd entry: the x32 struct carries the path as a
   compat pointer (name_ptr) instead of a native char pointer */
int vc_add_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
}

/* 32-bit-compat counterpart of vc_rem_dlimit() */
int vc_rem_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
}

#endif	/* CONFIG_COMPAT */
+
+
+static inline
+int do_set_dlimit(uint32_t id, const char __user *name,
+ uint32_t space_used, uint32_t space_total,
+ uint32_t inodes_used, uint32_t inodes_total,
+ uint32_t reserved, uint32_t flags)
+{
+ struct nameidata nd;
+ int ret;
+
+ ret = user_path_walk_link(name, &nd);
if (!ret) {
struct super_block *sb;
struct dl_info *dli;
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
- if (vc_data.reserved > 100 ||
- vc_data.inodes_used > vc_data.inodes_total ||
- vc_data.space_used > vc_data.space_total)
+ goto out_release;
+ if ((reserved != CDLIM_KEEP &&
+ reserved > 100) ||
+ (inodes_used != CDLIM_KEEP &&
+ inodes_used > inodes_total) ||
+ (space_used != CDLIM_KEEP &&
+ space_used > space_total))
goto out_release;
ret = -ESRCH;
if (!dli)
goto out_release;
- spin_lock(&dli->dl_lock);
+ spin_lock(&dli->dl_lock);
- if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
- dli->dl_inodes_used = vc_data.inodes_used;
- if (vc_data.inodes_total != (uint32_t)CDLIM_KEEP)
- dli->dl_inodes_total = vc_data.inodes_total;
- if (vc_data.space_used != (uint32_t)CDLIM_KEEP) {
- dli->dl_space_used = vc_data.space_used;
+ if (inodes_used != CDLIM_KEEP)
+ dli->dl_inodes_used = inodes_used;
+ if (inodes_total != CDLIM_KEEP)
+ dli->dl_inodes_total = inodes_total;
+ if (space_used != CDLIM_KEEP) {
+ dli->dl_space_used = space_used;
dli->dl_space_used <<= 10;
}
- if (vc_data.space_total == (uint32_t)CDLIM_INFINITY)
- dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
- else if (vc_data.space_total != (uint32_t)CDLIM_KEEP) {
- dli->dl_space_total = vc_data.space_total;
+ if (space_total == CDLIM_INFINITY)
+ dli->dl_space_total = DLIM_INFINITY;
+ else if (space_total != CDLIM_KEEP) {
+ dli->dl_space_total = space_total;
dli->dl_space_total <<= 10;
}
- if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
- dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;
+ if (reserved != CDLIM_KEEP)
+ dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
+
+ spin_unlock(&dli->dl_lock);
- spin_unlock(&dli->dl_lock);
-
put_dl_info(dli);
ret = 0;
return ret;
}
-int vc_get_dlimit(uint32_t id, void __user *data)
+int vc_set_dlimit(uint32_t id, void __user *data)
{
- struct nameidata nd;
struct vcmd_ctx_dlimit_v0 vc_data;
- int ret;
- if (!vx_check(0, VX_ADMIN))
- return -ENOSYS;
if (copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- ret = user_path_walk_link(vc_data.name, &nd);
+ return do_set_dlimit(id, vc_data.name,
+ vc_data.space_used, vc_data.space_total,
+ vc_data.inodes_used, vc_data.inodes_total,
+ vc_data.reserved, vc_data.flags);
+}
+
#ifdef	CONFIG_COMPAT

/* 32-bit-compat counterpart of vc_set_dlimit(): identical except
   for the compat pointer carrying the path name */
int vc_set_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#endif	/* CONFIG_COMPAT */
+static inline
+int do_get_dlimit(uint32_t id, const char __user *name,
+ uint32_t *space_used, uint32_t *space_total,
+ uint32_t *inodes_used, uint32_t *inodes_total,
+ uint32_t *reserved, uint32_t *flags)
+{
+ struct nameidata nd;
+ int ret;
+
+ ret = user_path_walk_link(name, &nd);
if (!ret) {
struct super_block *sb;
struct dl_info *dli;
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
- if (vc_data.reserved > 100 ||
- vc_data.inodes_used > vc_data.inodes_total ||
- vc_data.space_used > vc_data.space_total)
goto out_release;
ret = -ESRCH;
if (!dli)
goto out_release;
- spin_lock(&dli->dl_lock);
- vc_data.inodes_used = dli->dl_inodes_used;
- vc_data.inodes_total = dli->dl_inodes_total;
- vc_data.space_used = dli->dl_space_used >> 10;
- if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
- vc_data.space_total = (uint32_t)CDLIM_INFINITY;
+ spin_lock(&dli->dl_lock);
+ *inodes_used = dli->dl_inodes_used;
+ *inodes_total = dli->dl_inodes_total;
+ *space_used = dli->dl_space_used >> 10;
+ if (dli->dl_space_total == DLIM_INFINITY)
+ *space_total = CDLIM_INFINITY;
else
- vc_data.space_total = dli->dl_space_total >> 10;
+ *space_total = dli->dl_space_total >> 10;
+
+ *reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
+ spin_unlock(&dli->dl_lock);
- vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
- spin_unlock(&dli->dl_lock);
-
put_dl_info(dli);
ret = -EFAULT;
- if (copy_to_user(data, &vc_data, sizeof(vc_data)))
- goto out_release;
ret = 0;
out_release:
}
+int vc_get_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_v0 vc_data;
+ int ret;
+
+ if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_get_dlimit(id, vc_data.name,
+ &vc_data.space_used, &vc_data.space_total,
+ &vc_data.inodes_used, &vc_data.inodes_total,
+ &vc_data.reserved, &vc_data.flags);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef	CONFIG_COMPAT

/* 32-bit-compat counterpart of vc_get_dlimit() */
int vc_get_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;
	int ret;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#endif	/* CONFIG_COMPAT */
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
struct dl_info *dli;
- __u64 blimit, bfree, bavail;
- __u32 ifree;
-
- dli = locate_dl_info(sb, current->xid);
+ __u64 blimit, bfree, bavail;
+ __u32 ifree;
+
+ dli = locate_dl_info(sb, dx_current_tag());
if (!dli)
return;
spin_lock(&dli->dl_lock);
- if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
+ if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
goto no_ilim;
/* reduce max inodes available to limit */
buf->f_ffree = ifree;
no_ilim:
- if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
+ if (dli->dl_space_total == DLIM_INFINITY)
goto no_blim;
blimit = dli->dl_space_total >> sb->s_blocksize_bits;
no_blim:
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
-
- return;
+
+ return;
}
#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);