*
* Virtual Server: Context Disk Limits
*
- * Copyright (C) 2004 Herbert Pötzl
+ * Copyright (C) 2004-2005 Herbert Pötzl
*
* V0.01 initial version
*
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/vserver/switch.h>
-#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>
/* __alloc_dl_info()
 * kmalloc and preinit a new dl_info struct for the (sb, xid) pair.
 * NOTE(review): the kmalloc return is not checked in the visible hunk;
 * presumably the elided init code bails out on NULL — confirm. */
static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *new = NULL;
-
-	vxdprintk("alloc_dl_info(%p,%d)\n", sb, xid);
+
+	vxdprintk(VXD_CBIT(dlim, 5),
+		"alloc_dl_info(%p,%d)*", sb, xid);
	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
	/* rest of init goes here */
-	vxdprintk("alloc_dl_info(%p,%d) = %p\n", sb, xid, new);
+	vxdprintk(VXD_CBIT(dlim, 4),
+		"alloc_dl_info(%p,%d) = %p", sb, xid, new);
	return new;
}
static void __dealloc_dl_info(struct dl_info *dli)
{
- vxdprintk("dealloc_dl_info(%p)\n", dli);
+ vxdprintk(VXD_CBIT(dlim, 4),
+ "dealloc_dl_info(%p)", dli);
dli->dl_hlist.next = LIST_POISON1;
dli->dl_xid = -1;
/* hash table for dl_info hash */
-#define DL_HASH_SIZE 13
+#define DL_HASH_SIZE 13
struct hlist_head dl_info_hash[DL_HASH_SIZE];
/* fold (sb, xid) into a dl_info_hash bucket index;
 * the cast is widened to unsigned long so the full pointer value
 * participates in the hash on 64-bit targets (unsigned int
 * previously truncated it) */
static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
-	return ((xid ^ (unsigned int)sb) % DL_HASH_SIZE);
+	return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}
static inline void __hash_dl_info(struct dl_info *dli)
{
struct hlist_head *head;
-
- vxdprintk("__hash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
+
+ vxdprintk(VXD_CBIT(dlim, 6),
+ "__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
get_dl_info(dli);
head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
hlist_add_head_rcu(&dli->dl_hlist, head);
/* remove dli from the dl_info hash and drop the hash's reference;
 * callers hold dl_info_hash_lock (see the vc_* handlers below),
 * while concurrent lookups stay safe via hlist_del_rcu */
static inline void __unhash_dl_info(struct dl_info *dli)
{
-	vxdprintk("__unhash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
+	vxdprintk(VXD_CBIT(dlim, 6),
+		"__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);
}
-#define hlist_for_each_rcu(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1;}); \
- pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
-
-
/* __lookup_dl_info()
* requires the rcu_read_lock()
/* locate_dl_info()
 * look up the dl_info for (sb, xid) under rcu_read_lock and take a
 * reference on it; the caller must drop it with put_dl_info().
 * NOTE(review): presumably get_dl_info(NULL) yields NULL when no
 * entry is hashed — confirm against the get_dl_info definition. */
struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
-	struct dl_info *dli;
+	struct dl_info *dli;
	rcu_read_lock();
	dli = get_dl_info(__lookup_dl_info(sb, xid));
+	vxdprintk(VXD_CBIT(dlim, 7),
+		"locate_dl_info(%p,#%d) = %p", sb, xid, dli);
	rcu_read_unlock();
-	return dli;
+	return dli;
}
-void rcu_free_dl_info(void *obj)
+void rcu_free_dl_info(struct rcu_head *head)
{
- struct dl_info *dli = obj;
+ struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
int usecnt, refcnt;
- BUG_ON(!dli);
+ BUG_ON(!dli || !head);
usecnt = atomic_read(&dli->dl_usecnt);
BUG_ON(usecnt < 0);
refcnt = atomic_read(&dli->dl_refcnt);
BUG_ON(refcnt < 0);
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "rcu_free_dl_info(%p)", dli);
if (!usecnt)
__dealloc_dl_info(dli);
else
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
-
+ goto out_release;
+
dli = __alloc_dl_info(sb, id);
- spin_lock(&dl_info_hash_lock);
+ spin_lock(&dl_info_hash_lock);
ret = -EEXIST;
if (__lookup_dl_info(sb, id))
- goto out_unlock;
+ goto out_unlock;
__hash_dl_info(dli);
dli = NULL;
ret = 0;
out_unlock:
- spin_unlock(&dl_info_hash_lock);
+ spin_unlock(&dl_info_hash_lock);
if (dli)
__dealloc_dl_info(dli);
out_release:
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
-
- spin_lock(&dl_info_hash_lock);
+ goto out_release;
+
+ spin_lock(&dl_info_hash_lock);
dli = __lookup_dl_info(sb, id);
ret = -ESRCH;
if (!dli)
goto out_unlock;
-
+
__unhash_dl_info(dli);
ret = 0;
out_unlock:
- spin_unlock(&dl_info_hash_lock);
+ spin_unlock(&dl_info_hash_lock);
out_release:
path_release(&nd);
}
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
- if (vc_data.reserved > 100 ||
- vc_data.inodes_used > vc_data.inodes_total ||
- vc_data.space_used > vc_data.space_total)
+ goto out_release;
+ if ((vc_data.reserved != (uint32_t)CDLIM_KEEP &&
+ vc_data.reserved > 100) ||
+ (vc_data.inodes_used != (uint32_t)CDLIM_KEEP &&
+ vc_data.inodes_used > vc_data.inodes_total) ||
+ (vc_data.space_used != (uint32_t)CDLIM_KEEP &&
+ vc_data.space_used > vc_data.space_total))
goto out_release;
ret = -ESRCH;
if (!dli)
goto out_release;
- spin_lock(&dli->dl_lock);
+ spin_lock(&dli->dl_lock);
if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
dli->dl_inodes_used = vc_data.inodes_used;
if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;
- spin_unlock(&dli->dl_lock);
-
+ spin_unlock(&dli->dl_lock);
+
put_dl_info(dli);
ret = 0;
if (!nd.dentry->d_inode)
goto out_release;
if (!(sb = nd.dentry->d_inode->i_sb))
- goto out_release;
+ goto out_release;
if (vc_data.reserved > 100 ||
vc_data.inodes_used > vc_data.inodes_total ||
vc_data.space_used > vc_data.space_total)
if (!dli)
goto out_release;
- spin_lock(&dli->dl_lock);
+ spin_lock(&dli->dl_lock);
vc_data.inodes_used = dli->dl_inodes_used;
vc_data.inodes_total = dli->dl_inodes_total;
vc_data.space_used = dli->dl_space_used >> 10;
vc_data.space_total = dli->dl_space_total >> 10;
vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
- spin_unlock(&dli->dl_lock);
-
+ spin_unlock(&dli->dl_lock);
+
put_dl_info(dli);
ret = -EFAULT;
if (copy_to_user(data, &vc_data, sizeof(vc_data)))
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
struct dl_info *dli;
- __u64 blimit, bfree, bavail;
- __u32 ifree;
-
- dli = locate_dl_info(sb, current->xid);
+ __u64 blimit, bfree, bavail;
+ __u32 ifree;
+
+ dli = locate_dl_info(sb, vx_current_xid());
if (!dli)
return;
no_blim:
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
-
- return;
+
+ return;
}
+#include <linux/module.h>
+
+/* make the dl_info lookup/free primitives available to GPL modules
+ * NOTE(review): an #include at the end of the file is unconventional;
+ * consider moving <linux/module.h> up with the other includes */
+EXPORT_SYMBOL_GPL(locate_dl_info);
+EXPORT_SYMBOL_GPL(rcu_free_dl_info);
+