/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004  Herbert Pötzl
 *
 *  V0.01  initial version
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/vserver/switch.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>

#include <asm/errno.h>
#include <asm/uaccess.h>

/*      __alloc_dl_info()

        * allocate an initialized dl_info struct
        * doesn't make it visible (hash)                        */

static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
        struct dl_info *new = NULL;

        vxdprintk(VXD_CBIT(dlim, 5),
                "alloc_dl_info(%p,%d)*", sb, xid);

        /* would this benefit from a slab cache? */
        new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(struct dl_info));
        new->dl_xid = xid;
        new->dl_sb = sb;

        INIT_RCU_HEAD(&new->dl_rcu);
        INIT_HLIST_NODE(&new->dl_hlist);
        spin_lock_init(&new->dl_lock);
        atomic_set(&new->dl_refcnt, 0);
        atomic_set(&new->dl_usecnt, 0);

        /* rest of init goes here */

        vxdprintk(VXD_CBIT(dlim, 4),
                "alloc_dl_info(%p,%d) = %p", sb, xid, new);
        return new;
}

/*      __dealloc_dl_info()

        * final disposal of dl_info                             */

static void __dealloc_dl_info(struct dl_info *dli)
{
        vxdprintk(VXD_CBIT(dlim, 4),
                "dealloc_dl_info(%p)", dli);

        dli->dl_hlist.next = LIST_POISON1;
        dli->dl_hlist.pprev = NULL;

        BUG_ON(atomic_read(&dli->dl_usecnt));
        BUG_ON(atomic_read(&dli->dl_refcnt));

        kfree(dli);
}

/*      hash table for dl_info hash */

#define DL_HASH_SIZE    13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;

static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
        return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}
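
/*
 * Example (addresses made up for illustration): xid 42 on a superblock
 * at 0xf7a3c400 selects bucket (42 ^ 0xf7a3c400) % 13; folding the
 * superblock pointer into the hash keeps the same xid on different
 * filesystems from always landing in the same bucket.
 */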

/*      __hash_dl_info()

        * add the dli to the global hash table
        * requires the hash_lock to be held                    */

static inline void __hash_dl_info(struct dl_info *dli)
{
        struct hlist_head *head;

        vxdprintk(VXD_CBIT(dlim, 6),
                "__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
        get_dl_info(dli);
        head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
        hlist_add_head_rcu(&dli->dl_hlist, head);
}

/*      __unhash_dl_info()

        * remove the dli from the global hash table
        * requires the hash_lock to be held                    */

static inline void __unhash_dl_info(struct dl_info *dli)
{
        vxdprintk(VXD_CBIT(dlim, 6),
                "__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
        hlist_del_rcu(&dli->dl_hlist);
        put_dl_info(dli);
}

/*      __lookup_dl_info()

        * requires the rcu_read_lock()
        * doesn't increment the dl_refcnt                      */

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
{
        struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
        struct hlist_node *pos;

        hlist_for_each_rcu(pos, head) {
                struct dl_info *dli =
                        hlist_entry(pos, struct dl_info, dl_hlist);

                if (dli->dl_xid == xid && dli->dl_sb == sb)
                        return dli;
        }
        return NULL;
}

struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
        struct dl_info *dli;

        rcu_read_lock();
        dli = get_dl_info(__lookup_dl_info(sb, xid));
        vxdprintk(VXD_CBIT(dlim, 7),
                "locate_dl_info(%p,#%d) = %p", sb, xid, dli);
        rcu_read_unlock();
        return dli;
}

void rcu_free_dl_info(struct rcu_head *head)
{
        struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
        int usecnt, refcnt;

        BUG_ON(!dli || !head);

        usecnt = atomic_read(&dli->dl_usecnt);
        BUG_ON(usecnt < 0);

        refcnt = atomic_read(&dli->dl_refcnt);
        BUG_ON(refcnt < 0);

        vxdprintk(VXD_CBIT(dlim, 3),
                "rcu_free_dl_info(%p)", dli);
        if (!usecnt)
                __dealloc_dl_info(dli);
        else
                printk("!!! rcu didn't free\n");
}
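
/*
 * Note on object lifetime (inferred from the primitives used above; the
 * get_dl_info()/put_dl_info() helpers are presumably defined in
 * <linux/vs_dlimit.h>): entries are published with hlist_add_head_rcu()
 * and traversed under rcu_read_lock(), so a dl_info must not be freed
 * until a grace period has elapsed after it was unhashed. Dropping the
 * last reference is therefore expected to defer disposal through
 * call_rcu(&dli->dl_rcu, rcu_free_dl_info) rather than calling
 * __dealloc_dl_info() directly.
 */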

int vc_add_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_base_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;

                ret = -ENOMEM;
                dli = __alloc_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dl_info_hash_lock);
                ret = -EEXIST;
                if (__lookup_dl_info(sb, id))
                        goto out_unlock;
                __hash_dl_info(dli);
                dli = NULL;
                ret = 0;
        out_unlock:
                spin_unlock(&dl_info_hash_lock);
                if (dli)
                        __dealloc_dl_info(dli);
        out_release:
                path_release(&nd);
        }
        return ret;
}

int vc_rem_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_base_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;

                spin_lock(&dl_info_hash_lock);
                dli = __lookup_dl_info(sb, id);

                ret = -ESRCH;
                if (!dli)
                        goto out_unlock;

                __unhash_dl_info(dli);
                ret = 0;
        out_unlock:
                spin_unlock(&dl_info_hash_lock);
        out_release:
                path_release(&nd);
        }
        return ret;
}

int vc_set_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;

                /* reject values that are neither CDLIM_KEEP nor sane */
                if ((vc_data.reserved != (uint32_t)CDLIM_KEEP &&
                        vc_data.reserved > 100) ||
                        (vc_data.inodes_used != (uint32_t)CDLIM_KEEP &&
                        vc_data.inodes_used > vc_data.inodes_total) ||
                        (vc_data.space_used != (uint32_t)CDLIM_KEEP &&
                        vc_data.space_used > vc_data.space_total))
                        goto out_release;

                ret = -ESRCH;
                dli = locate_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dli->dl_lock);

                if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
                        dli->dl_inodes_used = vc_data.inodes_used;
                if (vc_data.inodes_total != (uint32_t)CDLIM_KEEP)
                        dli->dl_inodes_total = vc_data.inodes_total;
                /* space values are passed in KiB, stored in bytes */
                if (vc_data.space_used != (uint32_t)CDLIM_KEEP) {
                        dli->dl_space_used = vc_data.space_used;
                        dli->dl_space_used <<= 10;
                }
                if (vc_data.space_total == (uint32_t)CDLIM_INFINITY)
                        dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
                else if (vc_data.space_total != (uint32_t)CDLIM_KEEP) {
                        dli->dl_space_total = vc_data.space_total;
                        dli->dl_space_total <<= 10;
                }
                if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
                        dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;

                spin_unlock(&dli->dl_lock);

                put_dl_info(dli);
                ret = 0;
        out_release:
                path_release(&nd);
        }
        return ret;
}
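
/*
 * Example (values are illustrative, not from the source): reserving 5%
 * yields dl_nrlmult = (1 << 10) * (100 - 5) / 100 = 1024 * 95 / 100
 * = 972, a 10-bit fixed-point factor of 972/1024 ~= 94.9% that is
 * later used to scale the usable share of dl_space_total.
 */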

int vc_get_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;
                if (vc_data.reserved > 100 ||
                        vc_data.inodes_used > vc_data.inodes_total ||
                        vc_data.space_used > vc_data.space_total)
                        goto out_release;

                ret = -ESRCH;
                dli = locate_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dli->dl_lock);
                vc_data.inodes_used = dli->dl_inodes_used;
                vc_data.inodes_total = dli->dl_inodes_total;
                vc_data.space_used = dli->dl_space_used >> 10;
                if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
                        vc_data.space_total = (uint32_t)CDLIM_INFINITY;
                else
                        vc_data.space_total = dli->dl_space_total >> 10;
                vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
                spin_unlock(&dli->dl_lock);

                put_dl_info(dli);
                ret = -EFAULT;
                if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                        goto out_release;
                ret = 0;
        out_release:
                path_release(&nd);
        }
        return ret;
}
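
/*
 * Example (continuing the 5% case above): dl_nrlmult = 972 reads back
 * as reserved = 100 - ((972 * 100 + 512) >> 10)
 *             = 100 - (97712 >> 10) = 100 - 95 = 5,
 * where the "+ 512" rounds the 10-bit fixed-point value to the
 * nearest percent.
 */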

void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
        struct dl_info *dli;
        __u64 blimit, bfree, bavail;
        __u32 ifree;

        dli = locate_dl_info(sb, current->xid);
        if (!dli)
                return;

        spin_lock(&dli->dl_lock);
        if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
                goto no_ilim;

        /* reduce max inodes available to limit */
        if (buf->f_files > dli->dl_inodes_total)
                buf->f_files = dli->dl_inodes_total;

        ifree = dli->dl_inodes_total - dli->dl_inodes_used;
        /* reduce free inodes to min */
        if (ifree < buf->f_ffree)
                buf->f_ffree = ifree;

no_ilim:
        if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
                goto no_blim;

        blimit = dli->dl_space_total >> sb->s_blocksize_bits;

        if (dli->dl_space_total < dli->dl_space_used)
                bfree = 0;
        else
                bfree = (dli->dl_space_total - dli->dl_space_used)
                        >> sb->s_blocksize_bits;

        bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
        if (bavail < dli->dl_space_used)
                bavail = 0;
        else
                bavail = (bavail - dli->dl_space_used)
                        >> sb->s_blocksize_bits;

        /* reduce max space available to limit */
        if (buf->f_blocks > blimit)
                buf->f_blocks = blimit;

        /* reduce free space to min */
        if (bfree < buf->f_bfree)
                buf->f_bfree = bfree;

        /* reduce avail space to min */
        if (bavail < buf->f_bavail)
                buf->f_bavail = bavail;

no_blim:
        spin_unlock(&dli->dl_lock);
        put_dl_info(dli);
}
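
/*
 * Worked example (illustrative values): dl_space_total = 1 GiB,
 * dl_space_used = 0, dl_nrlmult = 972 (5% reserved) and
 * s_blocksize_bits = 10 (1 KiB blocks):
 *   blimit = 2^30 >> 10                        = 1048576 blocks
 *   bavail = (((2^30 >> 10) * 972) - 0) >> 10  =  995328 blocks (~95%)
 * so statfs() inside the context reports at most 1 GiB of space with
 * roughly 5% of it held back as reserve.
 */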

#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);