/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004  Herbert Pötzl
 *
 *  V0.01  initial version
 *
 */
12 #include <linux/config.h>
14 #include <linux/namespace.h>
15 #include <linux/namei.h>
16 #include <linux/statfs.h>
17 #include <linux/vserver/switch.h>
18 #include <linux/vs_base.h>
19 #include <linux/vs_context.h>
20 #include <linux/vs_dlimit.h>
22 #include <asm/errno.h>
23 #include <asm/uaccess.h>
27 * allocate an initialized dl_info struct
28 * doesn't make it visible (hash) */
30 static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
32 struct dl_info *new = NULL;
34 vxdprintk("alloc_dl_info(%p,%d)\n", sb, xid);
36 /* would this benefit from a slab cache? */
37 new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
41 memset (new, 0, sizeof(struct dl_info));
44 INIT_RCU_HEAD(&new->dl_rcu);
45 INIT_HLIST_NODE(&new->dl_hlist);
46 spin_lock_init(&new->dl_lock);
47 atomic_set(&new->dl_refcnt, 0);
48 atomic_set(&new->dl_usecnt, 0);
50 /* rest of init goes here */
52 vxdprintk("alloc_dl_info(%p,%d) = %p\n", sb, xid, new);
56 /* __dealloc_dl_info()
58 * final disposal of dl_info */
60 static void __dealloc_dl_info(struct dl_info *dli)
62 vxdprintk("dealloc_dl_info(%p)\n", dli);
64 dli->dl_hlist.next = LIST_POISON1;
68 BUG_ON(atomic_read(&dli->dl_usecnt));
69 BUG_ON(atomic_read(&dli->dl_refcnt));
75 /* hash table for dl_info hash */
77 #define DL_HASH_SIZE 13
79 struct hlist_head dl_info_hash[DL_HASH_SIZE];
81 static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
84 static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
86 return ((xid ^ (unsigned int)sb) % DL_HASH_SIZE);
93 * add the dli to the global hash table
94 * requires the hash_lock to be held */
96 static inline void __hash_dl_info(struct dl_info *dli)
98 struct hlist_head *head;
100 vxdprintk("__hash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
102 head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
103 hlist_add_head_rcu(&dli->dl_hlist, head);
106 /* __unhash_dl_info()
108 * remove the dli from the global hash table
109 * requires the hash_lock to be held */
111 static inline void __unhash_dl_info(struct dl_info *dli)
113 vxdprintk("__unhash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
114 hlist_del_rcu(&dli->dl_hlist);
/* RCU-safe hlist traversal: prefetches the next node and issues
 * smp_read_barrier_depends() on each advance so the dependent load
 * of pos->next is ordered on architectures that need it (e.g. Alpha) */
#define hlist_for_each_rcu(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1;}); \
		pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
124 /* __lookup_dl_info()
126 * requires the rcu_read_lock()
127 * doesn't increment the dl_refcnt */
129 static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
131 struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
132 struct hlist_node *pos;
134 hlist_for_each_rcu(pos, head) {
135 struct dl_info *dli =
136 hlist_entry(pos, struct dl_info, dl_hlist);
138 if (dli->dl_xid == xid && dli->dl_sb == sb) {
146 struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
151 dli = get_dl_info(__lookup_dl_info(sb, xid));
156 void rcu_free_dl_info(struct rcu_head *head)
158 struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
161 BUG_ON(!dli || !head);
163 usecnt = atomic_read(&dli->dl_usecnt);
166 refcnt = atomic_read(&dli->dl_refcnt);
170 __dealloc_dl_info(dli);
172 printk("!!! rcu didn't free\n");
178 int vc_add_dlimit(uint32_t id, void __user *data)
181 struct vcmd_ctx_dlimit_base_v0 vc_data;
184 if (!vx_check(0, VX_ADMIN))
186 if (copy_from_user (&vc_data, data, sizeof(vc_data)))
189 ret = user_path_walk_link(vc_data.name, &nd);
191 struct super_block *sb;
195 if (!nd.dentry->d_inode)
197 if (!(sb = nd.dentry->d_inode->i_sb))
200 dli = __alloc_dl_info(sb, id);
201 spin_lock(&dl_info_hash_lock);
204 if (__lookup_dl_info(sb, id))
211 spin_unlock(&dl_info_hash_lock);
213 __dealloc_dl_info(dli);
221 int vc_rem_dlimit(uint32_t id, void __user *data)
224 struct vcmd_ctx_dlimit_base_v0 vc_data;
227 if (!vx_check(0, VX_ADMIN))
229 if (copy_from_user (&vc_data, data, sizeof(vc_data)))
232 ret = user_path_walk_link(vc_data.name, &nd);
234 struct super_block *sb;
238 if (!nd.dentry->d_inode)
240 if (!(sb = nd.dentry->d_inode->i_sb))
243 spin_lock(&dl_info_hash_lock);
244 dli = __lookup_dl_info(sb, id);
250 __unhash_dl_info(dli);
254 spin_unlock(&dl_info_hash_lock);
262 int vc_set_dlimit(uint32_t id, void __user *data)
265 struct vcmd_ctx_dlimit_v0 vc_data;
268 if (!vx_check(0, VX_ADMIN))
270 if (copy_from_user (&vc_data, data, sizeof(vc_data)))
273 ret = user_path_walk_link(vc_data.name, &nd);
275 struct super_block *sb;
279 if (!nd.dentry->d_inode)
281 if (!(sb = nd.dentry->d_inode->i_sb))
283 if (vc_data.reserved > 100 ||
284 vc_data.inodes_used > vc_data.inodes_total ||
285 vc_data.space_used > vc_data.space_total)
289 dli = locate_dl_info(sb, id);
293 spin_lock(&dli->dl_lock);
295 if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
296 dli->dl_inodes_used = vc_data.inodes_used;
297 if (vc_data.inodes_total != (uint32_t)CDLIM_KEEP)
298 dli->dl_inodes_total = vc_data.inodes_total;
299 if (vc_data.space_used != (uint32_t)CDLIM_KEEP) {
300 dli->dl_space_used = vc_data.space_used;
301 dli->dl_space_used <<= 10;
303 if (vc_data.space_total == (uint32_t)CDLIM_INFINITY)
304 dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
305 else if (vc_data.space_total != (uint32_t)CDLIM_KEEP) {
306 dli->dl_space_total = vc_data.space_total;
307 dli->dl_space_total <<= 10;
309 if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
310 dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;
312 spin_unlock(&dli->dl_lock);
323 int vc_get_dlimit(uint32_t id, void __user *data)
326 struct vcmd_ctx_dlimit_v0 vc_data;
329 if (!vx_check(0, VX_ADMIN))
331 if (copy_from_user (&vc_data, data, sizeof(vc_data)))
334 ret = user_path_walk_link(vc_data.name, &nd);
336 struct super_block *sb;
340 if (!nd.dentry->d_inode)
342 if (!(sb = nd.dentry->d_inode->i_sb))
344 if (vc_data.reserved > 100 ||
345 vc_data.inodes_used > vc_data.inodes_total ||
346 vc_data.space_used > vc_data.space_total)
350 dli = locate_dl_info(sb, id);
354 spin_lock(&dli->dl_lock);
355 vc_data.inodes_used = dli->dl_inodes_used;
356 vc_data.inodes_total = dli->dl_inodes_total;
357 vc_data.space_used = dli->dl_space_used >> 10;
358 if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
359 vc_data.space_total = (uint32_t)CDLIM_INFINITY;
361 vc_data.space_total = dli->dl_space_total >> 10;
363 vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
364 spin_unlock(&dli->dl_lock);
368 if (copy_to_user(data, &vc_data, sizeof(vc_data)))
379 void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
382 __u64 blimit, bfree, bavail;
385 dli = locate_dl_info(sb, current->xid);
389 spin_lock(&dli->dl_lock);
390 if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
393 /* reduce max inodes available to limit */
394 if (buf->f_files > dli->dl_inodes_total)
395 buf->f_files = dli->dl_inodes_total;
397 ifree = dli->dl_inodes_total - dli->dl_inodes_used;
398 /* reduce free inodes to min */
399 if (ifree < buf->f_ffree)
400 buf->f_ffree = ifree;
403 if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
406 blimit = dli->dl_space_total >> sb->s_blocksize_bits;
408 if (dli->dl_space_total < dli->dl_space_used)
411 bfree = (dli->dl_space_total - dli->dl_space_used)
412 >> sb->s_blocksize_bits;
414 bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
415 if (bavail < dli->dl_space_used)
418 bavail = (bavail - dli->dl_space_used)
419 >> sb->s_blocksize_bits;
421 /* reduce max space available to limit */
422 if (buf->f_blocks > blimit)
423 buf->f_blocks = blimit;
425 /* reduce free space to min */
426 if (bfree < buf->f_bfree)
427 buf->f_bfree = bfree;
429 /* reduce avail space to min */
430 if (bavail < buf->f_bavail)
431 buf->f_bavail = bavail;
434 spin_unlock(&dli->dl_lock);