/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *  Copyright (C) 2004-2005  Herbert Pötzl
 *
 *  V0.01  initial version
 *  V0.02  compat32 splitup
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/vserver/switch.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>
#include <linux/vserver/dlimit_cmd.h>

#include <asm/errno.h>
#include <asm/uaccess.h>

/*	__alloc_dl_info()

	* allocate an initialized dl_info struct
	* doesn't make it visible (hash)			*/

static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *new = NULL;

	vxdprintk(VXD_CBIT(dlim, 5),
		"alloc_dl_info(%p,%d)*", sb, xid);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(struct dl_info));
	new->dl_xid = xid;
	new->dl_sb = sb;

	INIT_RCU_HEAD(&new->dl_rcu);
	INIT_HLIST_NODE(&new->dl_hlist);
	spin_lock_init(&new->dl_lock);
	atomic_set(&new->dl_refcnt, 0);
	atomic_set(&new->dl_usecnt, 0);

	/* rest of init goes here */

	vxdprintk(VXD_CBIT(dlim, 4),
		"alloc_dl_info(%p,%d) = %p", sb, xid, new);
	return new;
}

/*	__dealloc_dl_info()

	* final disposal of dl_info				*/

static void __dealloc_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 4),
		"dealloc_dl_info(%p)", dli);

	dli->dl_hlist.next = LIST_POISON1;

	BUG_ON(atomic_read(&dli->dl_usecnt));
	BUG_ON(atomic_read(&dli->dl_refcnt));

	kfree(dli);
}

/*	hash table for dl_info hash */

#define DL_HASH_SIZE	13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;

static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
	return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}

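/*	the hash is keyed on both the superblock and the context id,
	so each context (xid) carries at most one dl_info per
	filesystem						*/
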
/*	__hash_dl_info()

	* add the dli to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_dl_info(struct dl_info *dli)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(dlim, 6),
		"__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
	get_dl_info(dli);
	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
	hlist_add_head_rcu(&dli->dl_hlist, head);
}

/*	__unhash_dl_info()

	* remove the dli from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 6),
		"__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);
}

/*	__lookup_dl_info()

	* requires the rcu_read_lock()
	* doesn't increment the dl_refcnt			*/

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
{
	struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
	struct hlist_node *pos;

	hlist_for_each_rcu(pos, head) {
		struct dl_info *dli =
			hlist_entry(pos, struct dl_info, dl_hlist);

		if (dli->dl_xid == xid && dli->dl_sb == sb)
			return dli;
	}
	return NULL;
}

struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *dli;

	rcu_read_lock();
	dli = get_dl_info(__lookup_dl_info(sb, xid));
	vxdprintk(VXD_CBIT(dlim, 7),
		"locate_dl_info(%p,#%d) = %p", sb, xid, dli);
	rcu_read_unlock();
	return dli;
}

void rcu_free_dl_info(struct rcu_head *head)
{
	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
	int usecnt, refcnt;

	BUG_ON(!dli || !head);
	usecnt = atomic_read(&dli->dl_usecnt);
	BUG_ON(usecnt < 0);
	refcnt = atomic_read(&dli->dl_refcnt);
	BUG_ON(refcnt < 0);

	vxdprintk(VXD_CBIT(dlim, 3),
		"rcu_free_dl_info(%p)", dli);
	if (!usecnt)
		__dealloc_dl_info(dli);
	else
		printk("!!! rcu didn't free\n");
}

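/*	do_addrem_dlimit()

	* resolve the given path to a superblock and, depending
	  on 'add', hash a fresh dl_info for (sb, id) or unhash
	  the existing one					*/
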
int do_addrem_dlimit(uint32_t id, const char __user *name,
	uint32_t flags, int add)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		if (add) {
			dli = __alloc_dl_info(sb, id);
			spin_lock(&dl_info_hash_lock);

			ret = -EEXIST;
			if (__lookup_dl_info(sb, id))
				goto out_unlock;
			__hash_dl_info(dli);
			dli = NULL;
		} else {
			spin_lock(&dl_info_hash_lock);
			dli = __lookup_dl_info(sb, id);

			ret = -ESRCH;
			if (!dli)
				goto out_unlock;
			__unhash_dl_info(dli);
		}
		ret = 0;
	out_unlock:
		spin_unlock(&dl_info_hash_lock);
		if (add && dli)
			__dealloc_dl_info(dli);
	out_release:
		path_release(&nd);
	}
	return ret;
}

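/*	vc_add_dlimit() / vc_rem_dlimit()

	* vserver command entry points: copy the request
	  structure from userspace and call do_addrem_dlimit()	*/
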
int vc_add_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
}

int vc_rem_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
}

#ifdef	CONFIG_COMPAT

int vc_add_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
}

int vc_rem_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
}

#endif	/* CONFIG_COMPAT */

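/*	do_set_dlimit()

	* update the limits of an existing dl_info;
	  CDLIM_KEEP leaves a value untouched, CDLIM_INFINITY
	  disables the space limit; space values are passed in
	  KiB and stored in bytes (<< 10), and 'reserved' (in
	  percent) is folded into dl_nrlmult as
	  (1 << 10) * (100 - reserved) / 100			*/
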
int do_set_dlimit(uint32_t id, const char __user *name,
	uint32_t space_used, uint32_t space_total,
	uint32_t inodes_used, uint32_t inodes_total,
	uint32_t reserved, uint32_t flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;
		if ((reserved != (uint32_t)CDLIM_KEEP &&
			reserved > 100) ||
			(inodes_used != (uint32_t)CDLIM_KEEP &&
			inodes_used > inodes_total) ||
			(space_used != (uint32_t)CDLIM_KEEP &&
			space_used > space_total))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);

		if (inodes_used != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_used = inodes_used;
		if (inodes_total != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_total = inodes_total;
		if (space_used != (uint32_t)CDLIM_KEEP) {
			dli->dl_space_used = space_used;
			dli->dl_space_used <<= 10;
		}
		if (space_total == (uint32_t)CDLIM_INFINITY)
			dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
		else if (space_total != (uint32_t)CDLIM_KEEP) {
			dli->dl_space_total = space_total;
			dli->dl_space_total <<= 10;
		}
		if (reserved != (uint32_t)CDLIM_KEEP)
			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;

		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;

	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_set_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, vc_data.name,
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#ifdef	CONFIG_COMPAT

int vc_set_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#endif	/* CONFIG_COMPAT */

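/*	do_get_dlimit()

	* read back the current limits of a dl_info;
	  space values are converted back to KiB (>> 10) and
	  dl_nrlmult back to a reserved percentage		*/
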
int do_get_dlimit(uint32_t id, const char __user *name,
	uint32_t *space_used, uint32_t *space_total,
	uint32_t *inodes_used, uint32_t *inodes_total,
	uint32_t *reserved, uint32_t *flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);
		*inodes_used = dli->dl_inodes_used;
		*inodes_total = dli->dl_inodes_total;
		*space_used = dli->dl_space_used >> 10;
		if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
			*space_total = (uint32_t)CDLIM_INFINITY;
		else
			*space_total = dli->dl_space_total >> 10;
		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;
	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_get_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, vc_data.name,
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#ifdef	CONFIG_COMPAT

int vc_get_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#endif	/* CONFIG_COMPAT */

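/*	vx_vsi_statfs()

	* clamp statfs() results for the current context:
	  total/free inodes and total/free/available blocks are
	  reduced to the context's disk limits, with available
	  blocks additionally scaled by dl_nrlmult		*/
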
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct dl_info *dli;
	__u64 blimit, bfree, bavail;
	__u32 ifree;

	dli = locate_dl_info(sb, vx_current_xid());
	if (!dli)
		return;

	spin_lock(&dli->dl_lock);
	if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
		goto no_ilim;

	/* reduce max inodes available to limit */
	if (buf->f_files > dli->dl_inodes_total)
		buf->f_files = dli->dl_inodes_total;

	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
	/* reduce free inodes to min */
	if (ifree < buf->f_ffree)
		buf->f_ffree = ifree;

no_ilim:
	if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
		goto no_blim;

	blimit = dli->dl_space_total >> sb->s_blocksize_bits;

	if (dli->dl_space_total < dli->dl_space_used)
		bfree = 0;
	else
		bfree = (dli->dl_space_total - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
	if (bavail < dli->dl_space_used)
		bavail = 0;
	else
		bavail = (bavail - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	/* reduce max space available to limit */
	if (buf->f_blocks > blimit)
		buf->f_blocks = blimit;

	/* reduce free space to min */
	if (bfree < buf->f_bfree)
		buf->f_bfree = bfree;

	/* reduce avail space to min */
	if (bavail < buf->f_bavail)
		buf->f_bavail = bavail;

no_blim:
	spin_unlock(&dli->dl_lock);
	put_dl_info(dli);
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);