/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004-2005  Herbert Pötzl
 *
 *  V0.01  initial version
 *  V0.02  compat32 splitup
 *
 */

#include <linux/fs.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/vserver/switch.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>
#include <linux/vserver/dlimit_cmd.h>

#include <asm/errno.h>
#include <asm/uaccess.h>

/*	__alloc_dl_info()

	* allocate an initialized dl_info struct
	* doesn't make it visible (hash)			*/

static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *new = NULL;

	vxdprintk(VXD_CBIT(dlim, 5),
		"alloc_dl_info(%p,%d)*", sb, xid);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(struct dl_info));
	new->dl_xid = xid;
	new->dl_sb = sb;

	INIT_RCU_HEAD(&new->dl_rcu);
	INIT_HLIST_NODE(&new->dl_hlist);
	spin_lock_init(&new->dl_lock);
	atomic_set(&new->dl_refcnt, 0);
	atomic_set(&new->dl_usecnt, 0);

	/* rest of init goes here */

	vxdprintk(VXD_CBIT(dlim, 4),
		"alloc_dl_info(%p,%d) = %p", sb, xid, new);
	return new;
}

/*	__dealloc_dl_info()

	* final disposal of dl_info				*/

static void __dealloc_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 4),
		"dealloc_dl_info(%p)", dli);

	/* poison the list linkage so a stale user oopses loudly */
	dli->dl_hlist.next = LIST_POISON1;
	dli->dl_hlist.pprev = NULL;

	BUG_ON(atomic_read(&dli->dl_usecnt));
	BUG_ON(atomic_read(&dli->dl_refcnt));

	kfree(dli);
}

/*	hash table for dl_info hash */

#define DL_HASH_SIZE	13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;


static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
	return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}
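
/* illustrative note (not from the original source): DL_HASH_SIZE is a
 * small prime, so reducing (xid ^ sb) modulo 13 spreads entries across
 * buckets even though superblock pointers are heavily aligned; the same
 * xid used on two superblocks will normally hash to different chains,
 * keeping the per-bucket lists short. */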

/*	__hash_dl_info()

	* add the dli to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_dl_info(struct dl_info *dli)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(dlim, 6),
		"__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
	get_dl_info(dli);	/* the hash itself holds a reference */
	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
	hlist_add_head_rcu(&dli->dl_hlist, head);
}

/*	__unhash_dl_info()

	* remove the dli from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 6),
		"__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);	/* drop the hash reference */
}

/*	__lookup_dl_info()

	* requires the rcu_read_lock()
	* doesn't increment the dl_refcnt			*/

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
{
	struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
	struct hlist_node *pos;
	struct dl_info *dli;

	hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
		if (dli->dl_xid == xid && dli->dl_sb == sb)
			return dli;
	}
	return NULL;
}

struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *dli;

	rcu_read_lock();
	dli = get_dl_info(__lookup_dl_info(sb, xid));
	vxdprintk(VXD_CBIT(dlim, 7),
		"locate_dl_info(%p,#%d) = %p", sb, xid, dli);
	rcu_read_unlock();
	return dli;
}

void rcu_free_dl_info(struct rcu_head *head)
{
	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
	int usecnt, refcnt;

	BUG_ON(!dli || !head);

	usecnt = atomic_read(&dli->dl_usecnt);
	BUG_ON(usecnt < 0);

	refcnt = atomic_read(&dli->dl_refcnt);
	BUG_ON(refcnt < 0);

	vxdprintk(VXD_CBIT(dlim, 3),
		"rcu_free_dl_info(%p)", dli);
	if (!usecnt)
		__dealloc_dl_info(dli);
	else
		printk(KERN_ERR "!!! rcu didn't free\n");
}

static int do_addrem_dlimit(uint32_t id, const char __user *name,
	uint32_t flags, int add)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		if (add) {
			/* allocate outside the hash lock, then check for
			   a duplicate under the lock */
			dli = __alloc_dl_info(sb, id);
			spin_lock(&dl_info_hash_lock);

			ret = -EEXIST;
			if (__lookup_dl_info(sb, id))
				goto out_unlock;
			__hash_dl_info(dli);
			dli = NULL;
		} else {
			spin_lock(&dl_info_hash_lock);
			dli = __lookup_dl_info(sb, id);

			ret = -ESRCH;
			if (!dli)
				goto out_unlock;
			__unhash_dl_info(dli);
			dli = NULL;
		}
		ret = 0;
	out_unlock:
		spin_unlock(&dl_info_hash_lock);
		/* on the duplicate path the fresh allocation is unused */
		if (add && dli)
			__dealloc_dl_info(dli);
	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_add_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
}

int vc_rem_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
}

#ifdef	CONFIG_COMPAT

int vc_add_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
}

int vc_rem_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
}

#endif	/* CONFIG_COMPAT */

static inline
int do_set_dlimit(uint32_t id, const char __user *name,
	uint32_t space_used, uint32_t space_total,
	uint32_t inodes_used, uint32_t inodes_total,
	uint32_t reserved, uint32_t flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		/* sanity checks; CDLIM_KEEP leaves a field untouched */
		if ((reserved != (uint32_t)CDLIM_KEEP &&
			reserved > 100) ||
			(inodes_used != (uint32_t)CDLIM_KEEP &&
			inodes_used > inodes_total) ||
			(space_used != (uint32_t)CDLIM_KEEP &&
			space_used > space_total))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);

		if (inodes_used != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_used = inodes_used;
		if (inodes_total != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_total = inodes_total;
		if (space_used != (uint32_t)CDLIM_KEEP) {
			/* userspace passes kbytes, stored as bytes */
			dli->dl_space_used = space_used;
			dli->dl_space_used <<= 10;
		}
		if (space_total == (uint32_t)CDLIM_INFINITY)
			dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
		else if (space_total != (uint32_t)CDLIM_KEEP) {
			dli->dl_space_total = space_total;
			dli->dl_space_total <<= 10;
		}
		if (reserved != (uint32_t)CDLIM_KEEP)
			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
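
		/* worked example (numbers are illustrative, not from the
		 * original): reserved = 5 (percent) gives
		 * dl_nrlmult = 1024 * 95 / 100 = 972, i.e. ~0.949 in
		 * 10-bit fixed point, so non-root processes see about
		 * 95% of dl_space_total as their limit */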

		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;

	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_set_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, vc_data.name,
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#ifdef	CONFIG_COMPAT

int vc_set_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#endif	/* CONFIG_COMPAT */

static inline
int do_get_dlimit(uint32_t id, const char __user *name,
	uint32_t *space_used, uint32_t *space_total,
	uint32_t *inodes_used, uint32_t *inodes_total,
	uint32_t *reserved, uint32_t *flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);
		*inodes_used = dli->dl_inodes_used;
		*inodes_total = dli->dl_inodes_total;
		*space_used = dli->dl_space_used >> 10;
		if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
			*space_total = (uint32_t)CDLIM_INFINITY;
		else
			*space_total = dli->dl_space_total >> 10;

		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
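
		/* this is the inverse of the set-side formula; the +512
		 * (half of 1024) rounds to the nearest percent, e.g.
		 * dl_nrlmult = 972 yields 100 - ((972 * 100 + 512) >> 10)
		 * = 100 - 95 = 5 (example values only) */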

		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;

	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_get_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, vc_data.name,
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#ifdef	CONFIG_COMPAT

int vc_get_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#endif	/* CONFIG_COMPAT */

void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct dl_info *dli;
	__u64 blimit, bfree, bavail;
	__u32 ifree;

	dli = locate_dl_info(sb, vx_current_xid());
	if (!dli)
		return;

	spin_lock(&dli->dl_lock);
	if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
		goto no_ilim;

	/* reduce max inodes available to limit */
	if (buf->f_files > dli->dl_inodes_total)
		buf->f_files = dli->dl_inodes_total;

	/* inode hack for reiserfs */
	if ((buf->f_files == 0) && (dli->dl_inodes_total > 0)) {
		buf->f_files = dli->dl_inodes_total;
		buf->f_ffree = dli->dl_inodes_total;
	}

	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
	/* reduce free inodes to min */
	if (ifree < buf->f_ffree)
		buf->f_ffree = ifree;

no_ilim:
	if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
		goto no_blim;

	blimit = dli->dl_space_total >> sb->s_blocksize_bits;

	if (dli->dl_space_total < dli->dl_space_used)
		bfree = 0;
	else
		bfree = (dli->dl_space_total - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
	if (bavail < dli->dl_space_used)
		bavail = 0;
	else
		bavail = (bavail - dli->dl_space_used)
			>> sb->s_blocksize_bits;
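
	/* sketch of the arithmetic (illustrative values only): with a
	 * 1 GiB dl_space_total and dl_nrlmult = 972, bavail starts at
	 * (2^30 >> 10) * 972 = 1048576 * 972 bytes, roughly 0.95 GiB,
	 * before the used space is subtracted and the result is
	 * converted to blocks via s_blocksize_bits */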

	/* reduce max space available to limit */
	if (buf->f_blocks > blimit)
		buf->f_blocks = blimit;

	/* reduce free space to min */
	if (bfree < buf->f_bfree)
		buf->f_bfree = bfree;

	/* reduce avail space to min */
	if (bavail < buf->f_bavail)
		buf->f_bavail = bavail;

no_blim:
	spin_unlock(&dli->dl_lock);
	put_dl_info(dli);

	return;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);