/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004-2007  Herbert Pötzl
 *
 *  V0.01  initial version
 *  V0.02  compat32 splitup
 *
 */

#include <linux/fs.h>
// #include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_tag.h>
#include <linux/vs_dlimit.h>
#include <linux/vserver/switch.h>
#include <linux/vserver/dlimit_cmd.h>

#include <asm/errno.h>
#include <asm/uaccess.h>

/*	__alloc_dl_info()

	* allocate an initialized dl_info struct
	* doesn't make it visible (hash)			*/

static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
{
	struct dl_info *new = NULL;

	vxdprintk(VXD_CBIT(dlim, 5),
		"alloc_dl_info(%p,%d)*", sb, tag);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
	if (!new)
		return NULL;
	memset(new, 0, sizeof(struct dl_info));

	new->dl_tag = tag;
	new->dl_sb = sb;

	INIT_RCU_HEAD(&new->dl_rcu);
	INIT_HLIST_NODE(&new->dl_hlist);
	spin_lock_init(&new->dl_lock);
	atomic_set(&new->dl_refcnt, 0);
	atomic_set(&new->dl_usecnt, 0);

	/* rest of init goes here */

	vxdprintk(VXD_CBIT(dlim, 4),
		"alloc_dl_info(%p,%d) = %p", sb, tag, new);
	return new;
}

/*	__dealloc_dl_info()

	* final disposal of dl_info				*/

static void __dealloc_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 4),
		"dealloc_dl_info(%p)", dli);

	dli->dl_hlist.next = LIST_POISON1;

	BUG_ON(atomic_read(&dli->dl_usecnt));
	BUG_ON(atomic_read(&dli->dl_refcnt));

	kfree(dli);
}

/*	hash table for dl_info hash */

#define DL_HASH_SIZE	13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;


static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
{
	return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
}

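/* Note: the bucket index mixes the context tag with the superblock
 * pointer, so the same tag used on different filesystems lands in
 * different buckets; DL_HASH_SIZE is a small prime, presumably to
 * spread the modulo reasonably across the 13 chains. */
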
/*	__hash_dl_info()

	* add the dli to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_dl_info(struct dl_info *dli)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(dlim, 6),
		"__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
	get_dl_info(dli);
	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
	hlist_add_head_rcu(&dli->dl_hlist, head);
}

/*	__unhash_dl_info()

	* remove the dli from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 6),
		"__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);
}

/*	__lookup_dl_info()

	* requires the rcu_read_lock()
	* doesn't increment the dl_refcnt			*/

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
{
	struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
	struct hlist_node *pos;
	struct dl_info *dli;

	hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
		if (dli->dl_tag == tag && dli->dl_sb == sb)
			return dli;
	}
	return NULL;
}

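/* The lookup itself is lockless: readers only hold rcu_read_lock()
 * while walking the hash chain.  locate_dl_info() below wraps the
 * lookup in rcu_read_lock()/rcu_read_unlock() and takes a reference
 * via get_dl_info() before unlocking, so the returned dli remains
 * valid for the caller after the RCU read side ends. */
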
struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
{
	struct dl_info *dli;

	rcu_read_lock();
	dli = get_dl_info(__lookup_dl_info(sb, tag));
	vxdprintk(VXD_CBIT(dlim, 7),
		"locate_dl_info(%p,#%d) = %p", sb, tag, dli);
	rcu_read_unlock();
	return dli;
}

void rcu_free_dl_info(struct rcu_head *head)
{
	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
	int usecnt, refcnt;

	BUG_ON(!dli || !head);

	usecnt = atomic_read(&dli->dl_usecnt);
	BUG_ON(usecnt < 0);
	refcnt = atomic_read(&dli->dl_refcnt);
	BUG_ON(refcnt < 0);

	vxdprintk(VXD_CBIT(dlim, 3),
		"rcu_free_dl_info(%p)", dli);
	if (!usecnt)
		__dealloc_dl_info(dli);
	else
		printk("!!! rcu didn't free\n");
}

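/*	do_addrem_dlimit()

	* resolves 'name' to a superblock and either adds a dl_info
	  for (sb, id) to the hash (add = 1) or removes the existing
	  one (add = 0)
	* the new dl_info is allocated with GFP_KERNEL before the
	  hash spinlock is taken, presumably to keep the sleeping
	  allocation outside the critical section		*/
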
static int do_addrem_dlimit(uint32_t id, const char __user *name,
	uint32_t flags, int add)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		if (add) {
			dli = __alloc_dl_info(sb, id);
			ret = -ENOMEM;
			if (!dli)
				goto out_release;
			spin_lock(&dl_info_hash_lock);

			ret = -EEXIST;
			if (__lookup_dl_info(sb, id))
				goto out_unlock;
			__hash_dl_info(dli);
			dli = NULL;
		} else {
			spin_lock(&dl_info_hash_lock);
			dli = __lookup_dl_info(sb, id);

			ret = -ESRCH;
			if (!dli)
				goto out_unlock;
			__unhash_dl_info(dli);
			dli = NULL;
		}
		ret = 0;
	out_unlock:
		spin_unlock(&dl_info_hash_lock);
		if (add && dli)
			__dealloc_dl_info(dli);
	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_add_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
}

int vc_rem_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
}

#ifdef	CONFIG_COMPAT

int vc_add_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
}

int vc_rem_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_addrem_dlimit(id,
		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
}

#endif	/* CONFIG_COMPAT */

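/*	do_set_dlimit()

	* CDLIM_KEEP leaves the corresponding field untouched,
	  CDLIM_INFINITY lifts the space limit entirely
	* space values appear to be passed in KiB and are kept in
	  bytes internally, hence the '<<= 10' below
	* the reserved percentage is stored as dl_nrlmult, a 10-bit
	  fixed point multiplier: e.g. reserved = 5 yields
	  dl_nrlmult = (1 << 10) * 95 / 100 = 972		*/
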
int do_set_dlimit(uint32_t id, const char __user *name,
	uint32_t space_used, uint32_t space_total,
	uint32_t inodes_used, uint32_t inodes_total,
	uint32_t reserved, uint32_t flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;
		if ((reserved != CDLIM_KEEP &&
			reserved > 100) ||
			(inodes_used != CDLIM_KEEP &&
			inodes_used > inodes_total) ||
			(space_used != CDLIM_KEEP &&
			space_used > space_total))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);
		if (inodes_used != CDLIM_KEEP)
			dli->dl_inodes_used = inodes_used;
		if (inodes_total != CDLIM_KEEP)
			dli->dl_inodes_total = inodes_total;
		if (space_used != CDLIM_KEEP) {
			dli->dl_space_used = space_used;
			dli->dl_space_used <<= 10;
		}
		if (space_total == CDLIM_INFINITY)
			dli->dl_space_total = DLIM_INFINITY;
		else if (space_total != CDLIM_KEEP) {
			dli->dl_space_total = space_total;
			dli->dl_space_total <<= 10;
		}
		if (reserved != CDLIM_KEEP)
			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;
	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_set_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, vc_data.name,
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#ifdef	CONFIG_COMPAT

int vc_set_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
		vc_data.space_used, vc_data.space_total,
		vc_data.inodes_used, vc_data.inodes_total,
		vc_data.reserved, vc_data.flags);
}

#endif	/* CONFIG_COMPAT */

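/*	do_get_dlimit()

	* reverses the conversions done in do_set_dlimit(): byte
	  counts are shifted back down (>> 10), matching the
	  '<<= 10' on the set side, and the reserved percentage is
	  recovered from dl_nrlmult with rounding, e.g. for
	  dl_nrlmult = 972: 100 - ((972 * 100 + 512) >> 10)
	  = 100 - 95 = 5					*/
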
int do_get_dlimit(uint32_t id, const char __user *name,
	uint32_t *space_used, uint32_t *space_total,
	uint32_t *inodes_used, uint32_t *inodes_total,
	uint32_t *reserved, uint32_t *flags)
{
	struct nameidata nd;
	int ret;

	ret = user_path_walk_link(name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);
		*inodes_used = dli->dl_inodes_used;
		*inodes_total = dli->dl_inodes_total;
		*space_used = dli->dl_space_used >> 10;
		if (dli->dl_space_total == DLIM_INFINITY)
			*space_total = CDLIM_INFINITY;
		else
			*space_total = dli->dl_space_total >> 10;
		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;
	out_release:
		path_release(&nd);
	}
	return ret;
}

int vc_get_dlimit(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0 vc_data;
	int ret;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, vc_data.name,
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#ifdef	CONFIG_COMPAT

int vc_get_dlimit_x32(uint32_t id, void __user *data)
{
	struct vcmd_ctx_dlimit_v0_x32 vc_data;
	int ret;

	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
		&vc_data.space_used, &vc_data.space_total,
		&vc_data.inodes_used, &vc_data.inodes_total,
		&vc_data.reserved, &vc_data.flags);
	if (ret)
		return ret;

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

#endif	/* CONFIG_COMPAT */

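/*	vx_vsi_statfs()

	* clamps the statfs(2) results seen inside a context to its
	  disk limits: total and free inode/block counts are reduced
	  to the per-context values whenever those are smaller
	* dl_space_* is kept in bytes, so totals are shifted down by
	  s_blocksize_bits to get block counts; f_bavail additionally
	  scales the total by dl_nrlmult / 1024 to hide the
	  reserved portion					*/
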
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct dl_info *dli;
	__u64 blimit, bfree, bavail;
	__u32 ifree;

	dli = locate_dl_info(sb, dx_current_tag());
	if (!dli)
		return;

	spin_lock(&dli->dl_lock);
	if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
		goto no_ilim;

	/* reduce max inodes available to limit */
	if (buf->f_files > dli->dl_inodes_total)
		buf->f_files = dli->dl_inodes_total;

	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
	/* reduce free inodes to min */
	if (ifree < buf->f_ffree)
		buf->f_ffree = ifree;

no_ilim:
	if (dli->dl_space_total == DLIM_INFINITY)
		goto no_blim;

	blimit = dli->dl_space_total >> sb->s_blocksize_bits;

	if (dli->dl_space_total < dli->dl_space_used)
		bfree = 0;
	else
		bfree = (dli->dl_space_total - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
	if (bavail < dli->dl_space_used)
		bavail = 0;
	else
		bavail = (bavail - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	/* reduce max space available to limit */
	if (buf->f_blocks > blimit)
		buf->f_blocks = blimit;

	/* reduce free space to min */
	if (bfree < buf->f_bfree)
		buf->f_bfree = bfree;

	/* reduce avail space to min */
	if (bavail < buf->f_bavail)
		buf->f_bavail = bavail;

no_blim:
	spin_unlock(&dli->dl_lock);
	put_dl_info(dli);
	return;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);