/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004-2005  Herbert Pötzl
 *
 *  V0.01  initial version
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/vserver/switch.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>

#include <asm/errno.h>
#include <asm/uaccess.h>
/*	__alloc_dl_info()

	* allocate an initialized dl_info struct
	* doesn't make it visible (hash)			*/

static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *new = NULL;

	vxdprintk(VXD_CBIT(dlim, 5),
		"alloc_dl_info(%p,%d)*", sb, xid);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(struct dl_info));
	new->dl_xid = xid;
	new->dl_sb = sb;
	INIT_RCU_HEAD(&new->dl_rcu);
	INIT_HLIST_NODE(&new->dl_hlist);
	spin_lock_init(&new->dl_lock);
	atomic_set(&new->dl_refcnt, 0);
	atomic_set(&new->dl_usecnt, 0);

	/* rest of init goes here */

	vxdprintk(VXD_CBIT(dlim, 4),
		"alloc_dl_info(%p,%d) = %p", sb, xid, new);
	return new;
}
/*	__dealloc_dl_info()

	* final disposal of dl_info				*/

static void __dealloc_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 4),
		"dealloc_dl_info(%p)", dli);

	dli->dl_hlist.next = LIST_POISON1;

	BUG_ON(atomic_read(&dli->dl_usecnt));
	BUG_ON(atomic_read(&dli->dl_refcnt));

	kfree(dli);
}
/*	hash table for dl_info hash */

#define DL_HASH_SIZE	13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
	return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}
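/* sketch of the distribution: super_block pointers come out of the slab
 * and are well aligned, so for small xids (xid ^ (unsigned long)sb) is
 * just sb + xid and contexts on the same sb spread over consecutive
 * buckets mod the prime 13 */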
/*	__hash_dl_info()

	* add the dli to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_dl_info(struct dl_info *dli)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(dlim, 6),
		"__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
	get_dl_info(dli);	/* the hash table holds a reference */
	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
	hlist_add_head_rcu(&dli->dl_hlist, head);
}
/*	__unhash_dl_info()

	* remove the dli from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_dl_info(struct dl_info *dli)
{
	vxdprintk(VXD_CBIT(dlim, 6),
		"__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
	hlist_del_rcu(&dli->dl_hlist);
	put_dl_info(dli);	/* drop the hash table's reference */
}
/*	__lookup_dl_info()

	* requires the rcu_read_lock()
	* doesn't increment the dl_refcnt			*/

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
{
	struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
	struct hlist_node *pos;

	hlist_for_each_rcu(pos, head) {
		struct dl_info *dli =
			hlist_entry(pos, struct dl_info, dl_hlist);

		if (dli->dl_xid == xid && dli->dl_sb == sb)
			return dli;
	}
	return NULL;
}
struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
	struct dl_info *dli;

	rcu_read_lock();
	dli = get_dl_info(__lookup_dl_info(sb, xid));
	vxdprintk(VXD_CBIT(dlim, 7),
		"locate_dl_info(%p,#%d) = %p", sb, xid, dli);
	rcu_read_unlock();
	return dli;
}
void rcu_free_dl_info(struct rcu_head *head)
{
	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
	int usecnt, refcnt;

	BUG_ON(!dli || !head);

	usecnt = atomic_read(&dli->dl_usecnt);
	BUG_ON(usecnt < 0);

	refcnt = atomic_read(&dli->dl_refcnt);
	BUG_ON(refcnt < 0);

	vxdprintk(VXD_CBIT(dlim, 3),
		"rcu_free_dl_info(%p)", dli);
	if (!usecnt)
		__dealloc_dl_info(dli);
	else
		printk("!!! rcu didn't free\n");
}
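/* this is the RCU callback half of the teardown: put_dl_info() (in
 * vs_dlimit.h) is expected to hand dl_rcu to call_rcu() once the
 * refcount drops to zero, so __dealloc_dl_info() only runs after a
 * grace period, i.e. after all __lookup_dl_info() readers are done */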
int vc_add_dlimit(uint32_t id, void __user *data)
{
	struct nameidata nd;
	struct vcmd_ctx_dlimit_base_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = user_path_walk_link(vc_data.name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		ret = -ENOMEM;
		dli = __alloc_dl_info(sb, id);
		if (!dli)
			goto out_release;
		spin_lock(&dl_info_hash_lock);

		ret = -EEXIST;
		if (__lookup_dl_info(sb, id))
			goto out_unlock;
		__hash_dl_info(dli);
		dli = NULL;
		ret = 0;

	out_unlock:
		spin_unlock(&dl_info_hash_lock);
		if (dli)
			__dealloc_dl_info(dli);
	out_release:
		path_release(&nd);
	}
	return ret;
}
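/* userspace sketch (command name and flags field assumed from the
 * vserver API headers, not defined in this file); any path on the
 * target filesystem identifies the super_block:
 *
 *	struct vcmd_ctx_dlimit_base_v0 req = {
 *		.name = "/vservers/v1",
 *		.flags = 0,
 *	};
 *	vserver(VCMD_add_dlimit, 42, &req);	// xid 42
 */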
int vc_rem_dlimit(uint32_t id, void __user *data)
{
	struct nameidata nd;
	struct vcmd_ctx_dlimit_base_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = user_path_walk_link(vc_data.name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		spin_lock(&dl_info_hash_lock);
		dli = __lookup_dl_info(sb, id);

		ret = -ESRCH;
		if (!dli)
			goto out_unlock;

		__unhash_dl_info(dli);
		ret = 0;

	out_unlock:
		spin_unlock(&dl_info_hash_lock);
	out_release:
		path_release(&nd);
	}
	return ret;
}
int vc_set_dlimit(uint32_t id, void __user *data)
{
	struct nameidata nd;
	struct vcmd_ctx_dlimit_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = user_path_walk_link(vc_data.name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;

		/* sanity checks: percentage in range, used <= total */
		if ((vc_data.reserved != (uint32_t)CDLIM_KEEP &&
			vc_data.reserved > 100) ||
			(vc_data.inodes_used != (uint32_t)CDLIM_KEEP &&
			vc_data.inodes_used > vc_data.inodes_total) ||
			(vc_data.space_used != (uint32_t)CDLIM_KEEP &&
			vc_data.space_used > vc_data.space_total))
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);

		if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_used = vc_data.inodes_used;
		if (vc_data.inodes_total != (uint32_t)CDLIM_KEEP)
			dli->dl_inodes_total = vc_data.inodes_total;
		if (vc_data.space_used != (uint32_t)CDLIM_KEEP) {
			dli->dl_space_used = vc_data.space_used;
			dli->dl_space_used <<= 10;	/* KiB -> bytes */
		}
		if (vc_data.space_total == (uint32_t)CDLIM_INFINITY)
			dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
		else if (vc_data.space_total != (uint32_t)CDLIM_KEEP) {
			dli->dl_space_total = vc_data.space_total;
			dli->dl_space_total <<= 10;	/* KiB -> bytes */
		}
		if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
			dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;

		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = 0;

	out_release:
		path_release(&nd);
	}
	return ret;
}
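/* worked example for the fixed-point math above: reserved = 5 (percent)
 * gives dl_nrlmult = 1024 * 95 / 100 = 972, so vx_vsi_statfs() below
 * reports roughly 972/1024, i.e. ~95% of dl_space_total, as available */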
int vc_get_dlimit(uint32_t id, void __user *data)
{
	struct nameidata nd;
	struct vcmd_ctx_dlimit_v0 vc_data;
	int ret;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	ret = user_path_walk_link(vc_data.name, &nd);
	if (!ret) {
		struct super_block *sb;
		struct dl_info *dli;

		ret = -EINVAL;
		if (!nd.dentry->d_inode)
			goto out_release;
		if (!(sb = nd.dentry->d_inode->i_sb))
			goto out_release;
		if (vc_data.reserved > 100 ||
			vc_data.inodes_used > vc_data.inodes_total ||
			vc_data.space_used > vc_data.space_total)
			goto out_release;

		ret = -ESRCH;
		dli = locate_dl_info(sb, id);
		if (!dli)
			goto out_release;

		spin_lock(&dli->dl_lock);
		vc_data.inodes_used = dli->dl_inodes_used;
		vc_data.inodes_total = dli->dl_inodes_total;
		vc_data.space_used = dli->dl_space_used >> 10;	/* bytes -> KiB */
		if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
			vc_data.space_total = (uint32_t)CDLIM_INFINITY;
		else
			vc_data.space_total = dli->dl_space_total >> 10;
		/* round the fixed-point multiplier back to a percentage */
		vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
		spin_unlock(&dli->dl_lock);

		put_dl_info(dli);
		ret = -EFAULT;
		if (copy_to_user(data, &vc_data, sizeof(vc_data)))
			goto out_release;

		ret = 0;
	out_release:
		path_release(&nd);
	}
	return ret;
}
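/* round-trip check of the +512 rounding above: dl_nrlmult = 972 yields
 * (972 * 100 + 512) >> 10 = 97712 / 1024 = 95 (rounded down), hence
 * reserved = 100 - 95 = 5, matching the value passed to vc_set_dlimit */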
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct dl_info *dli;
	__u64 blimit, bfree, bavail;
	__u32 ifree;

	dli = locate_dl_info(sb, vx_current_xid());
	if (!dli)
		return;

	spin_lock(&dli->dl_lock);
	if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
		goto no_ilim;

	/* reduce max inodes available to limit */
	if (buf->f_files > dli->dl_inodes_total)
		buf->f_files = dli->dl_inodes_total;

	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
	/* reduce free inodes to min */
	if (ifree < buf->f_ffree)
		buf->f_ffree = ifree;

no_ilim:
	if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
		goto no_blim;

	blimit = dli->dl_space_total >> sb->s_blocksize_bits;

	if (dli->dl_space_total < dli->dl_space_used)
		bfree = 0;
	else
		bfree = (dli->dl_space_total - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
	if (bavail < dli->dl_space_used)
		bavail = 0;
	else
		bavail = (bavail - dli->dl_space_used)
			>> sb->s_blocksize_bits;

	/* reduce max space available to limit */
	if (buf->f_blocks > blimit)
		buf->f_blocks = blimit;

	/* reduce free space to min */
	if (bfree < buf->f_bfree)
		buf->f_bfree = bfree;

	/* reduce avail space to min */
	if (bavail < buf->f_bavail)
		buf->f_bavail = bavail;

no_blim:
	spin_unlock(&dli->dl_lock);
	put_dl_info(dli);
}
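/* worked example (illustrative numbers): dl_space_total = 1 GiB and
 * dl_nrlmult = 972 give bavail = (2^30 >> 10) * 972 = 972 * 2^20 bytes
 * (~95% of the limit); dl_space_used is then subtracted and the result
 * converted to blocks via s_blocksize_bits */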
#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);