/*
 *  linux/kernel/vserver/dlimit.c
 *
 *  Virtual Server: Context Disk Limits
 *
 *  Copyright (C) 2004  Herbert Pötzl
 *
 *  V0.01  initial version
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/vserver/switch.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_dlimit.h>

#include <asm/errno.h>
#include <asm/uaccess.h>

/*      __alloc_dl_info()

        * allocate an initialized dl_info struct
        * doesn't make it visible (hash)                        */

static struct dl_info *__alloc_dl_info(struct super_block *sb, xid_t xid)
{
        struct dl_info *new = NULL;

        vxdprintk(VXD_CBIT(dlim, 5),
                "alloc_dl_info(%p,%d)*", sb, xid);

        /* would this benefit from a slab cache? */
        new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(struct dl_info));
        new->dl_xid = xid;
        new->dl_sb = sb;
        INIT_RCU_HEAD(&new->dl_rcu);
        INIT_HLIST_NODE(&new->dl_hlist);
        spin_lock_init(&new->dl_lock);
        atomic_set(&new->dl_refcnt, 0);
        atomic_set(&new->dl_usecnt, 0);

        /* rest of init goes here */

        vxdprintk(VXD_CBIT(dlim, 4),
                "alloc_dl_info(%p,%d) = %p", sb, xid, new);
        return new;
}

/*      __dealloc_dl_info()

        * final disposal of dl_info                             */

static void __dealloc_dl_info(struct dl_info *dli)
{
        vxdprintk(VXD_CBIT(dlim, 4),
                "dealloc_dl_info(%p)", dli);

        dli->dl_hlist.next = LIST_POISON1;
        dli->dl_xid = -1;
        dli->dl_sb = NULL;

        BUG_ON(atomic_read(&dli->dl_usecnt));
        BUG_ON(atomic_read(&dli->dl_refcnt));

        kfree(dli);
}


/*      hash table for dl_info hash */

#define DL_HASH_SIZE    13

struct hlist_head dl_info_hash[DL_HASH_SIZE];

static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;

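/*      __hashval()

        * bucket index for the (sb, xid) pair: xor the context id
        * with the superblock pointer, modulo the (prime) table size  */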
static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
        return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
}


/*      __hash_dl_info()

        * add the dli to the global hash table
        * requires the hash_lock to be held                     */

static inline void __hash_dl_info(struct dl_info *dli)
{
        struct hlist_head *head;

        vxdprintk(VXD_CBIT(dlim, 6),
                "__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
        get_dl_info(dli);
        head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
        hlist_add_head_rcu(&dli->dl_hlist, head);
}

/*      __unhash_dl_info()

        * remove the dli from the global hash table
        * requires the hash_lock to be held                     */

static inline void __unhash_dl_info(struct dl_info *dli)
{
        vxdprintk(VXD_CBIT(dlim, 6),
                "__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
        hlist_del_rcu(&dli->dl_hlist);
        put_dl_info(dli);
}

/*      __lookup_dl_info()

        * requires the rcu_read_lock()
        * doesn't increment the dl_refcnt                       */

static inline struct dl_info *__lookup_dl_info(struct super_block *sb, xid_t xid)
{
        struct hlist_head *head = &dl_info_hash[__hashval(sb, xid)];
        struct hlist_node *pos;

        hlist_for_each_rcu(pos, head) {
                struct dl_info *dli =
                        hlist_entry(pos, struct dl_info, dl_hlist);

                if (dli->dl_xid == xid && dli->dl_sb == sb) {
                        return dli;
                }
        }
        return NULL;
}

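/*      locate_dl_info()

        * lookup the dl_info for (sb, xid) and take a reference on it
        * returns NULL if nothing is hashed; get_dl_info() (defined in
        * vs_dlimit.h, not shown here) presumably tolerates a NULL
        * argument, so a failed lookup simply falls through            */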
struct dl_info *locate_dl_info(struct super_block *sb, xid_t xid)
{
        struct dl_info *dli;

        rcu_read_lock();
        dli = get_dl_info(__lookup_dl_info(sb, xid));
        vxdprintk(VXD_CBIT(dlim, 7),
                "locate_dl_info(%p,#%d) = %p", sb, xid, dli);
        rcu_read_unlock();
        return dli;
}

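/*      rcu_free_dl_info()

        * deferred disposal after an RCU grace period; this is the RCU
        * callback (presumably registered via call_rcu() on the put
        * path in vs_dlimit.h) and only frees the dl_info once its
        * dl_usecnt has dropped to zero                                 */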
void rcu_free_dl_info(struct rcu_head *head)
{
        struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
        int usecnt, refcnt;

        BUG_ON(!dli || !head);

        usecnt = atomic_read(&dli->dl_usecnt);
        BUG_ON(usecnt < 0);

        refcnt = atomic_read(&dli->dl_refcnt);
        BUG_ON(refcnt < 0);

        vxdprintk(VXD_CBIT(dlim, 3),
                "rcu_free_dl_info(%p)", dli);
        if (!usecnt)
                __dealloc_dl_info(dli);
        else
                printk(KERN_WARNING "!!! rcu didn't free\n");
}



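/*      vc_add_dlimit()

        * vserver command: create and hash a disk limit entry for
        * context <id> on the filesystem containing vc_data.name
        * fails with -EEXIST if an entry for (sb, id) already exists    */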
int vc_add_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_base_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;

                /* allocate before taking the hash lock; bail out on
                   allocation failure instead of hashing a NULL entry */
                ret = -ENOMEM;
                dli = __alloc_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dl_info_hash_lock);

                ret = -EEXIST;
                if (__lookup_dl_info(sb, id))
                        goto out_unlock;
                __hash_dl_info(dli);
                dli = NULL;
                ret = 0;

        out_unlock:
                spin_unlock(&dl_info_hash_lock);
                if (dli)
                        __dealloc_dl_info(dli);
        out_release:
                path_release(&nd);
        }
        return ret;
}

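/*      vc_rem_dlimit()

        * vserver command: unhash the disk limit entry for context <id>
        * on the filesystem containing vc_data.name (-ESRCH if none)    */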
int vc_rem_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_base_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;

                spin_lock(&dl_info_hash_lock);
                dli = __lookup_dl_info(sb, id);

                ret = -ESRCH;
                if (!dli)
                        goto out_unlock;

                __unhash_dl_info(dli);
                ret = 0;

        out_unlock:
                spin_unlock(&dl_info_hash_lock);
        out_release:
                path_release(&nd);
        }
        return ret;
}

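/*      vc_set_dlimit()

        * vserver command: update the limits of an existing entry
        * CDLIM_KEEP leaves a field unchanged, CDLIM_INFINITY means
        * unlimited; the space values appear to be passed in KiB and
        * are shifted left by 10 for storage in bytes, and 'reserved'
        * (a root-reserved percentage) is stored as dl_nrlmult, a
        * fixed-point multiplier with 10 fractional bits, e.g. a 5%
        * reserve yields (1 << 10) * 95 / 100 = 972                     */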
int vc_set_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;
                if ((vc_data.reserved != (uint32_t)CDLIM_KEEP &&
                        vc_data.reserved > 100) ||
                        (vc_data.inodes_used != (uint32_t)CDLIM_KEEP &&
                        vc_data.inodes_used > vc_data.inodes_total) ||
                        (vc_data.space_used != (uint32_t)CDLIM_KEEP &&
                        vc_data.space_used > vc_data.space_total))
                        goto out_release;

                ret = -ESRCH;
                dli = locate_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dli->dl_lock);

                if (vc_data.inodes_used != (uint32_t)CDLIM_KEEP)
                        dli->dl_inodes_used = vc_data.inodes_used;
                if (vc_data.inodes_total != (uint32_t)CDLIM_KEEP)
                        dli->dl_inodes_total = vc_data.inodes_total;
                if (vc_data.space_used != (uint32_t)CDLIM_KEEP) {
                        dli->dl_space_used = vc_data.space_used;
                        dli->dl_space_used <<= 10;
                }
                if (vc_data.space_total == (uint32_t)CDLIM_INFINITY)
                        dli->dl_space_total = (uint64_t)CDLIM_INFINITY;
                else if (vc_data.space_total != (uint32_t)CDLIM_KEEP) {
                        dli->dl_space_total = vc_data.space_total;
                        dli->dl_space_total <<= 10;
                }
                if (vc_data.reserved != (uint32_t)CDLIM_KEEP)
                        dli->dl_nrlmult = (1 << 10) * (100 - vc_data.reserved) / 100;

                spin_unlock(&dli->dl_lock);

                put_dl_info(dli);
                ret = 0;

        out_release:
                path_release(&nd);
        }
        return ret;
}

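/*      vc_get_dlimit()

        * vserver command: report the current limits back to user space,
        * converting bytes back to KiB and dl_nrlmult back to a rounded
        * reserved percentage, e.g. 972 maps to
        * 100 - ((972 * 100 + 512) >> 10) = 5                           */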
int vc_get_dlimit(uint32_t id, void __user *data)
{
        struct nameidata nd;
        struct vcmd_ctx_dlimit_v0 vc_data;
        int ret;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        ret = user_path_walk_link(vc_data.name, &nd);
        if (!ret) {
                struct super_block *sb;
                struct dl_info *dli;

                ret = -EINVAL;
                if (!nd.dentry->d_inode)
                        goto out_release;
                if (!(sb = nd.dentry->d_inode->i_sb))
                        goto out_release;
                if (vc_data.reserved > 100 ||
                        vc_data.inodes_used > vc_data.inodes_total ||
                        vc_data.space_used > vc_data.space_total)
                        goto out_release;

                ret = -ESRCH;
                dli = locate_dl_info(sb, id);
                if (!dli)
                        goto out_release;

                spin_lock(&dli->dl_lock);
                vc_data.inodes_used = dli->dl_inodes_used;
                vc_data.inodes_total = dli->dl_inodes_total;
                vc_data.space_used = dli->dl_space_used >> 10;
                if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
                        vc_data.space_total = (uint32_t)CDLIM_INFINITY;
                else
                        vc_data.space_total = dli->dl_space_total >> 10;

                vc_data.reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
                spin_unlock(&dli->dl_lock);

                put_dl_info(dli);
                ret = -EFAULT;
                if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                        goto out_release;

                ret = 0;
        out_release:
                path_release(&nd);
        }
        return ret;
}

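/*      vx_vsi_statfs()

        * clamp the statfs() values seen inside a context to its limits:
        * f_files/f_ffree against the inode limit, f_blocks/f_bfree/
        * f_bavail against the space limit, with f_bavail scaled by
        * dl_nrlmult so the reserved share stays hidden; note that
        * (space_total >> 10) * nrlmult approximates
        * space_total * (100 - reserved) / 100 in bytes, since nrlmult
        * carries 10 fractional bits                                    */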
void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
{
        struct dl_info *dli;
        __u64 blimit, bfree, bavail;
        __u32 ifree;

        dli = locate_dl_info(sb, current->xid);
        if (!dli)
                return;

        spin_lock(&dli->dl_lock);
        if (dli->dl_inodes_total == (uint32_t)CDLIM_INFINITY)
                goto no_ilim;

        /* reduce max inodes available to limit */
        if (buf->f_files > dli->dl_inodes_total)
                buf->f_files = dli->dl_inodes_total;

        ifree = dli->dl_inodes_total - dli->dl_inodes_used;
        /* reduce free inodes to min */
        if (ifree < buf->f_ffree)
                buf->f_ffree = ifree;

no_ilim:
        if (dli->dl_space_total == (uint64_t)CDLIM_INFINITY)
                goto no_blim;

        blimit = dli->dl_space_total >> sb->s_blocksize_bits;

        if (dli->dl_space_total < dli->dl_space_used)
                bfree = 0;
        else
                bfree = (dli->dl_space_total - dli->dl_space_used)
                        >> sb->s_blocksize_bits;

        bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
        if (bavail < dli->dl_space_used)
                bavail = 0;
        else
                bavail = (bavail - dli->dl_space_used)
                        >> sb->s_blocksize_bits;

        /* reduce max space available to limit */
        if (buf->f_blocks > blimit)
                buf->f_blocks = blimit;

        /* reduce free space to min */
        if (bfree < buf->f_bfree)
                buf->f_bfree = bfree;

        /* reduce avail space to min */
        if (bavail < buf->f_bavail)
                buf->f_bavail = bavail;

no_blim:
        spin_unlock(&dli->dl_lock);
        put_dl_info(dli);

        return;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(locate_dl_info);
EXPORT_SYMBOL_GPL(rcu_free_dl_info);