2 * Quota code necessary even when VFS quota support is not compiled
3 * into the kernel. The interesting stuff is over in dquot.c, here
4 * we have symbols for initial quotactl(2) handling, the sysctl(2)
5 * variables, etc - things needed even when quota support disabled.
9 #include <linux/namei.h>
10 #include <linux/slab.h>
11 #include <asm/current.h>
12 #include <asm/uaccess.h>
13 #include <linux/kernel.h>
14 #include <linux/smp_lock.h>
15 #include <linux/security.h>
16 #include <linux/syscalls.h>
17 #include <linux/buffer_head.h>
18 #include <linux/capability.h>
19 #include <linux/quotaops.h>
20 #include <linux/major.h>
21 #include <linux/blkdev.h>
22 #include <linux/vserver/debug.h>
/* Check validity of generic quotactl commands */
/*
 * generic_quotactl_valid - validate a generic (VFS) quotactl request.
 *
 * Rejects an out-of-range quota @type, any command other than Q_SYNC
 * issued without a superblock, and commands for which the filesystem
 * supplies no matching quotactl_ops method.  For Q_GETQUOTA an
 * unprivileged caller may only query his own uid quota or a group quota
 * of a group he belongs to; other privileged commands require
 * CAP_SYS_ADMIN (checked via the VServer-aware vx_capable()).
 *
 * NOTE(review): this extract omits intervening lines (the error-return
 * statements and the switch over @cmd) -- comments below describe only
 * what the visible lines establish; confirm against the full file.
 */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
	if (type >= MAXQUOTAS)
	/* Only Q_SYNC may be issued without naming a superblock. */
	if (!sb && cmd != Q_SYNC)
	/* Is operation supported? */
	if (sb && !sb->s_qcop)
	/* Per-command capability checks: each command needs its s_qcop hook. */
		if (!sb->s_qcop->quota_on)
		if (!sb->s_qcop->quota_off)
		if (!sb->s_qcop->set_info)
		if (!sb->s_qcop->get_info)
		if (!sb->s_qcop->set_dqblk)
		if (!sb->s_qcop->get_dqblk)
	/* sb may legitimately be NULL here (global Q_SYNC), hence the guard. */
	if (sb && !sb->s_qcop->quota_sync)
	/* Is quota turned on for commands which need it? */
	/* This is just informative test so we are satisfied without a lock */
	if (!sb_has_quota_enabled(sb, type))
	/* Check privileges */
	if (cmd == Q_GETQUOTA) {
		/*
		 * Querying someone else's quota needs CAP_SYS_ADMIN;
		 * own-uid / own-group queries are always permitted.
		 */
		if (((type == USRQUOTA && current->euid != id) ||
		     (type == GRPQUOTA && !in_egroup_p(id))) &&
		    !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
	/* All state-changing commands require CAP_SYS_ADMIN. */
	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
/* Check validity of XFS Quota Manager commands */
/*
 * xqm_quotactl_valid - validate an XFS Quota Manager (Q_X*) request.
 *
 * Mirrors generic_quotactl_valid() for the XQM command set: bounds-check
 * the quota @type against XQM_MAXQUOTAS, require the matching s_qcop
 * method for each command, and enforce the same privilege rule --
 * Q_XGETQUOTA on one's own uid/gid is unprivileged, everything except
 * Q_XGETQSTAT and Q_XQUOTASYNC otherwise needs CAP_SYS_ADMIN.
 *
 * NOTE(review): error-return lines and the command switch are missing
 * from this extract; confirm the omitted paths against the full file.
 */
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
	if (type >= XQM_MAXQUOTAS)
	/* Each XQM command requires its corresponding s_qcop hook. */
		if (!sb->s_qcop->set_xstate)
		if (!sb->s_qcop->get_xstate)
		if (!sb->s_qcop->set_xquota)
		if (!sb->s_qcop->get_xquota)
		if (!sb->s_qcop->quota_sync)
	/* Check privileges */
	if (cmd == Q_XGETQUOTA) {
		/* Own uid/gid may be queried without privilege. */
		if (((type == XQM_USRQUOTA && current->euid != id) ||
		     (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
		    !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
	} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
/*
 * check_quotactl_valid - dispatch validity checking by command class.
 *
 * XQM (XFS) commands and generic VFS commands use separate validators;
 * after the class-specific check, the LSM hook security_quotactl() gets
 * a veto as well.  Returns 0 if the request may proceed, negative errno
 * otherwise.
 *
 * NOTE(review): the final return statement is missing from this extract.
 */
static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
	if (XQM_COMMAND(cmd))
		error = xqm_quotactl_valid(sb, type, cmd, id);
		error = generic_quotactl_valid(sb, type, cmd, id);
	error = security_quotactl(cmd, type, id, sb);
/*
 * quota_sync_sb - write dquots of @sb to disk and flush the quota files'
 * pagecache so userspace reads see up-to-date data.
 *
 * @type is a single quota type or -1 for all types.  Quota-file inode
 * references are taken with igrab() under dqonoff_mutex so the inodes
 * cannot go away in a racing quotaoff(); the pagecache truncation is
 * then done outside that mutex because it needs i_mutex.
 *
 * NOTE(review): continue-statements, iput() of the grabbed inodes and
 * the closing of the loops are missing from this extract.
 */
static void quota_sync_sb(struct super_block *sb, int type)
	struct inode *discard[MAXQUOTAS];
	sb->s_qcop->quota_sync(sb, type);
	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes. We need i_mutex and so we could
	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
	 * about races with quotaoff() (that is the reason why we have own
	 * reference to inode). */
	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* Skip types the caller did not ask for or that are not enabled. */
		if (type != -1 && cnt != type)
		if (!sb_has_quota_enabled(sb, cnt))
		/* Pin the quota file inode so quotaoff() can't free it under us. */
		discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* Drop cached pages of each grabbed quota file under i_mutex. */
		mutex_lock(&discard[cnt]->i_mutex);
		truncate_inode_pages(&discard[cnt]->i_data, 0);
		mutex_unlock(&discard[cnt]->i_mutex);
/*
 * sync_dquots - sync quota data for one superblock, or for every mounted
 * filesystem when @sb is NULL (as for quotactl(Q_SYNC) without a device).
 *
 * The all-superblocks walk iterates super_blocks under sb_lock, drops the
 * lock around the actual sync (which sleeps), holds s_umount to keep the
 * fs from being unmounted meanwhile, and restarts the scan when
 * __put_super_and_need_restart() says the list may have changed.
 *
 * NOTE(review): the single-sb early return, the dirty-skip continue,
 * sb->s_count manipulation and the restart label are missing from this
 * extract.
 */
void sync_dquots(struct super_block *sb, int type)
	if (sb->s_qcop->quota_sync)
		quota_sync_sb(sb, type);
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* This test just improves performance so it needn't be reliable... */
		for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++)
			if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt)
			    && info_any_dirty(&sb_dqopt(sb)->info[cnt]))
		/* quota_sync can sleep, so drop sb_lock while we call it. */
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && sb->s_qcop->quota_sync)
			quota_sync_sb(sb, type);
		up_read(&sb->s_umount);
		if (__put_super_and_need_restart(sb))
	spin_unlock(&sb_lock);
/* Copy parameters and call proper function */
/*
 * do_quotactl - copy user-space arguments in/out and dispatch to the
 * filesystem's quotactl_ops method for the given command.
 *
 * Runs only after check_quotactl_valid() has approved the request, so
 * each s_qcop method dereferenced here is known to be non-NULL.
 * Returns 0 or the method's return value on success, -EFAULT on a
 * failed user copy.
 *
 * NOTE(review): the switch statement and its case labels are missing
 * from this extract -- each group of lines below is the body of one
 * command case; confirm the mapping against the full file.
 */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
	/* Q_QUOTAON: @addr is the pathname of the quota file. */
	if (IS_ERR(pathname = getname(addr)))
		return PTR_ERR(pathname);
	ret = sb->s_qcop->quota_on(sb, type, id, pathname);
	/* Q_QUOTAOFF */
	return sb->s_qcop->quota_off(sb, type);
	/* Q_GETFMT: read the format id under dqptr_sem to avoid quotaoff races. */
	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_enabled(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	up_read(&sb_dqopt(sb)->dqptr_sem);
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
	/* Q_GETINFO */
	struct if_dqinfo info;
	if ((ret = sb->s_qcop->get_info(sb, type, &info)))
	if (copy_to_user(addr, &info, sizeof(info)))
	/* Q_SETINFO */
	struct if_dqinfo info;
	if (copy_from_user(&info, addr, sizeof(info)))
	return sb->s_qcop->set_info(sb, type, &info);
	/* Q_GETQUOTA */
	if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
	if (copy_to_user(addr, &idq, sizeof(idq)))
	/* Q_SETQUOTA */
	if (copy_from_user(&idq, addr, sizeof(idq)))
	return sb->s_qcop->set_dqblk(sb, type, id, &idq);
	/* Q_SYNC */
	sync_dquots(sb, type);
	/* Q_XQUOTAON/OFF: @addr holds the XFS state flags. */
	if (copy_from_user(&flags, addr, sizeof(flags)))
	return sb->s_qcop->set_xstate(sb, flags, cmd);
	/* Q_XGETQSTAT */
	struct fs_quota_stat fqs;
	if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
	if (copy_to_user(addr, &fqs, sizeof(fqs)))
	/* Q_XSETQLIM */
	struct fs_disk_quota fdq;
	if (copy_from_user(&fdq, addr, sizeof(fdq)))
	return sb->s_qcop->set_xquota(sb, type, id, &fdq);
	/* Q_XGETQUOTA */
	struct fs_disk_quota fdq;
	if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
	/* Q_XQUOTASYNC */
	return sb->s_qcop->quota_sync(sb, type);
	/* We never reach here unless validity check is broken */
343 #if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
345 #include <linux/vroot.h>
346 #include <linux/kallsyms.h>
348 static vroot_grb_func *vroot_get_real_bdev = NULL;
350 static spinlock_t vroot_grb_lock = SPIN_LOCK_UNLOCKED;
/*
 * register_vroot_grb - install the vroot "get real bdev" callback.
 *
 * Only one callback may be registered at a time: the slot is filled
 * under vroot_grb_lock and left untouched if already occupied.
 * Exported for the vroot block driver module.
 *
 * NOTE(review): the success/busy return-value lines are missing from
 * this extract.
 */
int register_vroot_grb(vroot_grb_func *func) {
	spin_lock(&vroot_grb_lock);
	if (!vroot_get_real_bdev) {
		vroot_get_real_bdev = func;
	spin_unlock(&vroot_grb_lock);
EXPORT_SYMBOL(register_vroot_grb);
/*
 * unregister_vroot_grb - remove the vroot "get real bdev" callback.
 *
 * Clears the single callback slot under vroot_grb_lock; the @func
 * argument identifies the expected owner.  Counterpart of
 * register_vroot_grb(), exported for the vroot module.
 *
 * NOTE(review): the return-value lines are missing from this extract.
 */
int unregister_vroot_grb(vroot_grb_func *func) {
	spin_lock(&vroot_grb_lock);
	if (vroot_get_real_bdev) {
		vroot_get_real_bdev = NULL;
	spin_unlock(&vroot_grb_lock);
EXPORT_SYMBOL(unregister_vroot_grb);
381 * This is the system call interface. This communicates with
382 * the user-level programs. Currently this only supports diskquota
383 * calls. Maybe we need to add the process quotas etc. in the future,
384 * but we probably should use rlimits for that.
386 asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr)
389 struct super_block *sb = NULL;
390 struct block_device *bdev;
394 cmds = cmd >> SUBCMDSHIFT;
395 type = cmd & SUBCMDMASK;
397 if (cmds != Q_SYNC || special) {
398 tmp = getname(special);
401 bdev = lookup_bdev(tmp);
404 return PTR_ERR(bdev);
405 #if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
406 if (bdev && bdev->bd_inode &&
407 imajor(bdev->bd_inode) == VROOT_MAJOR) {
408 struct block_device *bdnew = (void *)-EINVAL;
410 if (vroot_get_real_bdev)
411 bdnew = vroot_get_real_bdev(bdev);
413 vxdprintk(VXD_CBIT(misc, 0),
414 "vroot_get_real_bdev not set");
418 return PTR_ERR(bdnew);
422 sb = get_super(bdev);
428 ret = check_quotactl_valid(sb, type, cmds, id);
430 ret = do_quotactl(sb, type, cmds, id, addr);