2 * Quota code necessary even when VFS quota support is not compiled
3 * into the kernel. The interesting stuff is over in dquot.c, here
4 * we have symbols for initial quotactl(2) handling, the sysctl(2)
5 * variables, etc - things needed even when quota support disabled.
9 #include <linux/namei.h>
10 #include <linux/slab.h>
11 #include <asm/current.h>
12 #include <asm/uaccess.h>
13 #include <linux/kernel.h>
14 #include <linux/smp_lock.h>
15 #include <linux/security.h>
16 #include <linux/syscalls.h>
17 #include <linux/buffer_head.h>
18 #include <linux/capability.h>
19 #include <linux/quotaops.h>
20 #include <linux/major.h>
21 #include <linux/blkdev.h>
22 #include <linux/vs_base.h>
23 #include <linux/vserver/debug.h>
25 /* Check validity of generic quotactl commands */
/*
 * Returns 0 when (sb, type, cmd, id) describes a supported and permitted
 * generic (VFS) quotactl operation; otherwise an error.
 * NOTE(review): this excerpt is elided -- the error-return statements and
 * the switch-on-cmd labels surrounding the per-operation s_qcop checks
 * are not visible in this view.
 */
26 static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
	/* Quota type must be one of the compiled-in types. */
28 	if (type >= MAXQUOTAS)
	/* Only Q_SYNC may be issued without a target superblock. */
30 	if (!sb && cmd != Q_SYNC)
32 	/* Is operation supported? */
33 	if (sb && !sb->s_qcop)
	/* Each command requires the matching quotactl_ops callback. */
40 	if (!sb->s_qcop->quota_on)
44 	if (!sb->s_qcop->quota_off)
48 	if (!sb->s_qcop->set_info)
52 	if (!sb->s_qcop->get_info)
56 	if (!sb->s_qcop->set_dqblk)
60 	if (!sb->s_qcop->get_dqblk)
	/* Q_SYNC with a superblock needs the per-sb sync callback. */
64 	if (sb && !sb->s_qcop->quota_sync)
71 	/* Is quota turned on for commands which need it? */
79 	/* This is just informative test so we are satisfied without a lock */
80 	if (!sb_has_quota_enabled(sb, type))
84 	/* Check privileges */
	/*
	 * Q_GETQUOTA on one's own uid (USRQUOTA) or a group one belongs to
	 * (GRPQUOTA) needs no privilege; otherwise CAP_SYS_ADMIN is required,
	 * checked through the vserver-aware vx_capable() wrapper.
	 */
85 	if (cmd == Q_GETQUOTA) {
86 	if (((type == USRQUOTA && current->euid != id) ||
87 	(type == GRPQUOTA && !in_egroup_p(id))) &&
88 	!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
	/* All other state-changing commands are admin-only; the listed
	 * read-only commands (Q_GETFMT/Q_SYNC/Q_GETINFO) are exempt. */
91 	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
92 	if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
98 /* Check validity of XFS Quota Manager commands */
/*
 * XQM (XFS) counterpart of generic_quotactl_valid(): validates type range,
 * presence of the X-variant s_qcop callbacks, and caller privilege.
 * NOTE(review): excerpt is elided -- the error returns and the switch over
 * 'cmd' around the callback checks are not visible here.
 */
99 static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
	/* XQM has its own type bound, distinct from MAXQUOTAS. */
101 	if (type >= XQM_MAXQUOTAS)
112 	if (!sb->s_qcop->set_xstate)
116 	if (!sb->s_qcop->get_xstate)
120 	if (!sb->s_qcop->set_xquota)
124 	if (!sb->s_qcop->get_xquota)
128 	if (!sb->s_qcop->quota_sync)
135 	/* Check privileges */
	/*
	 * Mirrors the generic rule: reading one's own user quota or a group
	 * one belongs to is unprivileged; everything else except the listed
	 * read-only commands needs CAP_SYS_ADMIN (vserver-aware check).
	 */
136 	if (cmd == Q_XGETQUOTA) {
137 	if (((type == XQM_USRQUOTA && current->euid != id) ||
138 	(type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
139 	!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
141 	} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
142 	if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
/*
 * Dispatch the validity check: XQM commands go to the XFS validator, all
 * others to the generic one; the LSM hook gets the final say.
 * NOTE(review): the 'else' keyword, the error short-circuit before the
 * security hook, and the return are elided from this excerpt.
 */
149 static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
153 	if (XQM_COMMAND(cmd))
154 	error = xqm_quotactl_valid(sb, type, cmd, id);
156 	error = generic_quotactl_valid(sb, type, cmd, id);
	/* Ask the security module (LSM) whether the operation is allowed. */
158 	error = security_quotactl(cmd, type, id, sb);
/*
 * Flush quota information of one superblock (one type, or all types when
 * type == -1) to disk, then drop the quota files' pagecache so userspace
 * reading the quota file sees the freshly written data.
 * NOTE(review): excerpt is elided -- declarations of 'cnt', the 'continue'
 * statements and some closing braces are not visible here.
 */
162 static void quota_sync_sb(struct super_block *sb, int type)
165 	struct inode *discard[MAXQUOTAS];
	/* First let the filesystem write out its in-memory quota state. */
167 	sb->s_qcop->quota_sync(sb, type);
168 	/* This is not very clever (and fast) but currently I don't know about
169 	 * any other simple way of getting quota data to disk and we must get
170 	 * them there for userspace to be visible... */
171 	if (sb->s_op->sync_fs)
172 	sb->s_op->sync_fs(sb, 1);
173 	sync_blockdev(sb->s_bdev);
175 	/* Now when everything is written we can discard the pagecache so
176 	 * that userspace sees the changes. We need i_mutex and so we could
177 	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
178 	 * about races with quotaoff() (that is the reason why we have own
179 	 * reference to inode). */
	/* Collect references to the quota-file inodes under dqonoff_mutex... */
180 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
181 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
183 	if (type != -1 && cnt != type)
185 	if (!sb_has_quota_enabled(sb, cnt))
	/* igrab() pins the inode so quotaoff() cannot free it under us. */
187 	discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
189 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	/* ...then truncate their pagecache under i_mutex, outside the lock. */
190 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
192 	mutex_lock(&discard[cnt]->i_mutex);
193 	truncate_inode_pages(&discard[cnt]->i_data, 0);
194 	mutex_unlock(&discard[cnt]->i_mutex);
/*
 * Sync quota data: for a specific superblock when sb != NULL, otherwise
 * walk super_blocks and sync every filesystem with dirty quota info.
 * NOTE(review): excerpt is elided -- the early return for the sb != NULL
 * case, spin_lock(&sb_lock), the 'dirty' test/skip, the sb reference
 * count manipulation and the restart label are not visible here.
 */
200 void sync_dquots(struct super_block *sb, int type)
205 	if (sb->s_qcop->quota_sync)
206 	quota_sync_sb(sb, type);
212 	list_for_each_entry(sb, &super_blocks, s_list) {
213 	/* This test just improves performance so it needn't be reliable... */
214 	for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++)
215 	if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt)
216 	&& info_any_dirty(&sb_dqopt(sb)->info[cnt]))
	/* Drop sb_lock before blocking on s_umount / doing I/O. */
221 	spin_unlock(&sb_lock);
222 	down_read(&sb->s_umount);
	/* s_root check guards against syncing an sb being unmounted. */
223 	if (sb->s_root && sb->s_qcop->quota_sync)
224 	quota_sync_sb(sb, type);
225 	up_read(&sb->s_umount);
	/* List may have changed while unlocked; restart iteration if needed. */
227 	if (__put_super_and_need_restart(sb))
230 	spin_unlock(&sb_lock);
233 /* Copy parameters and call proper function */
/*
 * Execute a validated quotactl command: copy arguments across the
 * user/kernel boundary and invoke the matching s_qcop callback.
 * NOTE(review): excerpt is elided -- the switch(cmds) statement, case
 * labels, putname() after quota_on, several local declarations (ret,
 * fmt, idq, flags) and the -EFAULT returns after the failed copies are
 * not visible in this view.
 */
234 static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
	/* Q_QUOTAON: addr is the userspace path of the quota file. */
242 	if (IS_ERR(pathname = getname(addr)))
243 	return PTR_ERR(pathname);
244 	ret = sb->s_qcop->quota_on(sb, type, id, pathname);
	/* Q_QUOTAOFF */
249 	return sb->s_qcop->quota_off(sb, type);
	/* Q_GETFMT: read the format id under dqptr_sem, then copy it out. */
254 	down_read(&sb_dqopt(sb)->dqptr_sem);
255 	if (!sb_has_quota_enabled(sb, type)) {
256 	up_read(&sb_dqopt(sb)->dqptr_sem);
259 	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
260 	up_read(&sb_dqopt(sb)->dqptr_sem);
261 	if (copy_to_user(addr, &fmt, sizeof(fmt)))
	/* Q_GETINFO */
266 	struct if_dqinfo info;
268 	if ((ret = sb->s_qcop->get_info(sb, type, &info)))
270 	if (copy_to_user(addr, &info, sizeof(info)))
	/* Q_SETINFO */
275 	struct if_dqinfo info;
277 	if (copy_from_user(&info, addr, sizeof(info)))
279 	return sb->s_qcop->set_info(sb, type, &info);
	/* Q_GETQUOTA */
284 	if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
286 	if (copy_to_user(addr, &idq, sizeof(idq)))
	/* Q_SETQUOTA */
293 	if (copy_from_user(&idq, addr, sizeof(idq)))
295 	return sb->s_qcop->set_dqblk(sb, type, id, &idq);
	/* Q_SYNC */
298 	sync_dquots(sb, type);
	/* Q_XQUOTAON/OFF: XFS state change, flags come from userspace. */
306 	if (copy_from_user(&flags, addr, sizeof(flags)))
308 	return sb->s_qcop->set_xstate(sb, flags, cmd);
	/* Q_XGETQSTAT */
311 	struct fs_quota_stat fqs;
313 	if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
315 	if (copy_to_user(addr, &fqs, sizeof(fqs)))
	/* Q_XSETQLIM */
320 	struct fs_disk_quota fdq;
322 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
324 	return sb->s_qcop->set_xquota(sb, type, id, &fdq);
	/* Q_XGETQUOTA */
327 	struct fs_disk_quota fdq;
329 	if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
331 	if (copy_to_user(addr, &fdq, sizeof(fdq)))
	/* Q_XQUOTASYNC */
336 	return sb->s_qcop->quota_sync(sb, type);
337 	/* We never reach here unless validity check is broken */
/*
 * Linux-VServer vroot support: a hook the vroot driver registers so that
 * quotactl on a vroot device can be redirected to the underlying real
 * block device. Built only when vroot is enabled (built-in or module).
 */
344 #if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
346 #include <linux/vroot.h>
347 #include <linux/kallsyms.h>
/* Set by register_vroot_grb(); NULL while no handler is registered. */
349 static vroot_grb_func *vroot_get_real_bdev = NULL;
/* Serializes registration/unregistration of the hook above. */
351 static spinlock_t vroot_grb_lock = SPIN_LOCK_UNLOCKED;
/*
 * Install 'func' as the vroot get-real-bdev hook. Only one handler may be
 * registered at a time; the slot is claimed under vroot_grb_lock.
 * NOTE(review): excerpt is elided -- the 'ret' declaration, its success
 * assignment inside the if, closing brace and return are not visible.
 */
353 int register_vroot_grb(vroot_grb_func *func) {
356 	spin_lock(&vroot_grb_lock);
	/* Succeed only if no handler is currently installed. */
357 	if (!vroot_get_real_bdev) {
358 	vroot_get_real_bdev = func;
361 	spin_unlock(&vroot_grb_lock);
364 EXPORT_SYMBOL(register_vroot_grb);
/*
 * Remove the vroot get-real-bdev hook, clearing the slot under the lock.
 * NOTE(review): excerpt is elided -- the 'ret' declaration, the check
 * that 'func' matches the installed handler (presumably), closing brace
 * and return are not visible in this view.
 */
366 int unregister_vroot_grb(vroot_grb_func *func) {
369 	spin_lock(&vroot_grb_lock);
370 	if (vroot_get_real_bdev) {
371 	vroot_get_real_bdev = NULL;
374 	spin_unlock(&vroot_grb_lock);
377 	EXPORT_SYMBOL(unregister_vroot_grb);
382  * This is the system call interface. This communicates with
383  * the user-level programs. Currently this only supports diskquota
384  * calls. Maybe we need to add the process quotas etc. in the future,
385  * but we probably should use rlimits for that.
/*
 * quotactl(2) entry point. 'cmd' packs the subcommand in its high bits
 * (SUBCMDSHIFT) and the quota type in its low bits (SUBCMDMASK);
 * 'special' names the block device, resolved to a superblock before the
 * command is validated and dispatched.
 * NOTE(review): this excerpt is elided AND the function continues past
 * the end of the visible region -- declarations of cmds/type/tmp/ret,
 * putname(tmp), error handling, drop_super() and the final return are
 * not visible here.
 */
387 asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr)
390 	struct super_block *sb = NULL;
391 	struct block_device *bdev;
	/* Split the packed command word into subcommand and quota type. */
395 	cmds = cmd >> SUBCMDSHIFT;
396 	type = cmd & SUBCMDMASK;
	/* Every command except a device-less Q_SYNC must resolve 'special'. */
398 	if (cmds != Q_SYNC || special) {
399 	tmp = getname(special);
402 	bdev = lookup_bdev(tmp);
405 	return PTR_ERR(bdev);
406 #if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
	/* vserver: a vroot device must be mapped to its real backing bdev
	 * through the registered hook before looking up the superblock. */
407 	if (bdev && bdev->bd_inode &&
408 	imajor(bdev->bd_inode) == VROOT_MAJOR) {
409 	struct block_device *bdnew = (void *)-EINVAL;
411 	if (vroot_get_real_bdev)
412 	bdnew = vroot_get_real_bdev(bdev);
414 	vxdprintk(VXD_CBIT(misc, 0),
415 	"vroot_get_real_bdev not set");
419 	return PTR_ERR(bdnew);
423 	sb = get_super(bdev);
	/* Validate, then execute; both may legitimately see sb == NULL for
	 * the global Q_SYNC case. */
429 	ret = check_quotactl_valid(sb, type, cmds, id);
431 	ret = do_quotactl(sb, type, cmds, id, addr);