1 /*
2  * Implementation of the diskquota system for the LINUX operating system. QUOTA
3  * is implemented using the BSD system call interface as the means of
4  * communication with the user level. This file contains the generic routines
5  * called by the different filesystems on allocation of an inode or block.
6  * These routines take care of the administration needed to have a consistent
7  * diskquota tracking system. The ideas of both user and group quotas are based
8  * on the Melbourne quota system as used on BSD derived systems. The internal
9  * implementation is based on one of the several variants of the LINUX
10  * inode-subsystem with added complexity of the diskquota system.
11  * 
12  * Version: $Id: dquot.c,v 6.3 1996/11/17 18:35:34 mvw Exp mvw $
13  * 
14  * Author:      Marco van Wieringen <mvw@planets.elm.net>
15  *
16  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
17  *
18  *              Revised list management to avoid races
19  *              -- Bill Hawes, <whawes@star.net>, 9/98
20  *
21  *              Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
22  *              As a consequence the locking was moved from dquot_decr_...(),
23  *              dquot_incr_...() to calling functions.
24  *              invalidate_dquots() now writes modified dquots.
25  *              Serialized quota_off() and quota_on() for mount point.
26  *              Fixed a few bugs in grow_dquots().
27  *              Fixed deadlock in write_dquot() - we no longer account quotas on
28  *              quota files
29  *              remove_dquot_ref() moved to inode.c - it now traverses through inodes
30  *              add_dquot_ref() restarts after blocking
31  *              Added check for bogus uid and fixed check for group in quotactl.
32  *              Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
33  *
34  *              Used struct list_head instead of own list struct
35  *              Invalidation of referenced dquots is no longer possible
36  *              Improved free_dquots list management
37  *              Quota and i_blocks are now updated in one place to avoid races
38  *              Warnings are now delayed so we won't block in critical section
39  *              Write updated not to require dquot lock
40  *              Jan Kara, <jack@suse.cz>, 9/2000
41  *
42  *              Added dynamic quota structure allocation
43  *              Jan Kara <jack@suse.cz> 12/2000
44  *
45  *              Rewritten quota interface. Implemented new quota format and
46  *              formats registering.
47  *              Jan Kara, <jack@suse.cz>, 2001,2002
48  *
49  *              New SMP locking.
50  *              Jan Kara, <jack@suse.cz>, 10/2002
51  *
52  *              Added journalled quota support
53  *              Jan Kara, <jack@suse.cz>, 2003,2004
54  *
55  * (C) Copyright 1994 - 1997 Marco van Wieringen 
56  */
57
58 #include <linux/errno.h>
59 #include <linux/kernel.h>
60 #include <linux/fs.h>
61 #include <linux/mount.h>
62 #include <linux/mm.h>
63 #include <linux/time.h>
64 #include <linux/types.h>
65 #include <linux/string.h>
66 #include <linux/fcntl.h>
67 #include <linux/stat.h>
68 #include <linux/tty.h>
69 #include <linux/file.h>
70 #include <linux/slab.h>
71 #include <linux/sysctl.h>
72 #include <linux/smp_lock.h>
73 #include <linux/init.h>
74 #include <linux/module.h>
75 #include <linux/proc_fs.h>
76 #include <linux/security.h>
77 #include <linux/kmod.h>
78 #include <linux/pagemap.h>
79
80 #include <asm/uaccess.h>
81
82 #define __DQUOT_PARANOIA
83
84 /*
85  * There are two quota SMP locks. dq_list_lock protects all lists with quotas
86  * and quota formats and also dqstats structure containing statistics about the
87  * lists. dq_data_lock protects data in dq_dqb and in the mem_dqinfo structures,
88  * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
89  * i_blocks and i_bytes updates themselves are guarded by i_lock acquired directly
90  * in inode_add_bytes() and inode_sub_bytes().
91  *
92  * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
93  *
94  * Note that some things (e.g. sb pointer, type, id) don't change during
95  * the life of the dquot structure and so need not be protected by a lock
96  *
97  * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
98  * the operation is just reading pointers from the inode (or not using them at
99  * all) the read lock is enough. If pointers are altered the function must hold
100  * the write lock. If an operation holds a reference to a dquot in some other
101  * way (e.g. quotactl ops) it must be guarded by dqonoff_sem.
102  * This locking assures that:
103  *   a) update/access to dquot pointers in inode is serialized
104  *   b) everyone is guarded against invalidate_dquots()
105  *
106  * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
107  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
108  * Currently dquot is locked only when it is being read to memory (or space for
109  * it is being allocated) on the first dqget() and when it is being released on
110  * the last dqput(). The allocation and release operations are serialized by
111  * the dq_lock and by checking the use count in dquot_release().  Write
112  * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
113  * spinlock to internal buffers before writing.
114  *
115  * Lock ordering (including journal_lock) is as follows:
116  *  dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > dqio_sem
117  */
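/*
 * A minimal sketch of how the ordering above is used (dquot_alloc_space() and
 * friends below are the real thing; this just shows the shape):
 *
 *	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);	dquot pointers now stable
 *	spin_lock(&dq_data_lock);
 *	... check limits, update dquot->dq_dqb and inode->i_blocks/i_bytes ...
 *	spin_unlock(&dq_data_lock);
 *	... mark dquots dirty (may block when journalling) ...
 *	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 */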
118
119 spinlock_t dq_list_lock = SPIN_LOCK_UNLOCKED;
120 spinlock_t dq_data_lock = SPIN_LOCK_UNLOCKED;
121
122 static char *quotatypes[] = INITQFNAMES;
123 static struct quota_format_type *quota_formats; /* List of registered formats */
124 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
125
126 int register_quota_format(struct quota_format_type *fmt)
127 {
128         spin_lock(&dq_list_lock);
129         fmt->qf_next = quota_formats;
130         quota_formats = fmt;
131         spin_unlock(&dq_list_lock);
132         return 0;
133 }
134
135 void unregister_quota_format(struct quota_format_type *fmt)
136 {
137         struct quota_format_type **actqf;
138
139         spin_lock(&dq_list_lock);
140         for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
141         if (*actqf)
142                 *actqf = (*actqf)->qf_next;
143         spin_unlock(&dq_list_lock);
144 }
145
146 static struct quota_format_type *find_quota_format(int id)
147 {
148         struct quota_format_type *actqf;
149
150         spin_lock(&dq_list_lock);
151         for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
152         if (!actqf || !try_module_get(actqf->qf_owner)) {
153                 int qm;
154
155                 spin_unlock(&dq_list_lock);
156                 
157                 for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
158                 if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
159                         return NULL;
160
161                 spin_lock(&dq_list_lock);
162                 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
163                 if (actqf && !try_module_get(actqf->qf_owner))
164                         actqf = NULL;
165         }
166         spin_unlock(&dq_list_lock);
167         return actqf;
168 }
169
170 static void put_quota_format(struct quota_format_type *fmt)
171 {
172         module_put(fmt->qf_owner);
173 }
174
175 /*
176  * Dquot List Management:
177  * The quota code uses three lists for dquot management: the inuse_list,
178  * free_dquots, and dquot_hash[] array. A single dquot structure may be
179  * on all three lists, depending on its current state.
180  *
181  * All dquots are placed at the end of inuse_list when first created, and this
182  * list is used for the sync and invalidate operations, which must look
183  * at every dquot.
184  *
185  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
186  * and this list is searched whenever we need an available dquot.  Dquots are
187  * removed from the list as soon as they are used again, and
188  * dqstats.free_dquots gives the number of dquots on the list. When a
189  * dquot is invalidated it is completely released from memory.
190  *
191  * Dquots with a specific identity (device, type and id) are placed on
192  * one of the dquot_hash[] hash chains. This provides an efficient search
193  * mechanism to locate a specific dquot.
194  */
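/*
 * A rough lifecycle sketch (assuming a single user of the dquot):
 *
 *	dquot = dqget(sb, id, type);	hashed, on inuse_list, dq_count == 1
 *	... charge/uncharge usage in dquot->dq_dqb ...
 *	dqput(dquot);			written back if dirty, parked on free_dquots
 *	prune_dqcache() or invalidate_dquots() eventually frees the structure
 */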
195
196 static LIST_HEAD(inuse_list);
197 static LIST_HEAD(free_dquots);
198 unsigned int dq_hash_bits, dq_hash_mask;
199 static struct hlist_head *dquot_hash;
200
201 struct dqstats dqstats;
202
203 static void dqput(struct dquot *dquot);
204
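/* Hash on the superblock pointer (its low, likely cache-line-aligned bits are
 * shifted away), the quota id and the quota type; the result is folded into
 * the table with dq_hash_bits/dq_hash_mask. */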
205 static inline int const hashfn(struct super_block *sb, unsigned int id, int type)
206 {
207         unsigned long tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
208         return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
209 }
210
211 /*
212  * Following list functions expect dq_list_lock to be held
213  */
214 static inline void insert_dquot_hash(struct dquot *dquot)
215 {
216         struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
217         hlist_add_head(&dquot->dq_hash, head);
218 }
219
220 static inline void remove_dquot_hash(struct dquot *dquot)
221 {
222         hlist_del_init(&dquot->dq_hash);
223 }
224
225 static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
226 {
227         struct hlist_node *node;
228         struct dquot *dquot;
229
230         hlist_for_each (node, dquot_hash+hashent) {
231                 dquot = hlist_entry(node, struct dquot, dq_hash);
232                 if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
233                         return dquot;
234         }
235         return NODQUOT;
236 }
237
238 /* Add a dquot to the tail of the free list */
239 static inline void put_dquot_last(struct dquot *dquot)
240 {
241         list_add(&dquot->dq_free, free_dquots.prev);
242         dqstats.free_dquots++;
243 }
244
245 static inline void remove_free_dquot(struct dquot *dquot)
246 {
247         if (list_empty(&dquot->dq_free))
248                 return;
249         list_del_init(&dquot->dq_free);
250         dqstats.free_dquots--;
251 }
252
253 static inline void put_inuse(struct dquot *dquot)
254 {
255         /* We add to the back of the inuse list so that we don't have to
256          * restart when we block while traversing this list */
257         list_add(&dquot->dq_inuse, inuse_list.prev);
258         dqstats.allocated_dquots++;
259 }
260
261 static inline void remove_inuse(struct dquot *dquot)
262 {
263         dqstats.allocated_dquots--;
264         list_del(&dquot->dq_inuse);
265 }
266 /*
267  * End of list functions needing dq_list_lock
268  */
269
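/* Wait until whoever holds dq_lock (dquot_acquire() or dquot_release()) drops
 * it; the down()/up() pair only synchronizes, nothing stays locked afterwards. */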
270 static void wait_on_dquot(struct dquot *dquot)
271 {
272         down(&dquot->dq_lock);
273         up(&dquot->dq_lock);
274 }
275
276 #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
277
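/* Default ->mark_dirty operation: flag the dquot as modified and put it on the
 * per-sb, per-type dirty list unless it is already there. */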
278 int dquot_mark_dquot_dirty(struct dquot *dquot)
279 {
280         spin_lock(&dq_list_lock);
281         if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
282                 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
283                                 info[dquot->dq_type].dqi_dirty_list);
284         spin_unlock(&dq_list_lock);
285         return 0;
286 }
287
288 /* This function needs dq_list_lock */
289 static inline int clear_dquot_dirty(struct dquot *dquot)
290 {
291         if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
292                 return 0;
293         list_del_init(&dquot->dq_dirty);
294         return 1;
295 }
296
297 void mark_info_dirty(struct super_block *sb, int type)
298 {
299         set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
300 }
301 EXPORT_SYMBOL(mark_info_dirty);
302
303 /*
304  *      Read dquot from disk and alloc space for it
305  */
306
307 int dquot_acquire(struct dquot *dquot)
308 {
309         int ret = 0;
310         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
311
312         down(&dquot->dq_lock);
313         down(&dqopt->dqio_sem);
314         if (!test_bit(DQ_READ_B, &dquot->dq_flags))
315                 ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
316         if (ret < 0)
317                 goto out_iolock;
318         set_bit(DQ_READ_B, &dquot->dq_flags);
319         /* Instantiate dquot if needed */
320         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
321                 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
322                 if (ret < 0)
323                         goto out_iolock;
324         }
325         set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
326 out_iolock:
327         up(&dqopt->dqio_sem);
328         up(&dquot->dq_lock);
329         return ret;
330 }
331
332 /*
333  *      Write dquot to disk
334  */
335 int dquot_commit(struct dquot *dquot)
336 {
337         int ret = 0;
338         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
339
340         down(&dqopt->dqio_sem);
341         spin_lock(&dq_list_lock);
342         if (!clear_dquot_dirty(dquot)) {
343                 spin_unlock(&dq_list_lock);
344                 goto out_sem;
345         }
346         spin_unlock(&dq_list_lock);
347         /* An inactive dquot can only exist if there was an error during
348          * read/init => we had better not write it */
349         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
350                 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
351 out_sem:
352         up(&dqopt->dqio_sem);
353         if (info_dirty(&dqopt->info[dquot->dq_type]))
354                 dquot->dq_sb->dq_op->write_info(dquot->dq_sb, dquot->dq_type);
355         return ret;
356 }
357
358 /*
359  *      Release dquot
360  */
361 int dquot_release(struct dquot *dquot)
362 {
363         int ret = 0;
364         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
365
366         down(&dquot->dq_lock);
367         /* Check whether we are not racing with some other dqget() */
368         if (atomic_read(&dquot->dq_count) > 1)
369                 goto out_dqlock;
370         down(&dqopt->dqio_sem);
371         if (dqopt->ops[dquot->dq_type]->release_dqblk)
372                 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
373         clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
374         up(&dqopt->dqio_sem);
375 out_dqlock:
376         up(&dquot->dq_lock);
377         return ret;
378 }
379
380 /* Invalidate all dquots on the list. Note that this function is called after
381  * quota is disabled and pointers from inodes removed so there cannot be new
382  * quota users. Also because we hold dqonoff_sem there can be no quota users
383  * for this sb+type at all. */
384 static void invalidate_dquots(struct super_block *sb, int type)
385 {
386         struct dquot *dquot;
387         struct list_head *head;
388
389         spin_lock(&dq_list_lock);
390         for (head = inuse_list.next; head != &inuse_list;) {
391                 dquot = list_entry(head, struct dquot, dq_inuse);
392                 head = head->next;
393                 if (dquot->dq_sb != sb)
394                         continue;
395                 if (dquot->dq_type != type)
396                         continue;
397 #ifdef __DQUOT_PARANOIA
398                 if (atomic_read(&dquot->dq_count))
399                         BUG();
400 #endif
401                 /* Quota now has no users and it has been written on last dqput() */
402                 remove_dquot_hash(dquot);
403                 remove_free_dquot(dquot);
404                 remove_inuse(dquot);
405                 kmem_cache_free(dquot_cachep, dquot);
406         }
407         spin_unlock(&dq_list_lock);
408 }
409
410 int vfs_quota_sync(struct super_block *sb, int type)
411 {
412         struct list_head *dirty;
413         struct dquot *dquot;
414         struct quota_info *dqopt = sb_dqopt(sb);
415         int cnt;
416
417         down(&dqopt->dqonoff_sem);
418         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
419                 if (type != -1 && cnt != type)
420                         continue;
421                 if (!sb_has_quota_enabled(sb, cnt))
422                         continue;
423                 spin_lock(&dq_list_lock);
424                 dirty = &dqopt->info[cnt].dqi_dirty_list;
425                 while (!list_empty(dirty)) {
426                         dquot = list_entry(dirty->next, struct dquot, dq_dirty);
427                         /* A dirty but inactive dquot can only be a bad dquot... */
428                         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
429                                 clear_dquot_dirty(dquot);
430                                 continue;
431                         }
432                         /* Now we have an active dquot to which someone is
433                          * holding a reference so we can safely just increase
434                          * the use count */
435                         atomic_inc(&dquot->dq_count);
436                         dqstats.lookups++;
437                         spin_unlock(&dq_list_lock);
438                         sb->dq_op->write_dquot(dquot);
439                         dqput(dquot);
440                         spin_lock(&dq_list_lock);
441                 }
442                 spin_unlock(&dq_list_lock);
443         }
444
445         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
446                 if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)
447                         && info_dirty(&dqopt->info[cnt]))
448                         sb->dq_op->write_info(sb, cnt);
449         spin_lock(&dq_list_lock);
450         dqstats.syncs++;
451         spin_unlock(&dq_list_lock);
452         up(&dqopt->dqonoff_sem);
453
454         return 0;
455 }
456
457 /* Free unused dquots from cache */
458 static void prune_dqcache(int count)
459 {
460         struct list_head *head;
461         struct dquot *dquot;
462
463         head = free_dquots.prev;
464         while (head != &free_dquots && count) {
465                 dquot = list_entry(head, struct dquot, dq_free);
466                 remove_dquot_hash(dquot);
467                 remove_free_dquot(dquot);
468                 remove_inuse(dquot);
469                 kmem_cache_free(dquot_cachep, dquot);
470                 count--;
471                 head = free_dquots.prev;
472         }
473 }
474
475 /*
476  * This is called from kswapd when we think we need some
477  * more memory
478  */
479
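/* nr is the number of entries to prune; the return value is the number of
 * dquots still allocated so the caller can gauge further pruning. */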
480 static int shrink_dqcache_memory(int nr, unsigned int gfp_mask)
481 {
482         int ret;
483
484         spin_lock(&dq_list_lock);
485         if (nr)
486                 prune_dqcache(nr);
487         ret = dqstats.allocated_dquots;
488         spin_unlock(&dq_list_lock);
489         return ret;
490 }
491
492 /*
493  * Put reference to dquot
494  * NOTE: If you change this function please check whether dqput_blocks() works right...
495  * MUST be called with either dqptr_sem or dqonoff_sem held
496  */
497 static void dqput(struct dquot *dquot)
498 {
499         if (!dquot)
500                 return;
501 #ifdef __DQUOT_PARANOIA
502         if (!atomic_read(&dquot->dq_count)) {
503                 printk("VFS: dqput: trying to free free dquot\n");
504                 printk("VFS: device %s, dquot of %s %d\n",
505                         dquot->dq_sb->s_id,
506                         quotatypes[dquot->dq_type],
507                         dquot->dq_id);
508                 BUG();
509         }
510 #endif
511         
512         spin_lock(&dq_list_lock);
513         dqstats.drops++;
514         spin_unlock(&dq_list_lock);
515 we_slept:
516         spin_lock(&dq_list_lock);
517         if (atomic_read(&dquot->dq_count) > 1) {
518                 /* We have more than one user... nothing to do */
519                 atomic_dec(&dquot->dq_count);
520                 spin_unlock(&dq_list_lock);
521                 return;
522         }
523         /* Need to release dquot? */
524         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
525                 spin_unlock(&dq_list_lock);
526                 /* Commit dquot before releasing */
527                 dquot->dq_sb->dq_op->write_dquot(dquot);
528                 goto we_slept;
529         }
530         /* Clear flag in case dquot was inactive (something bad happened) */
531         clear_dquot_dirty(dquot);
532         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
533                 spin_unlock(&dq_list_lock);
534                 dquot->dq_sb->dq_op->release_dquot(dquot);
535                 goto we_slept;
536         }
537         atomic_dec(&dquot->dq_count);
538 #ifdef __DQUOT_PARANOIA
539         /* sanity check */
540         if (!list_empty(&dquot->dq_free))
541                 BUG();
542 #endif
543         put_dquot_last(dquot);
544         spin_unlock(&dq_list_lock);
545 }
546
547 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
548 {
549         struct dquot *dquot;
550
551         dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
552         if(!dquot)
553                 return NODQUOT;
554
555         memset((caddr_t)dquot, 0, sizeof(struct dquot));
556         sema_init(&dquot->dq_lock, 1);
557         INIT_LIST_HEAD(&dquot->dq_free);
558         INIT_LIST_HEAD(&dquot->dq_inuse);
559         INIT_HLIST_NODE(&dquot->dq_hash);
560         INIT_LIST_HEAD(&dquot->dq_dirty);
561         dquot->dq_sb = sb;
562         dquot->dq_type = type;
563         atomic_set(&dquot->dq_count, 1);
564
565         return dquot;
566 }
567
568 /*
569  * Get reference to dquot
570  * MUST be called with either dqptr_sem or dqonoff_sem held
571  */
572 static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
573 {
574         unsigned int hashent = hashfn(sb, id, type);
575         struct dquot *dquot, *empty = NODQUOT;
576
577         if (!sb_has_quota_enabled(sb, type))
578                 return NODQUOT;
579 we_slept:
580         spin_lock(&dq_list_lock);
581         if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
582                 if (empty == NODQUOT) {
583                         spin_unlock(&dq_list_lock);
584                         if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
585                                 schedule();     /* Try to wait for a moment... */
586                         goto we_slept;
587                 }
588                 dquot = empty;
589                 dquot->dq_id = id;
590                 /* all dquots go on the inuse_list */
591                 put_inuse(dquot);
592                 /* hash it first so it can be found */
593                 insert_dquot_hash(dquot);
594                 dqstats.lookups++;
595                 spin_unlock(&dq_list_lock);
596         } else {
597                 if (!atomic_read(&dquot->dq_count))
598                         remove_free_dquot(dquot);
599                 atomic_inc(&dquot->dq_count);
600                 dqstats.cache_hits++;
601                 dqstats.lookups++;
602                 spin_unlock(&dq_list_lock);
603                 if (empty)
604                         kmem_cache_free(dquot_cachep, empty);
605         }
606         /* Wait for dq_lock - after this we know that either dquot_release() has already
607          * finished or it will be canceled due to the dq_count > 1 test */
608         wait_on_dquot(dquot);
609         /* Read the dquot and instantiate it (everything done only if needed) */
610         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
611                 dqput(dquot);
612                 return NODQUOT;
613         }
614 #ifdef __DQUOT_PARANOIA
615         if (!dquot->dq_sb)      /* Has somebody invalidated entry under us? */
616                 BUG();
617 #endif
618
619         return dquot;
620 }
621
622 static int dqinit_needed(struct inode *inode, int type)
623 {
624         int cnt;
625
626         if (IS_NOQUOTA(inode))
627                 return 0;
628         if (type != -1)
629                 return inode->i_dquot[type] == NODQUOT;
630         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
631                 if (inode->i_dquot[cnt] == NODQUOT)
632                         return 1;
633         return 0;
634 }
635
636 /* This routine is guarded by dqonoff_sem semaphore */
637 static void add_dquot_ref(struct super_block *sb, int type)
638 {
639         struct list_head *p;
640
641 restart:
642         file_list_lock();
643         list_for_each(p, &sb->s_files) {
644                 struct file *filp = list_entry(p, struct file, f_list);
645                 struct inode *inode = filp->f_dentry->d_inode;
646                 if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
647                         struct dentry *dentry = dget(filp->f_dentry);
648                         file_list_unlock();
649                         sb->dq_op->initialize(inode, type);
650                         dput(dentry);
651                         /* As we may have blocked we had better restart... */
652                         goto restart;
653                 }
654         }
655         file_list_unlock();
656 }
657
658 /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
659 static inline int dqput_blocks(struct dquot *dquot)
660 {
661         if (atomic_read(&dquot->dq_count) <= 1)
662                 return 1;
663         return 0;
664 }
665
666 /* Remove references to dquots from inode - add dquot to list for freeing if needed */
667 /* We can't race with anybody because we hold dqptr_sem for writing... */
668 int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head)
669 {
670         struct dquot *dquot = inode->i_dquot[type];
671         int cnt;
672
673         inode->i_dquot[type] = NODQUOT;
674         /* any other quota in use? */
675         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
676                 if (inode->i_dquot[cnt] != NODQUOT)
677                         goto put_it;
678         }
679         inode->i_flags &= ~S_QUOTA;
680 put_it:
681         if (dquot != NODQUOT) {
682                 if (dqput_blocks(dquot)) {
683 #ifdef __DQUOT_PARANOIA
684                         if (atomic_read(&dquot->dq_count) != 1)
685                                 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
686 #endif
687                         spin_lock(&dq_list_lock);
688                         list_add(&dquot->dq_free, tofree_head); /* As the dquot currently has users it can't be on the free list... */
689                         spin_unlock(&dq_list_lock);
690                         return 1;
691                 }
692                 else
693                         dqput(dquot);   /* We have guaranteed we won't block */
694         }
695         return 0;
696 }
697
698 /* Free list of dquots - called from inode.c */
699 /* dquots are removed from inodes; no new references can be obtained so we are the only ones holding a reference */
700 static void put_dquot_list(struct list_head *tofree_head)
701 {
702         struct list_head *act_head;
703         struct dquot *dquot;
704
705         act_head = tofree_head->next;
706         /* So now we have dquots on the list... Just free them */
707         while (act_head != tofree_head) {
708                 dquot = list_entry(act_head, struct dquot, dq_free);
709                 act_head = act_head->next;
710                 list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
711                 dqput(dquot);
712         }
713 }
714
715 /* Function in inode.c - remove pointers to dquots in icache */
716 extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
717
718 /* Gather all references from inodes and drop them */
719 static void drop_dquot_ref(struct super_block *sb, int type)
720 {
721         LIST_HEAD(tofree_head);
722
723         down_write(&sb_dqopt(sb)->dqptr_sem);
724         remove_dquot_ref(sb, type, &tofree_head);
725         up_write(&sb_dqopt(sb)->dqptr_sem);
726         put_dquot_list(&tofree_head);
727 }
728
729 static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
730 {
731         dquot->dq_dqb.dqb_curinodes += number;
732 }
733
734 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
735 {
736         dquot->dq_dqb.dqb_curspace += number;
737 }
738
739 static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
740 {
741         if (dquot->dq_dqb.dqb_curinodes > number)
742                 dquot->dq_dqb.dqb_curinodes -= number;
743         else
744                 dquot->dq_dqb.dqb_curinodes = 0;
745         if (dquot->dq_dqb.dqb_curinodes < dquot->dq_dqb.dqb_isoftlimit)
746                 dquot->dq_dqb.dqb_itime = (time_t) 0;
747         clear_bit(DQ_INODES_B, &dquot->dq_flags);
748 }
749
750 static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
751 {
752         if (dquot->dq_dqb.dqb_curspace > number)
753                 dquot->dq_dqb.dqb_curspace -= number;
754         else
755                 dquot->dq_dqb.dqb_curspace = 0;
756         if (toqb(dquot->dq_dqb.dqb_curspace) < dquot->dq_dqb.dqb_bsoftlimit)
757                 dquot->dq_dqb.dqb_btime = (time_t) 0;
758         clear_bit(DQ_BLKS_B, &dquot->dq_flags);
759 }
760
761 static inline int need_print_warning(struct dquot *dquot)
762 {
763         switch (dquot->dq_type) {
764                 case USRQUOTA:
765                         return current->fsuid == dquot->dq_id;
766                 case GRPQUOTA:
767                         return in_group_p(dquot->dq_id);
768         }
769         return 0;
770 }
771
772 /* Values of warnings */
773 #define NOWARN 0
774 #define IHARDWARN 1
775 #define ISOFTLONGWARN 2
776 #define ISOFTWARN 3
777 #define BHARDWARN 4
778 #define BSOFTLONGWARN 5
779 #define BSOFTWARN 6
780
781 /* Print warning to the user who exceeded the quota */
782 static void print_warning(struct dquot *dquot, const char warntype)
783 {
784         char *msg = NULL;
785         int flag = (warntype == BHARDWARN || warntype == BSOFTLONGWARN) ? DQ_BLKS_B :
786           ((warntype == IHARDWARN || warntype == ISOFTLONGWARN) ? DQ_INODES_B : 0);
787
788         if (!need_print_warning(dquot) || (flag && test_and_set_bit(flag, &dquot->dq_flags)))
789                 return;
790         tty_write_message(current->signal->tty, dquot->dq_sb->s_id);
791         if (warntype == ISOFTWARN || warntype == BSOFTWARN)
792                 tty_write_message(current->signal->tty, ": warning, ");
793         else
794                 tty_write_message(current->signal->tty, ": write failed, ");
795         tty_write_message(current->signal->tty, quotatypes[dquot->dq_type]);
796         switch (warntype) {
797                 case IHARDWARN:
798                         msg = " file limit reached.\n";
799                         break;
800                 case ISOFTLONGWARN:
801                         msg = " file quota exceeded too long.\n";
802                         break;
803                 case ISOFTWARN:
804                         msg = " file quota exceeded.\n";
805                         break;
806                 case BHARDWARN:
807                         msg = " block limit reached.\n";
808                         break;
809                 case BSOFTLONGWARN:
810                         msg = " block quota exceeded too long.\n";
811                         break;
812                 case BSOFTWARN:
813                         msg = " block quota exceeded.\n";
814                         break;
815         }
816         tty_write_message(current->signal->tty, msg);
817 }
818
819 static inline void flush_warnings(struct dquot **dquots, char *warntype)
820 {
821         int i;
822
823         for (i = 0; i < MAXQUOTAS; i++)
824                 if (dquots[i] != NODQUOT && warntype[i] != NOWARN)
825                         print_warning(dquots[i], warntype[i]);
826 }
827
828 static inline char ignore_hardlimit(struct dquot *dquot)
829 {
830         struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
831
832         return capable(CAP_SYS_RESOURCE) &&
833             (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
834 }
835
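/*
 * check_idq() and check_bdq() below implement the usual soft/hard limit
 * policy: exceeding the hard limit, or the soft limit once its grace time has
 * run out, refuses the allocation (NO_QUOTA) with a "write failed" warning
 * type, while first crossing the soft limit only starts the grace timer and
 * returns QUOTA_OK with a plain warning.
 */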
836 /* needs dq_data_lock */
837 static int check_idq(struct dquot *dquot, ulong inodes, char *warntype)
838 {
839         *warntype = NOWARN;
840         if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
841                 return QUOTA_OK;
842
843         if (dquot->dq_dqb.dqb_ihardlimit &&
844            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
845             !ignore_hardlimit(dquot)) {
846                 *warntype = IHARDWARN;
847                 return NO_QUOTA;
848         }
849
850         if (dquot->dq_dqb.dqb_isoftlimit &&
851            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
852             dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
853             !ignore_hardlimit(dquot)) {
854                 *warntype = ISOFTLONGWARN;
855                 return NO_QUOTA;
856         }
857
858         if (dquot->dq_dqb.dqb_isoftlimit &&
859            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
860             dquot->dq_dqb.dqb_itime == 0) {
861                 *warntype = ISOFTWARN;
862                 dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
863         }
864
865         return QUOTA_OK;
866 }
867
868 /* needs dq_data_lock */
869 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
870 {
871         *warntype = 0;
872         if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
873                 return QUOTA_OK;
874
875         if (dquot->dq_dqb.dqb_bhardlimit &&
876            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit &&
877             !ignore_hardlimit(dquot)) {
878                 if (!prealloc)
879                         *warntype = BHARDWARN;
880                 return NO_QUOTA;
881         }
882
883         if (dquot->dq_dqb.dqb_bsoftlimit &&
884            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
885             dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
886             !ignore_hardlimit(dquot)) {
887                 if (!prealloc)
888                         *warntype = BSOFTLONGWARN;
889                 return NO_QUOTA;
890         }
891
892         if (dquot->dq_dqb.dqb_bsoftlimit &&
893            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
894             dquot->dq_dqb.dqb_btime == 0) {
895                 if (!prealloc) {
896                         *warntype = BSOFTWARN;
897                         dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
898                 }
899                 else
900                         /*
901                          * We don't allow preallocation to exceed the softlimit so exceeding it
902                          * will always be printed
903                          */
904                         return NO_QUOTA;
905         }
906
907         return QUOTA_OK;
908 }
909
910 /*
911  *      Initialize quota pointers in inode
912  *      Transaction must be started at entry
913  */
914 int dquot_initialize(struct inode *inode, int type)
915 {
916         unsigned int id = 0;
917         int cnt, ret = 0;
918
919         /* First test before acquiring semaphore - solves deadlocks when we
920          * re-enter the quota code and are already holding the semaphore */
921         if (IS_NOQUOTA(inode))
922                 return 0;
923         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
924         /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
925         if (IS_NOQUOTA(inode))
926                 goto out_err;
927         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
928                 if (type != -1 && cnt != type)
929                         continue;
930                 if (inode->i_dquot[cnt] == NODQUOT) {
931                         switch (cnt) {
932                                 case USRQUOTA:
933                                         id = inode->i_uid;
934                                         break;
935                                 case GRPQUOTA:
936                                         id = inode->i_gid;
937                                         break;
938                         }
939                         inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
940                         if (inode->i_dquot[cnt])
941                                 inode->i_flags |= S_QUOTA;
942                 }
943         }
944 out_err:
945         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
946         return ret;
947 }
948
949 /*
950  *      Release all quotas referenced by inode
951  *      Transaction must be started at entry
952  */
953 int dquot_drop(struct inode *inode)
954 {
955         int cnt;
956
957         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
958         inode->i_flags &= ~S_QUOTA;
959         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
960                 if (inode->i_dquot[cnt] != NODQUOT) {
961                         dqput(inode->i_dquot[cnt]);
962                         inode->i_dquot[cnt] = NODQUOT;
963                 }
964         }
965         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
966         return 0;
967 }
968
969 /*
970  * The following four functions update the i_blocks+i_bytes fields and
971  * quota information (together with appropriate checks)
972  * NOTE: We absolutely rely on the fact that caller dirties
973  * the inode (usually macros in quotaops.h care about this) and
974  * holds a handle for the current transaction so that dquot write and
975  * inode write go into the same transaction.
976  */
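/*
 * A sketch of the expected calling pattern from a filesystem (normally via the
 * wrappers in quotaops.h rather than direct calls; journal_start()/journal_stop()
 * stand in for whatever transaction machinery the filesystem uses):
 *
 *	handle = journal_start(...);
 *	if (inode->i_sb->dq_op->alloc_space(inode, bytes, warn) == NO_QUOTA)
 *		fail with -EDQUOT;
 *	... allocate the blocks ...
 *	mark_inode_dirty(inode);	inode and dquot updates land in the
 *	journal_stop(handle);		same transaction
 */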
977
978 /*
979  * This operation can block, but only after everything is updated
980  */
981 int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
982 {
983         int cnt, ret = NO_QUOTA;
984         char warntype[MAXQUOTAS];
985
986         /* First test before acquiring semaphore - solves deadlocks when we
987          * re-enter the quota code and are already holding the semaphore */
988         if (IS_NOQUOTA(inode)) {
989 out_add:
990                 inode_add_bytes(inode, number);
991                 return QUOTA_OK;
992         }
993         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
994                 warntype[cnt] = NOWARN;
995
996         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
997         if (IS_NOQUOTA(inode)) {        /* Now we can do reliable test... */
998                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
999                 goto out_add;
1000         }
1001         spin_lock(&dq_data_lock);
1002         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1003                 if (inode->i_dquot[cnt] == NODQUOT)
1004                         continue;
1005                 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
1006                         goto warn_put_all;
1007         }
1008         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1009                 if (inode->i_dquot[cnt] == NODQUOT)
1010                         continue;
1011                 dquot_incr_space(inode->i_dquot[cnt], number);
1012         }
1013         inode_add_bytes(inode, number);
1014         ret = QUOTA_OK;
1015 warn_put_all:
1016         spin_unlock(&dq_data_lock);
1017         if (ret == QUOTA_OK)
1018                 /* Dirtify all the dquots - this can block when journalling */
1019                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1020                         if (inode->i_dquot[cnt])
1021                                 mark_dquot_dirty(inode->i_dquot[cnt]);
1022         flush_warnings(inode->i_dquot, warntype);
1023         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1024         return ret;
1025 }
1026
1027 /*
1028  * This operation can block, but only after everything is updated
1029  */
1030 int dquot_alloc_inode(const struct inode *inode, unsigned long number)
1031 {
1032         int cnt, ret = NO_QUOTA;
1033         char warntype[MAXQUOTAS];
1034
1035         /* First test before acquiring semaphore - solves deadlocks when we
1036          * re-enter the quota code and are already holding the semaphore */
1037         if (IS_NOQUOTA(inode))
1038                 return QUOTA_OK;
1039         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1040                 warntype[cnt] = NOWARN;
1041         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1042         if (IS_NOQUOTA(inode)) {
1043                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1044                 return QUOTA_OK;
1045         }
1046         spin_lock(&dq_data_lock);
1047         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1048                 if (inode->i_dquot[cnt] == NODQUOT)
1049                         continue;
1050                 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
1051                         goto warn_put_all;
1052         }
1053
1054         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1055                 if (inode->i_dquot[cnt] == NODQUOT)
1056                         continue;
1057                 dquot_incr_inodes(inode->i_dquot[cnt], number);
1058         }
1059         ret = QUOTA_OK;
1060 warn_put_all:
1061         spin_unlock(&dq_data_lock);
1062         if (ret == QUOTA_OK)
1063                 /* Dirtify all the dquots - this can block when journalling */
1064                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1065                         if (inode->i_dquot[cnt])
1066                                 mark_dquot_dirty(inode->i_dquot[cnt]);
1067         flush_warnings((struct dquot **)inode->i_dquot, warntype);
1068         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1069         return ret;
1070 }
1071
1072 /*
1073  * This is a non-blocking operation.
1074  */
1075 int dquot_free_space(struct inode *inode, qsize_t number)
1076 {
1077         unsigned int cnt;
1078
1079         /* First test before acquiring semaphore - solves deadlocks when we
1080          * re-enter the quota code and are already holding the semaphore */
1081         if (IS_NOQUOTA(inode)) {
1082 out_sub:
1083                 inode_sub_bytes(inode, number);
1084                 return QUOTA_OK;
1085         }
1086         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1087         /* Now recheck reliably when holding dqptr_sem */
1088         if (IS_NOQUOTA(inode)) {
1089                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1090                 goto out_sub;
1091         }
1092         spin_lock(&dq_data_lock);
1093         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1094                 if (inode->i_dquot[cnt] == NODQUOT)
1095                         continue;
1096                 dquot_decr_space(inode->i_dquot[cnt], number);
1097         }
1098         inode_sub_bytes(inode, number);
1099         spin_unlock(&dq_data_lock);
1100         /* Dirtify all the dquots - this can block when journalling */
1101         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1102                 if (inode->i_dquot[cnt])
1103                         mark_dquot_dirty(inode->i_dquot[cnt]);
1104         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1105         return QUOTA_OK;
1106 }
1107
1108 /*
1109  * This is a non-blocking operation.
1110  */
1111 int dquot_free_inode(const struct inode *inode, unsigned long number)
1112 {
1113         unsigned int cnt;
1114
1115         /* First test before acquiring semaphore - solves deadlocks when we
1116          * re-enter the quota code and are already holding the semaphore */
1117         if (IS_NOQUOTA(inode))
1118                 return QUOTA_OK;
1119         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1120         /* Now recheck reliably when holding dqptr_sem */
1121         if (IS_NOQUOTA(inode)) {
1122                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1123                 return QUOTA_OK;
1124         }
1125         spin_lock(&dq_data_lock);
1126         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1127                 if (inode->i_dquot[cnt] == NODQUOT)
1128                         continue;
1129                 dquot_decr_inodes(inode->i_dquot[cnt], number);
1130         }
1131         spin_unlock(&dq_data_lock);
1132         /* Dirtify all the dquots - this can block when journalling */
1133         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1134                 if (inode->i_dquot[cnt])
1135                         mark_dquot_dirty(inode->i_dquot[cnt]);
1136         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1137         return QUOTA_OK;
1138 }
1139
1140 /*
1141  * Transfer the number of inodes and blocks from one diskquota to another.
1142  *
1143  * This operation can block, but only after everything is updated
1144  */
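/*
 * The work is done in two phases: first dqget() the target dquots (this may
 * block reading them from disk), then check the limits and move the inode and
 * space usage from transfer_from[] to transfer_to[] under dq_data_lock.  On
 * failure nothing is charged and the newly acquired dquots are dropped again.
 */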
1145 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1146 {
1147         qsize_t space;
1148         struct dquot *transfer_from[MAXQUOTAS];
1149         struct dquot *transfer_to[MAXQUOTAS];
1150         int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
1151             chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
1152         char warntype[MAXQUOTAS];
1153
1154         /* First test before acquiring semaphore - solves deadlocks when we
1155          * re-enter the quota code and are already holding the semaphore */
1156         if (IS_NOQUOTA(inode))
1157                 return QUOTA_OK;
1158         /* Clear the arrays */
1159         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1160                 transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
1161                 warntype[cnt] = NOWARN;
1162         }
1163         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1164         /* Now recheck reliably when holding dqptr_sem */
1165         if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
1166                 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1167                 return QUOTA_OK;
1168         }
1169         /* First build the transfer_to list - here we can block on
1170          * reading/instantiating dquots.  We know that the transaction for
1171          * us was already started so we don't violate lock ranking here */
1172         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1173                 switch (cnt) {
1174                         case USRQUOTA:
1175                                 if (!chuid)
1176                                         continue;
1177                                 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
1178                                 break;
1179                         case GRPQUOTA:
1180                                 if (!chgid)
1181                                         continue;
1182                                 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
1183                                 break;
1184                 }
1185         }
1186         spin_lock(&dq_data_lock);
1187         space = inode_get_bytes(inode);
1188         /* Build the transfer_from list and check the limits */
1189         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1190                 if (transfer_to[cnt] == NODQUOT)
1191                         continue;
1192                 transfer_from[cnt] = inode->i_dquot[cnt];
1193                 if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA ||
1194                     check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA)
1195                         goto warn_put_all;
1196         }
1197
1198         /*
1199          * Finally perform the needed transfer from transfer_from to transfer_to
1200          */
1201         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1202                 /*
1203                  * Skip changes for same uid or gid or for turned off quota-type.
1204                  */
1205                 if (transfer_to[cnt] == NODQUOT)
1206                         continue;
1207
1208                 /* Due to IO error we might not have transfer_from[] structure */
1209                 if (transfer_from[cnt]) {
1210                         dquot_decr_inodes(transfer_from[cnt], 1);
1211                         dquot_decr_space(transfer_from[cnt], space);
1212                 }
1213
1214                 dquot_incr_inodes(transfer_to[cnt], 1);
1215                 dquot_incr_space(transfer_to[cnt], space);
1216
1217                 inode->i_dquot[cnt] = transfer_to[cnt];
1218         }
1219         ret = QUOTA_OK;
1220 warn_put_all:
1221         spin_unlock(&dq_data_lock);
1222         /* Dirtify all the dquots - this can block when journalling */
1223         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1224                 if (transfer_from[cnt])
1225                         mark_dquot_dirty(transfer_from[cnt]);
1226                 if (transfer_to[cnt])
1227                         mark_dquot_dirty(transfer_to[cnt]);
1228         }
1229         flush_warnings(transfer_to, warntype);
1230         
1231         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1232                 if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
1233                         dqput(transfer_from[cnt]);
1234                 if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
1235                         dqput(transfer_to[cnt]);
1236         }
1237         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1238         return ret;
1239 }
1240
1241 /*
1242  * Write info of quota file to disk
1243  */
1244 int dquot_commit_info(struct super_block *sb, int type)
1245 {
1246         int ret;
1247         struct quota_info *dqopt = sb_dqopt(sb);
1248
1249         down(&dqopt->dqio_sem);
1250         ret = dqopt->ops[type]->write_file_info(sb, type);
1251         up(&dqopt->dqio_sem);
1252         return ret;
1253 }
1254
1255 /*
1256  * Definitions of diskquota operations.
1257  */
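/*
 * This is the default set used when a filesystem simply enables quota; a
 * journalling filesystem will typically copy it and wrap write_dquot,
 * acquire_dquot and release_dquot in its own transactions (the journalled
 * quota support mentioned in the changelog above).
 */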
1258 struct dquot_operations dquot_operations = {
1259         .initialize     = dquot_initialize,
1260         .drop           = dquot_drop,
1261         .alloc_space    = dquot_alloc_space,
1262         .alloc_inode    = dquot_alloc_inode,
1263         .free_space     = dquot_free_space,
1264         .free_inode     = dquot_free_inode,
1265         .transfer       = dquot_transfer,
1266         .write_dquot    = dquot_commit,
1267         .acquire_dquot  = dquot_acquire,
1268         .release_dquot  = dquot_release,
1269         .mark_dirty     = dquot_mark_dquot_dirty,
1270         .write_info     = dquot_commit_info
1271 };
1272
1273 static inline void set_enable_flags(struct quota_info *dqopt, int type)
1274 {
1275         switch (type) {
1276                 case USRQUOTA:
1277                         dqopt->flags |= DQUOT_USR_ENABLED;
1278                         break;
1279                 case GRPQUOTA:
1280                         dqopt->flags |= DQUOT_GRP_ENABLED;
1281                         break;
1282         }
1283 }
1284
1285 static inline void reset_enable_flags(struct quota_info *dqopt, int type)
1286 {
1287         switch (type) {
1288                 case USRQUOTA:
1289                         dqopt->flags &= ~DQUOT_USR_ENABLED;
1290                         break;
1291                 case GRPQUOTA:
1292                         dqopt->flags &= ~DQUOT_GRP_ENABLED;
1293                         break;
1294         }
1295 }
1296
1297 /*
1298  * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1299  */
1300 int vfs_quota_off(struct super_block *sb, int type)
1301 {
1302         int cnt;
1303         struct quota_info *dqopt = sb_dqopt(sb);
1304
1305         /* We need to serialize quota_off() for device */
1306         down(&dqopt->dqonoff_sem);
1307         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1308                 if (type != -1 && cnt != type)
1309                         continue;
1310                 if (!sb_has_quota_enabled(sb, cnt))
1311                         continue;
1312                 reset_enable_flags(dqopt, cnt);
1313
1314                 /* Note: these are blocking operations */
1315                 drop_dquot_ref(sb, cnt);
1316                 invalidate_dquots(sb, cnt);
1317                 /*
1318                  * Now all dquots should be invalidated and all writes done, so we should be
1319                  * the only users of the info. No locks needed.
1320                  */
1321                 if (info_dirty(&dqopt->info[cnt]))
1322                         sb->dq_op->write_info(sb, cnt);
1323                 if (dqopt->ops[cnt]->free_file_info)
1324                         dqopt->ops[cnt]->free_file_info(sb, cnt);
1325                 put_quota_format(dqopt->info[cnt].dqi_format);
1326
1327                 fput(dqopt->files[cnt]);
1328                 dqopt->files[cnt] = NULL;
1329                 dqopt->info[cnt].dqi_flags = 0;
1330                 dqopt->info[cnt].dqi_igrace = 0;
1331                 dqopt->info[cnt].dqi_bgrace = 0;
1332                 dqopt->ops[cnt] = NULL;
1333         }
1334         up(&dqopt->dqonoff_sem);
1335         return 0;
1336 }
1337
1338 /*
1339  *      Turn quotas on for a device
1340  */
1341
1342 /* Helper function when we already have file open */
1343 static int vfs_quota_on_file(struct file *f, int type, int format_id)
1344 {
1345         struct quota_format_type *fmt = find_quota_format(format_id);
1346         struct inode *inode;
1347         struct super_block *sb = f->f_dentry->d_sb;
1348         struct quota_info *dqopt = sb_dqopt(sb);
1349         struct dquot *to_drop[MAXQUOTAS];
1350         int error, cnt;
1351         unsigned int oldflags;
1352
1353         if (!fmt)
1354                 return -ESRCH;
1355         error = -EIO;
1356         if (!f->f_op || !f->f_op->read || !f->f_op->write)
1357                 goto out_fmt;
1358         inode = f->f_dentry->d_inode;
1359         error = -EACCES;
1360         if (!S_ISREG(inode->i_mode))
1361                 goto out_fmt;
1362
1363         down(&dqopt->dqonoff_sem);
1364         if (sb_has_quota_enabled(sb, type)) {
1365                 error = -EBUSY;
1366                 goto out_lock;
1367         }
1368         oldflags = inode->i_flags;
1369         dqopt->files[type] = f;
1370         error = -EINVAL;
1371         if (!fmt->qf_ops->check_quota_file(sb, type))
1372                 goto out_file_init;
1373         /* We don't want quota accounting or atime updates on quota files (deadlocks
1374          * possible). We also need to set the GFP mask differently because we cannot
1375          * recurse into the filesystem when allocating a page for the quota inode */
1376         down_write(&dqopt->dqptr_sem);
1377         inode->i_flags |= S_NOQUOTA | S_NOATIME;
1378
1379         /*
1380          * We write to quota files deep within filesystem code.  We don't want
1381          * the VFS to reenter filesystem code when it tries to allocate a
1382          * pagecache page for the quota file write.  So clear __GFP_FS in
1383          * the quota file's allocation flags.
1384          */
1385         mapping_set_gfp_mask(inode->i_mapping,
1386                 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
1387
1388         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1389                 to_drop[cnt] = inode->i_dquot[cnt];
1390                 inode->i_dquot[cnt] = NODQUOT;
1391         }
1392         inode->i_flags &= ~S_QUOTA;
1393         up_write(&dqopt->dqptr_sem);
1394         /* We must put dquots outside of dqptr_sem because we may need to
1395          * start transaction for dquot_release() */
1396         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1397                 if (to_drop[cnt])
1398                         dqput(to_drop[cnt]);
1399         }
1400
1401         dqopt->ops[type] = fmt->qf_ops;
1402         dqopt->info[type].dqi_format = fmt;
1403         INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1404         down(&dqopt->dqio_sem);
1405         if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
1406                 up(&dqopt->dqio_sem);
1407                 goto out_file_init;
1408         }
1409         up(&dqopt->dqio_sem);
1410         set_enable_flags(dqopt, type);
1411
1412         add_dquot_ref(sb, type);
1413         up(&dqopt->dqonoff_sem);
1414
1415         return 0;
1416
1417 out_file_init:
1418         inode->i_flags = oldflags;
1419         dqopt->files[type] = NULL;
1420 out_lock:
1421         up_write(&dqopt->dqptr_sem);
1422         up(&dqopt->dqonoff_sem);
1423 out_fmt:
1424         put_quota_format(fmt);
1425
1426         return error; 
1427 }
1428
1429 /* Actual function called from quotactl() */
1430 int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
1431 {
1432         struct file *f;
1433         int error;
1434
1435         f = filp_open(path, O_RDWR, 0600);
1436         if (IS_ERR(f))
1437                 return PTR_ERR(f);
1438         error = security_quota_on(f);
1439         if (error)
1440                 goto out_f;
1441         error = vfs_quota_on_file(f, type, format_id);
1442         if (!error)
1443                 return 0;
1444 out_f:
1445         filp_close(f, NULL);
1446         return error;
1447 }
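/*
 * Illustrative note (added, not part of the original source; the device and
 * file paths below are made-up examples): userspace typically reaches this
 * entry point through the quotactl() syscall, roughly
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/hda1", QFMT_VFS_V0,
 *		 (caddr_t)"/mnt/aquota.user");
 *
 * which is dispatched to sb->s_qcop->quota_on(), i.e. to vfs_quota_on() for
 * filesystems that use the generic vfs_quotactl_ops defined further below.
 */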
1448
1449 /*
1450  * This function is used by filesystems when filp_open() would fail (because
1451  * the filesystem is being mounted right now). We use a private file structure.
1452  * The caller is responsible for ensuring that its I/O functions won't need the
1453  * vfsmnt structure or any dentry tricks...
1454  */
1455 int vfs_quota_on_mount(int type, int format_id, struct dentry *dentry)
1456 {
1457         struct file *f;
1458         int error;
1459
1460         dget(dentry);   /* Get a reference for struct file */
1461         f = dentry_open(dentry, NULL, O_RDWR);
1462         if (IS_ERR(f)) {
1463                 error = PTR_ERR(f);
1464                 goto out_dentry;
1465         }
1466         error = vfs_quota_on_file(f, type, format_id);
1467         if (!error)
1468                 return 0;
1469         fput(f);
1470 out_dentry:
1471         dput(dentry);
1472         return error;
1473 }
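/*
 * Illustrative sketch (added; the qf_name variable is a made-up example, not
 * taken from this file): a filesystem enabling journalled quota while it is
 * still being mounted might look up the quota file on its own root and call
 *
 *	struct dentry *dentry = lookup_one_len(qf_name, sb->s_root,
 *						strlen(qf_name));
 *	if (!IS_ERR(dentry))
 *		err = vfs_quota_on_mount(type, format_id, dentry);
 *
 * where qf_name is the quota file name passed in as a mount option.
 */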
1474
1475 /* Generic routine for getting common part of quota structure */
1476 static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
1477 {
1478         struct mem_dqblk *dm = &dquot->dq_dqb;
1479
1480         spin_lock(&dq_data_lock);
1481         di->dqb_bhardlimit = dm->dqb_bhardlimit;
1482         di->dqb_bsoftlimit = dm->dqb_bsoftlimit;
1483         di->dqb_curspace = dm->dqb_curspace;
1484         di->dqb_ihardlimit = dm->dqb_ihardlimit;
1485         di->dqb_isoftlimit = dm->dqb_isoftlimit;
1486         di->dqb_curinodes = dm->dqb_curinodes;
1487         di->dqb_btime = dm->dqb_btime;
1488         di->dqb_itime = dm->dqb_itime;
1489         di->dqb_valid = QIF_ALL;
1490         spin_unlock(&dq_data_lock);
1491 }
1492
1493 int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
1494 {
1495         struct dquot *dquot;
1496
1497         down(&sb_dqopt(sb)->dqonoff_sem);
1498         if (!(dquot = dqget(sb, id, type))) {
1499                 up(&sb_dqopt(sb)->dqonoff_sem);
1500                 return -ESRCH;
1501         }
1502         do_get_dqblk(dquot, di);
1503         dqput(dquot);
1504         up(&sb_dqopt(sb)->dqonoff_sem);
1505         return 0;
1506 }
1507
1508 /* Generic routine for setting common part of quota structure */
1509 static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
1510 {
1511         struct mem_dqblk *dm = &dquot->dq_dqb;
1512         int check_blim = 0, check_ilim = 0;
1513
1514         spin_lock(&dq_data_lock);
1515         if (di->dqb_valid & QIF_SPACE) {
1516                 dm->dqb_curspace = di->dqb_curspace;
1517                 check_blim = 1;
1518         }
1519         if (di->dqb_valid & QIF_BLIMITS) {
1520                 dm->dqb_bsoftlimit = di->dqb_bsoftlimit;
1521                 dm->dqb_bhardlimit = di->dqb_bhardlimit;
1522                 check_blim = 1;
1523         }
1524         if (di->dqb_valid & QIF_INODES) {
1525                 dm->dqb_curinodes = di->dqb_curinodes;
1526                 check_ilim = 1;
1527         }
1528         if (di->dqb_valid & QIF_ILIMITS) {
1529                 dm->dqb_isoftlimit = di->dqb_isoftlimit;
1530                 dm->dqb_ihardlimit = di->dqb_ihardlimit;
1531                 check_ilim = 1;
1532         }
1533         if (di->dqb_valid & QIF_BTIME)
1534                 dm->dqb_btime = di->dqb_btime;
1535         if (di->dqb_valid & QIF_ITIME)
1536                 dm->dqb_itime = di->dqb_itime;
1537
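	/*
	 * Descriptive comment (added): if the updated usage is below the soft
	 * limit (or there is no soft limit), clear the grace deadline and the
	 * corresponding warning bit; otherwise start a fresh grace period
	 * unless the caller supplied an explicit expiry time of its own.
	 */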
1538         if (check_blim) {
1539                 if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) {
1540                         dm->dqb_btime = 0;
1541                         clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1542                 }
1543                 else if (!(di->dqb_valid & QIF_BTIME))  /* Set grace only if user hasn't provided his own... */
1544                         dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
1545         }
1546         if (check_ilim) {
1547                 if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
1548                         dm->dqb_itime = 0;
1549                         clear_bit(DQ_INODES_B, &dquot->dq_flags);
1550                 }
1551                 else if (!(di->dqb_valid & QIF_ITIME))  /* Set grace only if user hasn't provided his own... */
1552                         dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1553         }
1554         if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
1555                 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
1556         else
1557                 set_bit(DQ_FAKE_B, &dquot->dq_flags);
1558         spin_unlock(&dq_data_lock);
1559         mark_dquot_dirty(dquot);
1560 }
1561
1562 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
1563 {
1564         struct dquot *dquot;
1565
1566         down(&sb_dqopt(sb)->dqonoff_sem);
1567         if (!(dquot = dqget(sb, id, type))) {
1568                 up(&sb_dqopt(sb)->dqonoff_sem);
1569                 return -ESRCH;
1570         }
1571         do_set_dqblk(dquot, di);
1572         dqput(dquot);
1573         up(&sb_dqopt(sb)->dqonoff_sem);
1574         return 0;
1575 }
1576
1577 /* Generic routine for getting common part of quota file information */
1578 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1579 {
1580         struct mem_dqinfo *mi;
1581   
1582         down(&sb_dqopt(sb)->dqonoff_sem);
1583         if (!sb_has_quota_enabled(sb, type)) {
1584                 up(&sb_dqopt(sb)->dqonoff_sem);
1585                 return -ESRCH;
1586         }
1587         mi = sb_dqopt(sb)->info + type;
1588         spin_lock(&dq_data_lock);
1589         ii->dqi_bgrace = mi->dqi_bgrace;
1590         ii->dqi_igrace = mi->dqi_igrace;
1591         ii->dqi_flags = mi->dqi_flags & DQF_MASK;
1592         ii->dqi_valid = IIF_ALL;
1593         spin_unlock(&dq_data_lock);
1594         up(&sb_dqopt(sb)->dqonoff_sem);
1595         return 0;
1596 }
1597
1598 /* Generic routine for setting common part of quota file information */
1599 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1600 {
1601         struct mem_dqinfo *mi;
1602
1603         down(&sb_dqopt(sb)->dqonoff_sem);
1604         if (!sb_has_quota_enabled(sb, type)) {
1605                 up(&sb_dqopt(sb)->dqonoff_sem);
1606                 return -ESRCH;
1607         }
1608         mi = sb_dqopt(sb)->info + type;
1609         spin_lock(&dq_data_lock);
1610         if (ii->dqi_valid & IIF_BGRACE)
1611                 mi->dqi_bgrace = ii->dqi_bgrace;
1612         if (ii->dqi_valid & IIF_IGRACE)
1613                 mi->dqi_igrace = ii->dqi_igrace;
1614         if (ii->dqi_valid & IIF_FLAGS)
1615                 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
1616         spin_unlock(&dq_data_lock);
1617         mark_info_dirty(sb, type);
1618         /* Force write to disk */
1619         sb->dq_op->write_info(sb, type);
1620         up(&sb_dqopt(sb)->dqonoff_sem);
1621         return 0;
1622 }
1623
1624 struct quotactl_ops vfs_quotactl_ops = {
1625         .quota_on       = vfs_quota_on,
1626         .quota_off      = vfs_quota_off,
1627         .quota_sync     = vfs_quota_sync,
1628         .get_info       = vfs_get_dqinfo,
1629         .set_info       = vfs_set_dqinfo,
1630         .get_dqblk      = vfs_get_dqblk,
1631         .set_dqblk      = vfs_set_dqblk
1632 };
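/*
 * Illustrative note (added, not part of the original source): a filesystem
 * that relies on the generic quota code points its super_block at this table
 * at mount time, e.g.
 *
 *	sb->s_qcop = &vfs_quotactl_ops;
 *
 * and sets sb->dq_op to its own struct dquot_operations. The quotactl()
 * syscall then dispatches Q_QUOTAON, Q_QUOTAOFF, Q_SYNC, Q_GETINFO,
 * Q_SETINFO, Q_GETQUOTA and Q_SETQUOTA to the handlers listed above.
 */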
1633
1634 static ctl_table fs_dqstats_table[] = {
1635         {
1636                 .ctl_name       = FS_DQ_LOOKUPS,
1637                 .procname       = "lookups",
1638                 .data           = &dqstats.lookups,
1639                 .maxlen         = sizeof(int),
1640                 .mode           = 0444,
1641                 .proc_handler   = &proc_dointvec,
1642         },
1643         {
1644                 .ctl_name       = FS_DQ_DROPS,
1645                 .procname       = "drops",
1646                 .data           = &dqstats.drops,
1647                 .maxlen         = sizeof(int),
1648                 .mode           = 0444,
1649                 .proc_handler   = &proc_dointvec,
1650         },
1651         {
1652                 .ctl_name       = FS_DQ_READS,
1653                 .procname       = "reads",
1654                 .data           = &dqstats.reads,
1655                 .maxlen         = sizeof(int),
1656                 .mode           = 0444,
1657                 .proc_handler   = &proc_dointvec,
1658         },
1659         {
1660                 .ctl_name       = FS_DQ_WRITES,
1661                 .procname       = "writes",
1662                 .data           = &dqstats.writes,
1663                 .maxlen         = sizeof(int),
1664                 .mode           = 0444,
1665                 .proc_handler   = &proc_dointvec,
1666         },
1667         {
1668                 .ctl_name       = FS_DQ_CACHE_HITS,
1669                 .procname       = "cache_hits",
1670                 .data           = &dqstats.cache_hits,
1671                 .maxlen         = sizeof(int),
1672                 .mode           = 0444,
1673                 .proc_handler   = &proc_dointvec,
1674         },
1675         {
1676                 .ctl_name       = FS_DQ_ALLOCATED,
1677                 .procname       = "allocated_dquots",
1678                 .data           = &dqstats.allocated_dquots,
1679                 .maxlen         = sizeof(int),
1680                 .mode           = 0444,
1681                 .proc_handler   = &proc_dointvec,
1682         },
1683         {
1684                 .ctl_name       = FS_DQ_FREE,
1685                 .procname       = "free_dquots",
1686                 .data           = &dqstats.free_dquots,
1687                 .maxlen         = sizeof(int),
1688                 .mode           = 0444,
1689                 .proc_handler   = &proc_dointvec,
1690         },
1691         {
1692                 .ctl_name       = FS_DQ_SYNCS,
1693                 .procname       = "syncs",
1694                 .data           = &dqstats.syncs,
1695                 .maxlen         = sizeof(int),
1696                 .mode           = 0444,
1697                 .proc_handler   = &proc_dointvec,
1698         },
1699         { .ctl_name = 0 },
1700 };
1701
1702 static ctl_table fs_table[] = {
1703         {
1704                 .ctl_name       = FS_DQSTATS,
1705                 .procname       = "quota",
1706                 .mode           = 0555,
1707                 .child          = fs_dqstats_table,
1708         },
1709         { .ctl_name = 0 },
1710 };
1711
1712 static ctl_table sys_table[] = {
1713         {
1714                 .ctl_name       = CTL_FS,
1715                 .procname       = "fs",
1716                 .mode           = 0555,
1717                 .child          = fs_table,
1718         },
1719         { .ctl_name = 0 },
1720 };
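/*
 * Descriptive comment (added): registering sys_table below exposes the quota
 * statistics as read-only sysctl files under /proc/sys/fs/quota/, e.g.
 * /proc/sys/fs/quota/lookups and /proc/sys/fs/quota/cache_hits.
 */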
1721
1722 /* SLAB cache for dquot structures */
1723 kmem_cache_t *dquot_cachep;
1724
1725 static int __init dquot_init(void)
1726 {
1727         int i;
1728         unsigned long nr_hash, order;
1729
1730         printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
1731
1732         register_sysctl_table(sys_table, 0);
1733
1734         dquot_cachep = kmem_cache_create("dquot", 
1735                         sizeof(struct dquot), sizeof(unsigned long) * 4,
1736                         SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
1737                         NULL, NULL);
1738
1739         order = 0;
1740         dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
1741         if (!dquot_hash)
1742                 panic("Cannot create dquot hash table");
1743
1744         /* Find the largest power-of-two number of hlist_heads that fits into the allocation */
1745         nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
1746         dq_hash_bits = 0;
1747         do {
1748                 dq_hash_bits++;
1749         } while (nr_hash >> dq_hash_bits);
1750         dq_hash_bits--;
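	/*
	 * Worked example (added; assumes PAGE_SIZE == 4096 and
	 * sizeof(struct hlist_head) == 8): with order 0 the page holds 512
	 * heads, the loop above leaves dq_hash_bits == 9, so the table below
	 * gets 1 << 9 == 512 buckets and dq_hash_mask == 511.
	 */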
1751
1752         nr_hash = 1UL << dq_hash_bits;
1753         dq_hash_mask = nr_hash - 1;
1754         for (i = 0; i < nr_hash; i++)
1755                 INIT_HLIST_HEAD(dquot_hash + i);
1756
1757         printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
1758                         nr_hash, order, (PAGE_SIZE << order));
1759
1760         set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
1761
1762         return 0;
1763 }
1764 module_init(dquot_init);
1765
1766 EXPORT_SYMBOL(register_quota_format);
1767 EXPORT_SYMBOL(unregister_quota_format);
1768 EXPORT_SYMBOL(dqstats);
1769 EXPORT_SYMBOL(dq_list_lock);
1770 EXPORT_SYMBOL(dq_data_lock);
1771 EXPORT_SYMBOL(vfs_quota_on);
1772 EXPORT_SYMBOL(vfs_quota_on_mount);
1773 EXPORT_SYMBOL(vfs_quota_off);
1774 EXPORT_SYMBOL(vfs_quota_sync);
1775 EXPORT_SYMBOL(vfs_get_dqinfo);
1776 EXPORT_SYMBOL(vfs_set_dqinfo);
1777 EXPORT_SYMBOL(vfs_get_dqblk);
1778 EXPORT_SYMBOL(vfs_set_dqblk);
1779 EXPORT_SYMBOL(dquot_commit);
1780 EXPORT_SYMBOL(dquot_commit_info);
1781 EXPORT_SYMBOL(dquot_acquire);
1782 EXPORT_SYMBOL(dquot_release);
1783 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
1784 EXPORT_SYMBOL(dquot_initialize);
1785 EXPORT_SYMBOL(dquot_drop);
1786 EXPORT_SYMBOL(dquot_alloc_space);
1787 EXPORT_SYMBOL(dquot_alloc_inode);
1788 EXPORT_SYMBOL(dquot_free_space);
1789 EXPORT_SYMBOL(dquot_free_inode);
1790 EXPORT_SYMBOL(dquot_transfer);