/* fs/xfs/linux-2.6/xfs_super.c (linux-2.6.git, Fedora Core 6 + vserver 2.2.0) */
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC struct quotactl_ops xfs_quotactl_operations;
STATIC struct super_operations xfs_super_operations;
STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

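/*
 * Allocate a mount-args structure and seed it from the generic
 * mount(2) flags the VFS has already parsed for us.  The caller
 * owns the returned memory and must kmem_free() it.
 */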
STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block      *sb,
        int                     silent)
{
        struct xfs_mount_args   *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

        return (((__uint64_t)pagefactor) << bitshift) - 1;
}

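/*
 * Hook up the inode/file/address-space operation vectors that match
 * the inode's file type.  Symlinks only get an address space when
 * they are stored in data blocks rather than inline, i.e. when
 * i_blocks is non-zero.
 */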
STATIC __inline__ void
xfs_set_inodeops(
        struct inode            *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}

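/*
 * Bring the Linux inode up to date with the on-disk XFS inode core:
 * ownership, size, timestamps, device numbers and the S_* flag bits
 * derived from the on-disk XFS_DIFLAG_* flags (including the vserver
 * additions S_IUNLINK and S_BARRIER).  Clears VMODIFIED when done.
 */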
STATIC __inline__ void
xfs_revalidate_inode(
        xfs_mount_t             *mp,
        bhv_vnode_t             *vp,
        xfs_inode_t             *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
        inode->i_uid    = ip->i_d.di_uid;
        inode->i_gid    = ip->i_d.di_gid;
        inode->i_tag    = ip->i_d.di_tag;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_IUNLINK)
                inode->i_flags |= S_IUNLINK;
        else
                inode->i_flags &= ~S_IUNLINK;
        if (ip->i_d.di_flags & XFS_DIFLAG_BARRIER)
                inode->i_flags |= S_BARRIER;
        else
                inode->i_flags &= ~S_BARRIER;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        vp->v_flag &= ~VMODIFIED;
}

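/*
 * Bind an XFS inode behavior to its vnode and, once the inode core
 * is fully set up, populate the Linux inode and unlock it.  May be
 * called twice for a newly created inode (see the comment below).
 */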
void
xfs_initialize_vnode(
        bhv_desc_t              *bdp,
        bhv_vnode_t             *vp,
        bhv_desc_t              *inode_bhv,
        int                     unlock)
{
        xfs_inode_t             *ip = XFS_BHVTOI(inode_bhv);
        struct inode            *inode = vn_to_inode(vp);

        if (!inode_bhv->bd_vobj) {
                vp->v_vfsp = bhvtovfs(bdp);
                bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
                bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode.  We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
                xfs_set_inodeops(inode);

                xfs_iflags_clear(ip, XFS_INEW);
                barrier();

                unlock_new_inode(inode);
        }
}

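/*
 * Open/close the block devices backing a mount (data, log, realtime).
 * open_bdev_excl() gives us an exclusive claim on the device.  Note
 * the negated return value: XFS uses positive errnos internally.
 */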
int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t     *mp)
{
        xfs_buf_t       *sbp = xfs_getsb(mp, 0);
        int             error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer.  We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}

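/*
 * Decide at mount time whether write barriers can stay enabled.  Each
 * failed precondition (external log device, no ordered-write support
 * in the queue, read-only device, failed trial write) downgrades the
 * mount by clearing XFS_MOUNT_BARRIER and logging a notice.
 */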
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, underlying device is readonly");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}

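/*
 * Linux inode lifecycle: inodes are carved out of xfs_vnode_zone, a
 * slab whose objects embed the struct inode inside a bhv_vnode_t, so
 * converting between the two is just pointer arithmetic.
 */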
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        bhv_vnode_t             *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        void                    *vnode,
        kmem_zone_t             *zonep,
        unsigned long           flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                      SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}

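/*
 * Set up the slab zones and the ioend mempool used by buffered I/O
 * completion.  The mempool guarantees forward progress on writeback
 * even under memory pressure; teardown happens in reverse order on
 * any allocation failure.
 */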
STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode.  This will actually fail if the
 * inode is pinned, but we redirty the inode at the point when it
 * is unpinned after a log write, since that is when the inode
 * itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode            *inode,
        int                     sync)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);
        int                     error = 0, flags = FLUSH_INODE;

        if (vp) {
                vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
                if (sync)
                        flags |= FLUSH_SYNC;
                error = bhv_vop_iflush(vp, flags);
                if (error == EAGAIN)
                        error = sync ? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
        }
        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode            *inode)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);

        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_INC(vn_reclaim);
        XFS_STATS_DEC(vn_active);

        /*
         * This can happen because xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (VNHEAD(vp))
                bhv_vop_inactive(vp, NULL);

        VN_LOCK(vp);
        vp->v_flag &= ~VMODIFIED;
        VN_UNLOCK(vp, 0);

        if (VNHEAD(vp))
                if (bhv_vop_reclaim(vp))
                        panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

        ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
        ktrace_free(vp->v_trace);
#endif
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct bhv_vfs  *vfs,
        void            *data,
        void            (*syncer)(bhv_vfs_t *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_vfs = vfs;
        spin_lock(&vfs->vfs_sync_lock);
        list_add_tail(&work->w_list, &vfs->vfs_sync_list);
        spin_unlock(&vfs->vfs_sync_lock);
        wake_up_process(vfs->vfs_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        filemap_flush(((struct inode *)inode)->i_mapping);
        iput((struct inode *)inode);
}

void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        sync_blockdev(vfs->vfs_super->s_bdev);
        iput((struct inode *)inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

STATIC void
vfs_sync_worker(
        bhv_vfs_t       *vfsp,
        void            *unused)
{
        int             error;

        if (!(vfsp->vfs_flag & VFS_RDONLY))
                error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH |
                                        SYNC_ATTR | SYNC_REFCACHE, NULL);
        vfsp->vfs_sync_seq++;
        wmb();
        wake_up(&vfsp->vfs_wait_single_sync_task);
}

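/*
 * Main loop of the per-mount sync daemon.  Sleeps for the configured
 * interval (xfs_syncd_centisecs), queues the periodic sync work when
 * the timeout expires, then drains the work list.  The thread is
 * freezable for suspend and exits once kthread_stop() is called and
 * the work list is empty.
 */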
STATIC int
xfssyncd(
        void                    *arg)
{
        long                    timeleft;
        bhv_vfs_t               *vfsp = (bhv_vfs_t *) arg;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
                        break;

                spin_lock(&vfsp->vfs_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
                        list_add_tail(&vfsp->vfs_sync_work.w_list,
                                        &vfsp->vfs_sync_list);
                }
                list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&vfsp->vfs_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(vfsp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &vfsp->vfs_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC int
xfs_fs_start_syncd(
        bhv_vfs_t               *vfsp)
{
        vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
        vfsp->vfs_sync_work.w_vfs = vfsp;
        vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
        if (IS_ERR(vfsp->vfs_sync_task))
                return -PTR_ERR(vfsp->vfs_sync_task);
        return 0;
}

STATIC void
xfs_fs_stop_syncd(
        bhv_vfs_t               *vfsp)
{
        kthread_stop(vfsp->vfs_sync_task);
}

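/*
 * Tear down a mount: stop the sync daemon, push out remaining dirty
 * metadata and data, then unmount the behavior chain.  The vfs is
 * only deallocated on success; on failure we deliberately leave it
 * dangling rather than free state the filesystem may still reference.
 */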
STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;

        xfs_fs_stop_syncd(vfsp);
        bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
        error = bhv_vfs_unmount(vfsp, 0, NULL);
        if (error) {
                printk("XFS: unmount got error=%d\n", error);
                printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
        } else {
                vfs_deallocate(vfsp);
        }
}

STATIC void
xfs_fs_write_super(
        struct super_block      *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
        sb->s_dirt = 0;
}

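/*
 * ->sync_fs entry point.  A frozen filesystem gets the full quiesce
 * treatment; otherwise we sync fs data, waiting only if requested.
 * The laptop_mode handling below keeps the periodic xfssyncd work
 * from spinning the disk up again right after this sync completes.
 */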
STATIC int
xfs_fs_sync_super(
        struct super_block      *sb,
        int                     wait)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;
        int                     flags;

        if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
                flags = SYNC_QUIESCE;
        else
                flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

        error = bhv_vfs_sync(vfsp, flags, NULL);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int     prev_sync_seq = vfsp->vfs_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(vfsp->vfs_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(vfsp->vfs_wait_single_sync_task,
                                vfsp->vfs_sync_seq != prev_sync_seq);
        }

        return -error;
}

STATIC int
xfs_fs_statfs(
        struct dentry           *dentry,
        struct kstatfs          *statp)
{
        return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp,
                                vn_from_inode(dentry->d_inode));
}

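/*
 * Remount: reparse the option string and apply any changes.  The
 * vserver tagging option (XFSMNT2_TAGGED) cannot be enabled on a
 * remount, so reject it here unless the superblock is already
 * tagged (MS_TAGGED).
 */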
STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
        int                     error;

        error = bhv_vfs_parseargs(vfsp, options, args, 1);
        if ((args->flags2 & XFSMNT2_TAGGED) &&
                !(sb->s_flags & MS_TAGGED)) {
                printk("XFS: %s: tagging not permitted on remount.\n",
                        sb->s_id);
                error = EINVAL;
        }
        if (!error)
                error = bhv_vfs_mntupdate(vfsp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC void
xfs_fs_lockfs(
        struct super_block      *sb)
{
        bhv_vfs_freeze(vfs_from_sb(sb));
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct vfsmount         *mnt)
{
        return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}

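/*
 * Quotactl entry points: thin wrappers that translate the generic
 * quota commands into XFS Q_X* commands, mapping USRQUOTA and
 * GRPQUOTA (with project quota as the fallback) onto the matching
 * get/set variants.
 */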
STATIC int
xfs_fs_quotasync(
        struct super_block      *sb,
        int                     type)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block      *sb,
        struct fs_quota_stat    *fqs)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block      *sb,
        unsigned int            flags,
        int                     op)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}

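/*
 * Read in the superblock and set up a new mount: allocate the vfs,
 * parse the option string, mount the behavior chain, then derive
 * the generic superblock fields (blocksize, s_maxbytes, magic) from
 * statvfs before instantiating the root dentry and starting the
 * xfssyncd thread.  Error paths unwind in reverse order.
 */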
STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        struct bhv_vnode        *rootvp;
        struct bhv_vfs          *vfsp = vfs_allocate(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
        struct kstatfs          statvfs;
        int                     error;

        bhv_insert_all_vfsops(vfsp);

        error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        sb_min_blocksize(sb, BBSIZE);
        sb->s_export_op = &xfs_export_operations;
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = bhv_vfs_mount(vfsp, args, NULL);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
        if (error)
                goto fail_unmount;

        sb->s_dirt = 1;
        sb->s_magic = statvfs.f_type;
        sb->s_blocksize = statvfs.f_bsize;
        sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = bhv_vfs_root(vfsp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }
        if ((error = xfs_fs_start_syncd(vfsp)))
                goto fail_vnrele;
        vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
        vfs_deallocate(vfsp);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC int
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data,
        struct vfsmount         *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
                           mnt);
}

STATIC struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

STATIC struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

STATIC struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};

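/*
 * Module init: print the version banner, record physical memory
 * size, initialize tracing, the slab zones and the buffer cache,
 * then register the filesystem type.  Failures unwind through the
 * undo_* labels in reverse order of initialization.
 */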
STATIC int __init
init_xfs_fs(void)
{
        int                     error;
        struct sysinfo          si;
        static char             message[] __initdata = KERN_INFO
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        si_meminfo(&si);
        xfs_physmem = si.totalram;

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");