/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
STATIC struct quotactl_ops linvfs_qops;
STATIC struct super_operations linvfs_sops;
STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_NOATIME)
		args->flags |= XFSMNT_NOATIME;
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;

	/* Default to 32 bit inodes on Linux all the time */
	args->flags |= XFSMNT_32BITINODES;

	return args;
}
STATIC __uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
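/*
 * Worked example (editor's illustration; assumes a 32 bit kernel with
 * PAGE_CACHE_SIZE = 4096 and PAGE_CACHE_SHIFT = 12, matching the comment
 * above):
 *
 *   4K blocks, no CONFIG_LBD: pagefactor = 4096 >> 0, bitshift = 31,
 *       max offset = (2^12 << 31) - 1 = 2^43 - 1  (just under 8Tb)
 *   1K blocks, no CONFIG_LBD: pagefactor = 4096 >> 2, bitshift = 31,
 *       max offset = (2^10 << 31) - 1 = 2^41 - 1  (just under 2Tb)
 *   CONFIG_LBD (64 bit sector_t): pagefactor = 4096, bitshift = 32,
 *       max offset = (2^12 << 32) - 1 = 2^44 - 1, independent of the
 *       filesystem blocksize.
 */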
STATIC __inline__ void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &linvfs_file_inode_operations;
		inode->i_fop = &linvfs_file_operations;
		inode->i_mapping->a_ops = &linvfs_aops;
		break;
	case S_IFDIR:
		inode->i_op = &linvfs_dir_inode_operations;
		inode->i_fop = &linvfs_dir_operations;
		break;
	case S_IFLNK:
		inode->i_op = &linvfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &linvfs_aops;
		break;
	default:
		inode->i_op = &linvfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
STATIC __inline__ void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	vnode_t			*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = LINVFS_GET_IP(vp);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;
	inode->i_xid	= ip->i_d.di_xid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_blksize = xfs_preferred_iosize(mp);
	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_IUNLINK)
		inode->i_flags |= S_IUNLINK;
	else
		inode->i_flags &= ~S_IUNLINK;
	if (ip->i_d.di_flags & XFS_DIFLAG_BARRIER)
		inode->i_flags |= S_BARRIER;
	else
		inode->i_flags &= ~S_BARRIER;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}
STATIC void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	vnode_t			*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = LINVFS_GET_IP(vp);

	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish the job.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		ip->i_flags &= ~XFS_INEW;

		unlock_new_inode(inode);
	}
}
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock buf.
	 */
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
					QUEUE_ORDERED_NONE) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported by the underlying device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}
}
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
STATIC struct inode *
linvfs_alloc_inode(
	struct super_block	*sb)
{
	vnode_t			*vp;

	vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
	if (unlikely(!vp))
		return NULL;
	return LINVFS_GET_IP(vp);
}

STATIC void
linvfs_destroy_inode(
	struct inode		*inode)
{
	kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode));
}
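/*
 * Slab constructor for the vnode zone: initialize the embedded Linux
 * inode only when the slab allocator is actually constructing a new
 * object (SLAB_CTOR_CONSTRUCTOR set, SLAB_CTOR_VERIFY clear), not when
 * it is merely verifying an existing one.
 */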
STATIC void
linvfs_inode_init_once(
	void			*data,
	kmem_cache_t		*cachep,
	unsigned long		flags)
{
	vnode_t			*vp = (vnode_t *)data;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(LINVFS_GET_IP(vp));
}
STATIC int
linvfs_init_zones(void)
{
	xfs_vnode_zone = kmem_cache_create("xfs_vnode",
				sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
				linvfs_inode_init_once, NULL);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
				mempool_alloc_slab, mempool_free_slab,
				xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;

	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}
STATIC void
linvfs_destroy_zones(void)
{
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_vnode_zone);
	kmem_zone_destroy(xfs_ioend_zone);
}
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
linvfs_write_inode(
	struct inode		*inode,
	int			sync)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		if (sync)
			flags |= FLUSH_SYNC;
		VOP_IFLUSH(vp, flags, error);
		if (error == EAGAIN) {
			if (sync)
				VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
			else
				error = 0;
		}
	}

	return -error;
}
STATIC void
linvfs_clear_inode(
	struct inode		*inode)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, cache;

	vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address);

	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_INC(vn_reclaim);
	XFS_STATS_DEC(vn_active);

	/*
	 * This can happen because xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	VOP_INACTIVE(vp, NULL, cache);

	vp->v_flag &= ~VMODIFIED;

	VOP_RECLAIM(vp, error);
	if (error)
		panic("vn_purge: cannot reclaim");

	ASSERT(vp->v_fbhv == NULL);

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
#endif
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct vfs	*vfs,
	void		*data,
	void		(*syncer)(vfs_t *, void *))
{
	vfs_sync_work_t	*work;

	work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;

	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	vfs_t		*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
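/*
 * The delay(msecs_to_jiffies(500)) above is presumably there to give
 * xfssyncd a chance to run the queued filemap_flush() before the caller,
 * which has just seen ENOSPC, retries its allocation; handing the flush
 * to xfssyncd also keeps it off the caller's stack, per the comment
 * above xfs_syncd_queue_work().
 */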
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	vfs_t		*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)

STATIC void
vfs_sync_worker(
	vfs_t		*vfsp,
	void		*unused)
{
	int		error;

	if (!(vfsp->vfs_flag & VFS_RDONLY))
		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
	vfsp->vfs_sync_seq++;
	wake_up(&vfsp->vfs_wait_single_sync_task);
}
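/*
 * Main loop of the xfssyncd kernel thread: sleep for the configured
 * interval (xfs_syncd_centisecs), re-arm the built-in periodic sync work
 * when the timeout expires, then drain and run any work items queued via
 * xfs_syncd_queue_work().  One-shot items are freed after they run; the
 * embedded vfs_sync_work is not.
 */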
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	vfs_t			*vfsp = (vfs_t *) arg;
	struct vfs_sync_work	*work, *n;
	LIST_HEAD		(tmp);

	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct vfs_sync_work));
		}
	}

	return 0;
}
STATIC int
linvfs_start_syncd(
	vfs_t			*vfsp)
{
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
	if (IS_ERR(vfsp->vfs_sync_task))
		return -PTR_ERR(vfsp->vfs_sync_task);
	return 0;
}

STATIC void
linvfs_stop_syncd(
	vfs_t			*vfsp)
{
	kthread_stop(vfsp->vfs_sync_task);
}
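/*
 * linvfs_start_syncd()/linvfs_stop_syncd() bracket the lifetime of the
 * per-mount xfssyncd thread: kthread_run() starts it when the filesystem
 * is mounted and kthread_stop() makes kthread_should_stop() return true,
 * letting the loop above exit at unmount.  PTR_ERR() yields a negative
 * errno, so -PTR_ERR() converts it to the positive error convention used
 * throughout this file.
 */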
STATIC void
linvfs_put_super(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	linvfs_stop_syncd(vfsp);
	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
	if (!error)
		VFS_UNMOUNT(vfsp, 0, NULL, error);
	if (error) {
		printk("XFS unmount got error %d\n", error);
		printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
		return;
	}

	vfs_deallocate(vfsp);
}
STATIC void
linvfs_write_super(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	if (sb->s_flags & MS_RDONLY) {
		sb->s_dirt = 0; /* paranoia */
		return;
	}
	/* Push the log and superblock a little */
	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
	sb->s_dirt = 0;
}
STATIC int
linvfs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
	int		error;
	int		flags = SYNC_FSDATA;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
		flags = SYNC_QUIESCE;
	else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	VFS_SYNC(vfsp, flags, NULL, error);

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}
STATIC int
linvfs_statfs(
	struct super_block	*sb,
	struct kstatfs		*statp)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_STATVFS(vfsp, statp, NULL, error);
	return -error;
}
STATIC int
linvfs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb);
	int			error;

	VFS_PARSEARGS(vfsp, options, args, 1, error);
	if ((args->flags2 & XFSMNT2_TAGXID) &&
	    !(sb->s_flags & MS_TAGXID)) {
		printk("XFS: %s: tagxid not permitted on remount.\n",
			sb->s_id);
		error = EINVAL;
	}
	if (!error)
		VFS_MNTUPDATE(vfsp, flags, args, error);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC void
linvfs_freeze_fs(
	struct super_block	*sb)
{
	VFS_FREEZE(LINVFS_GET_VFS(sb));
}
STATIC int
linvfs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
	int			error;

	VFS_SHOWARGS(vfsp, m, error);
	return error;
}
STATIC int
linvfs_quotasync(
	struct super_block	*sb,
	int			type)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error);
	return -error;
}
STATIC int
linvfs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
	return -error;
}
STATIC int
linvfs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
	return -error;
}
STATIC int
linvfs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error, getmode;

	getmode = (type == USRQUOTA) ? Q_XGETQUOTA :
		 ((type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETPQUOTA);
	VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
	return -error;
}
STATIC int
linvfs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error, setmode;

	setmode = (type == USRQUOTA) ? Q_XSETQLIM :
		 ((type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETPQLIM);
	VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
	return -error;
}
STATIC int
linvfs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	vnode_t			*rootvp;
	struct vfs		*vfsp = vfs_allocate();
	struct xfs_mount_args	*args = xfs_args_allocate(sb);
	struct kstatfs		statvfs;
	int			error, error2;

	vfsp->vfs_super = sb;
	LINVFS_SET_VFS(sb, vfsp);
	if (sb->s_flags & MS_RDONLY)
		vfsp->vfs_flag |= VFS_RDONLY;
	bhv_insert_all_vfsops(vfsp);

	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
#ifdef CONFIG_XFS_EXPORT
	sb->s_export_op = &linvfs_export_ops;
#endif
	sb->s_qcop = &linvfs_qops;
	sb->s_op = &linvfs_sops;

	VFS_MOUNT(vfsp, args, NULL, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	VFS_STATVFS(vfsp, &statvfs, NULL, error);
	if (error)
		goto fail_unmount;

	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
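	/* e.g. f_bsize = 4096: ffs() returns 13, so s_blocksize_bits = 12 */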
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	set_posix_acl_flag(sb);

	VFS_ROOT(vfsp, &rootvp, error);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = linvfs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	VFS_UNMOUNT(vfsp, 0, NULL, error2);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC struct super_block *
linvfs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super);
}
STATIC struct super_operations linvfs_sops = {
	.alloc_inode		= linvfs_alloc_inode,
	.destroy_inode		= linvfs_destroy_inode,
	.write_inode		= linvfs_write_inode,
	.clear_inode		= linvfs_clear_inode,
	.put_super		= linvfs_put_super,
	.write_super		= linvfs_write_super,
	.sync_fs		= linvfs_sync_super,
	.write_super_lockfs	= linvfs_freeze_fs,
	.statfs			= linvfs_statfs,
	.remount_fs		= linvfs_remount,
	.show_options		= linvfs_show_options,
};

STATIC struct quotactl_ops linvfs_qops = {
	.quota_sync		= linvfs_quotasync,
	.get_xstate		= linvfs_getxstate,
	.set_xstate		= linvfs_setxstate,
	.get_xquota		= linvfs_getxquota,
	.set_xquota		= linvfs_setxquota,
};

STATIC struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= linvfs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	si_meminfo(&si);
	xfs_physmem = si.totalram;

	error = linvfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	XFS_DM_INIT(&xfs_fs_type);
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	linvfs_destroy_zones();

undo_zones:
	return error;
}
STATIC void __exit
exit_xfs_fs( void )
{
	XFS_DM_EXIT(&xfs_fs_type);
	unregister_filesystem(&xfs_fs_type);
	xfs_buf_terminate();
	linvfs_destroy_zones();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");