2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
28 * For further information regarding this notice, see:
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
38 #include "xfs_trans.h"
42 #include "xfs_alloc.h"
43 #include "xfs_dmapi.h"
44 #include "xfs_quota.h"
45 #include "xfs_mount.h"
46 #include "xfs_alloc_btree.h"
47 #include "xfs_bmap_btree.h"
48 #include "xfs_ialloc_btree.h"
49 #include "xfs_btree.h"
50 #include "xfs_ialloc.h"
51 #include "xfs_attr_sf.h"
52 #include "xfs_dir_sf.h"
53 #include "xfs_dir2_sf.h"
54 #include "xfs_dinode.h"
55 #include "xfs_inode.h"
58 #include "xfs_rtalloc.h"
59 #include "xfs_error.h"
60 #include "xfs_itable.h"
66 #include "xfs_buf_item.h"
67 #include "xfs_utils.h"
68 #include "xfs_version.h"
70 #include <linux/init.h>
/*
 * Forward declarations for the superblock/quota operation tables and
 * the inode cache/shaker objects defined later in this file.
 */
72 STATIC struct quotactl_ops linvfs_qops;
73 STATIC struct super_operations linvfs_sops;
74 STATIC kmem_zone_t *linvfs_inode_zone;
75 STATIC kmem_shaker_t xfs_inode_shaker;
/*
 * Allocate a zero-filled xfs_mount_args and seed it from the
 * already-parsed state of @sb (device name, MS_NOATIME, etc.).
 * NOTE(review): this file is a lossy excerpt -- the function name line
 * and trailing return/closing brace are not visible here.
 */
77 STATIC struct xfs_mount_args *
79 	struct super_block *sb)
81 	struct xfs_mount_args *args;
83 	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
/* -1 means "let the log code pick its own defaults" for both fields */
84 	args->logbufs = args->logbufsize = -1;
85 	strncpy(args->fsname, bdevname(sb->s_dev), MAXNAMELEN);
87 	/* Copy the already-parsed mount(2) flags we're interested in */
88 	if (sb->s_flags & MS_NOATIME)
89 		args->flags |= XFSMNT_NOATIME;
91 	/* Default to 32 bit inodes on Linux all the time */
92 	args->flags |= XFSMNT_32BITINODES;
/*
 * Compute the largest file offset this kernel can address for a
 * filesystem with block size 2^@blockshift.  The long comment below
 * (from the original author) explains the 32-bit page-cache limit.
 * NOTE(review): signature/name lines are missing from this excerpt.
 */
99 	unsigned int blockshift)
101 	unsigned int pagefactor = 1;
102 	unsigned int bitshift = BITS_PER_LONG - 1;
104 	/* Figure out maximum filesize, on Linux this can depend on
105 	 * the filesystem blocksize (on 32 bit platforms).
106 	 * __block_prepare_write does this in an [unsigned] long...
107 	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
108 	 * So, for page sized blocks (4K on 32 bit platforms),
109 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
110 	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
111 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
112 	 * Note1: get_block_t takes a long (implicit cast from above)
113 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
114 	 * can optionally convert the [unsigned] long from above into
115 	 * an [unsigned] long long.
118 #if BITS_PER_LONG == 32
119 	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
/* max offset = (pagefactor << (BITS_PER_LONG-1)) - 1, as a 64-bit value */
122 	return (((__uint64_t)pagefactor) << bitshift) - 1;
/*
 * Install the Linux inode/file/address-space operation vectors that
 * match the inode's mode (regular file, directory, symlink, special).
 * VNON vnodes are skipped -- their type is not yet known.
 * NOTE(review): function name line and some branch lines are missing
 * from this excerpt.
 */
125 STATIC __inline__ void
129 	vnode_t *vp = LINVFS_GET_VP(inode);
131 	if (vp->v_type == VNON) {
133 	} else if (S_ISREG(inode->i_mode)) {
134 		inode->i_op = &linvfs_file_inode_operations;
135 		inode->i_fop = &linvfs_file_operations;
136 		inode->i_mapping->a_ops = &linvfs_aops;
137 	} else if (S_ISDIR(inode->i_mode)) {
138 		inode->i_op = &linvfs_dir_inode_operations;
139 		inode->i_fop = &linvfs_dir_operations;
140 	} else if (S_ISLNK(inode->i_mode)) {
141 		inode->i_op = &linvfs_symlink_inode_operations;
143 			inode->i_mapping->a_ops = &linvfs_aops;
/* fall-through: special files (char/block/fifo/socket) */
145 		inode->i_op = &linvfs_file_inode_operations;
146 		init_special_inode(inode, inode->i_mode,
147 				kdev_t_to_nr(inode->i_rdev));
151 STATIC __inline__ void
/*
 * Copy the on-disk XFS inode fields (xfs_inode_t) into the Linux
 * struct inode so the VFS sees current metadata, then clear the
 * vnode's VMODIFIED flag.
 * NOTE(review): parameter list lines are missing from this excerpt;
 * apparently (mp, vp, ip) judging by the body and callers.
 */
152 xfs_revalidate_inode(
157 	struct inode *inode = LINVFS_GET_IP(vp);
159 	inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
160 	inode->i_nlink = ip->i_d.di_nlink;
161 	inode->i_uid = ip->i_d.di_uid;
162 	inode->i_gid = ip->i_d.di_gid;
/* only block/char specials carry a real device number */
163 	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
164 		inode->i_rdev = NODEV;
166 		xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
167 		inode->i_rdev = XFS_DEV_TO_KDEVT(dev);
169 	inode->i_blksize = PAGE_CACHE_SIZE;
170 	inode->i_generation = ip->i_d.di_gen;
171 	i_size_write(inode, ip->i_d.di_size);
173 		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
174 	inode->i_atime	= ip->i_d.di_atime.t_sec;
175 	inode->i_mtime	= ip->i_d.di_mtime.t_sec;
176 	inode->i_ctime	= ip->i_d.di_ctime.t_sec;
/* mirror the XFS per-inode flags into the generic inode flags */
177 	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
178 		inode->i_flags |= S_IMMUTABLE;
180 		inode->i_flags &= ~S_IMMUTABLE;
181 	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
182 		inode->i_flags |= S_APPEND;
184 		inode->i_flags &= ~S_APPEND;
185 	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
186 		inode->i_flags |= S_SYNC;
188 		inode->i_flags &= ~S_SYNC;
189 	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
190 		inode->i_flags |= S_NOATIME;
192 		inode->i_flags &= ~S_NOATIME;
194 	vp->v_flag &= ~VMODIFIED;
/*
 * Attach an XFS inode behavior to a vnode and, once the inode is far
 * enough along in creation (di_mode set, I_NEW), fill in the Linux
 * inode fields and op vectors and unlock it.
 * NOTE(review): return type and part of the parameter list are missing
 * from this excerpt.
 */
198 xfs_initialize_vnode(
201 	bhv_desc_t		*inode_bhv,
204 	xfs_inode_t	*ip = XFS_BHVTOI(inode_bhv);
205 	struct inode	*inode = LINVFS_GET_IP(vp);
/* first time through: link the behavior descriptor into the vnode chain */
207 	if (!inode_bhv->bd_vobj) {
208 		vp->v_vfsp = bhvtovfs(bdp);
209 		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
210 		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
214 	 * We need to set the ops vectors, and unlock the inode, but if
215 	 * we have been called during the new inode create process, it is
216 	 * too early to fill in the Linux inode.  We will get called a
217 	 * second time once the inode is properly set up, and then we can
220 	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
221 		vp->v_type = IFTOVT(ip->i_d.di_mode);
222 		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
223 		xfs_set_inodeops(inode);
225 		ip->i_flags &= ~XFS_INEW;
228 		unlock_new_inode(inode);
/*
 * Memory-shaker callback: shrink both the linvfs and xfs inode zones
 * and report (presumably) how many pages were reclaimed.
 * NOTE(review): signature and return lines are missing from this excerpt.
 */
235 	unsigned int gfp_mask)
239 	pages = kmem_zone_shrink(linvfs_inode_zone);
240 	pages += kmem_zone_shrink(xfs_inode_zone);
/*
 * Look up (or begin creating) the Linux inode for @ino on this vfs.
 * NOTE(review): name/signature lines are missing from this excerpt --
 * presumably the VFS "get inode" helper; confirm against full source.
 */
250 	struct vfs	*vfsp = bhvtovfs(bdp);
252 	return iget_locked(vfsp->vfs_super, ino);
/*
 * Find or create an anonymous ("disconnected") dentry for @inode,
 * for NFS filehandle export.  Reuses a connected alias when one
 * exists; otherwise allocates a root dentry flagged as disconnected.
 * NOTE(review): several lines (return type, loop exit, return) are
 * missing from this excerpt.
 */
256 d_alloc_anon(struct inode *inode)
258 	struct dentry *dentry;
260 	spin_lock(&dcache_lock);
261 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
/* a fully-connected alias is preferable to a new anonymous dentry */
262 		if (!(dentry->d_flags & DCACHE_NFSD_DISCONNECTED))
265 	spin_unlock(&dcache_lock);
267 	dentry = d_alloc_root(inode);
268 	if (likely(dentry != NULL))
269 		dentry->d_flags |= DCACHE_NFSD_DISCONNECTED;
273 	dentry->d_vfs_flags |= DCACHE_REFERENCED;
274 	spin_unlock(&dcache_lock);
/*
 * Resolve a device path @name to an opened block device in *@bdevp
 * (read/write, BDEV_FS usage).
 * NOTE(review): name/signature and error-path lines are missing from
 * this excerpt.
 */
284 	struct block_device **bdevp)
289 	error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd);
291 		printk("XFS: Invalid device [%s], error=%d\n", name, error);
295 	/* I think we actually want bd_acquire here.. --hch */
296 	*bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev));
298 	error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
/* Release a block device taken via blkdev_get(..., BDEV_FS). */
308 	struct block_device *bdev)
311 		blkdev_put(bdev, BDEV_FS);
314 STATIC struct inode *
/*
 * super_operations.alloc_inode: allocate a vnode from the inode zone
 * and hand back its embedded Linux inode.
 * NOTE(review): the function name line is missing from this excerpt.
 */
316 	struct super_block	*sb)
320 	vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone,
321 		kmem_flags_convert(KM_SLEEP));
324 	return LINVFS_GET_IP(vp);
/* super_operations.destroy_inode: return the enclosing vnode to the zone. */
328 linvfs_destroy_inode(
331 	kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode));
/* Size of a vnode with its embedded inode truncated at the union --
 * part of a #define (the macro name line is missing from this excerpt). */
335 	(sizeof(vnode_t) - sizeof(struct inode) + offsetof(struct inode, u))
/*
 * Slab constructor: zero the vnode and run one-time inode init,
 * but only on a genuine construction pass.
 * NOTE(review): the function name line is missing from this excerpt.
 */
340 	kmem_cache_t	*cachep,
343 	vnode_t	*vp = (vnode_t *)data;
345 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
346 	    SLAB_CTOR_CONSTRUCTOR) {
347 		struct inode *inode = LINVFS_GET_IP(vp);
348 		memset(vp, 0, VNODE_SIZE);
349 		__inode_init_once(inode);
/*
 * Create the linvfs inode slab cache; returns an error when the cache
 * cannot be created.
 * NOTE(review): return type and error-return lines are missing from
 * this excerpt.
 */
354 init_inodecache( void )
356 	linvfs_inode_zone = kmem_cache_create("linvfs_icache",
357 				VNODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
359 	if (linvfs_inode_zone == NULL)
/* Tear down the linvfs inode cache; warn if objects are still live. */
365 destroy_inodecache( void )
367 	if (kmem_cache_destroy(linvfs_inode_zone))
368 		printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__);
372  * Attempt to flush the inode, this will actually fail
373  * if the inode is pinned, but we dirty the inode again
374  * at the point when it is unpinned after a log write,
375  * since this is when the inode itself becomes flushable.
/*
 * super_operations.write_inode: push the inode to disk via VOP_IFLUSH.
 * NOTE(review): signature and flag-setup lines are missing from this
 * excerpt.
 */
382 	vnode_t	*vp = LINVFS_GET_VP(inode);
383 	int	error, flags = FLUSH_INODE;
386 		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
389 		VOP_IFLUSH(vp, flags, error);
/*
 * super_operations.clear_inode: run vnode cleanup when the Linux inode
 * is being evicted.
 * NOTE(review): signature and body lines are missing from this excerpt.
 */
397 	vnode_t	*vp = LINVFS_GET_VP(inode);
401 		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
403 		 * Do all our cleanup, and remove this vnode.
411  * Enqueue a work item to be picked up by the vfs xfssyncd thread.
412  * Doing this has two advantages:
413  * - It saves on stack space, which is tight in certain situations
414  * - It can be used (with care) as a mechanism to avoid deadlocks.
415  * Flushing while allocating in a full filesystem requires both.
418 xfs_syncd_queue_work(
421 	void		(*syncer)(vfs_t *, void *))
423 	vfs_sync_work_t *work;
/* KM_SLEEP: may block; callers must not hold the sync lock here */
425 	work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
426 	INIT_LIST_HEAD(&work->w_list);
427 	work->w_syncer = syncer;
430 	spin_lock(&vfs->vfs_sync_lock);
431 	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
432 	spin_unlock(&vfs->vfs_sync_lock);
/* kick the per-vfs xfssyncd thread to process the queue */
433 	wake_up_process(vfs->vfs_sync_task);
437  * Flush delayed allocate data, attempting to free up reserved space
438  * from existing allocations.  At this point a new allocation attempt
439  * has failed with ENOSPC and we are in the process of scratching our
440  * heads, looking about for more room...
443 xfs_flush_inode_work(
/* @inode is passed as void*; drop the reference taken by the queuer */
447 	filemap_fdatawrite(((struct inode *)inode)->i_mapping);
448 	iput((struct inode *)inode);
/*
 * Queue a data flush of @ip's pages on the xfssyncd thread (see
 * xfs_flush_inode_work).  NOTE(review): the name/signature lines and
 * the igrab/delay lines are missing from this excerpt.
 */
455 	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
456 	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
459 	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
464  * This is the "bigger hammer" version of xfs_flush_inode_work...
465  * (IOW, "If at first you don't succeed, use a Bigger Hammer").
468 xfs_flush_device_work(
/* sync the whole device backing this inode, then drop our reference */
472 	fsync_no_super(((struct inode *)inode)->i_dev);
473 	iput((struct inode *)inode);
/*
 * Queue a whole-device flush on xfssyncd and force the log out
 * synchronously.  NOTE(review): name/signature lines are missing from
 * this excerpt.
 */
480 	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
481 	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
484 	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
485 	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
/* Periodic sync work run by xfssyncd; skipped for read-only mounts.
 * NOTE(review): the surrounding function definition is missing from
 * this excerpt. */
489 #define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE)
497 	if (!(vfsp->vfs_flag & VFS_RDONLY))
498 		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
/*
 * Body of the per-vfs "xfssyncd" kernel thread: block all signals,
 * register itself on the vfs, then loop -- sleeping for the configured
 * interval, draining queued vfs_sync_work items, and re-queueing the
 * built-in periodic sync work -- until VFS_UMOUNT is set.
 * NOTE(review): this file is a lossy excerpt; the function header,
 * loop braces and several lines are not visible here.
 * FIX: line "sigfillset(¤t->blocked)" was mojibake -- "&curren"
 * had been collapsed into the HTML entity U+00A4; restored to
 * "&current" to match the "__recalc_sigpending(current)" that follows.
 */
506 	vfs_t			*vfsp = (vfs_t *) arg;
507 	struct list_head	tmp;
508 	struct vfs_sync_work	*work, *n;
/* daemons ignore all signals */
513 	sigfillset(&current->blocked);
514 	__recalc_sigpending(current);
517 	sprintf(current->comm, "xfssyncd");
519 	vfsp->vfs_sync_work.w_vfs = vfsp;
520 	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
521 	vfsp->vfs_sync_task = current;
/* tell linvfs_start_syncd we are up and running */
523 	wake_up(&vfsp->vfs_wait_sync_task);
525 	INIT_LIST_HEAD(&tmp);
526 	timeleft = (xfs_syncd_centisecs * HZ) / 100;
528 		set_current_state(TASK_INTERRUPTIBLE);
529 		timeleft = schedule_timeout(timeleft);
530 		if (vfsp->vfs_flag & VFS_UMOUNT)
533 		spin_lock(&vfsp->vfs_sync_lock);
/* timer expired: re-arm and queue the periodic sync work */
535 			timeleft = (xfs_syncd_centisecs * HZ) / 100;
536 			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
537 			list_add_tail(&vfsp->vfs_sync_work.w_list,
538 					&vfsp->vfs_sync_list);
/* move the queue to a private list so work runs without the lock */
540 		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
541 			list_move(&work->w_list, &tmp);
542 		spin_unlock(&vfsp->vfs_sync_lock);
544 		list_for_each_entry_safe(work, n, &tmp, w_list) {
545 			(*work->w_syncer)(vfsp, work->w_data);
546 			list_del(&work->w_list);
/* the embedded periodic work item is never freed */
547 			if (work == &vfsp->vfs_sync_work)
549 			kmem_free(work, sizeof(struct vfs_sync_work));
553 	vfsp->vfs_sync_task = NULL;
/* tell linvfs_stop_syncd we are gone */
555 	wake_up(&vfsp->vfs_wait_sync_task);
/*
 * Spawn the xfssyncd kernel thread for @vfsp and wait until it has
 * registered itself (vfs_sync_task non-NULL).
 * NOTE(review): name/signature and error-check lines are missing from
 * this excerpt.
 */
566 	pid = kernel_thread(xfssyncd, (void *) vfsp,
567 			    CLONE_VM | CLONE_FS | CLONE_FILES);
570 	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
/*
 * Ask xfssyncd to exit (VFS_UMOUNT), wake it, and wait until it has
 * cleared vfs_sync_task.  NOTE(review): name/signature lines are
 * missing from this excerpt.
 */
578 	vfsp->vfs_flag |= VFS_UMOUNT;
581 	wake_up_process(vfsp->vfs_sync_task);
582 	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
/*
 * super_operations.put_super: stop the sync daemon, flush remaining
 * attribute/delwri data, unmount the behavior chain, and free the vfs.
 * A failed unmount leaves the vfs dangling (logged below) rather than
 * freeing live state.
 * NOTE(review): the function name line and some error-path lines are
 * missing from this excerpt.
 */
587 	struct super_block	*sb)
589 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
592 	linvfs_stop_syncd(vfsp);
593 	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
595 	VFS_UNMOUNT(vfsp, 0, NULL, error);
597 		printk("XFS unmount got error %d\n", error);
598 		printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
602 	vfs_deallocate(vfsp);
/*
 * super_operations.write_super: nudge the log and superblock to disk;
 * a read-only mount only clears the dirty flag.
 * NOTE(review): the function name line is missing from this excerpt.
 */
607 	struct super_block	*sb)
609 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
612 	if (sb->s_flags & MS_RDONLY) {
613 		sb->s_dirt = 0; /* paranoia */
616 	/* Push the log and superblock a little */
617 	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
/*
 * super_operations.sync_fs: synchronously flush filesystem data.
 * NOTE(review): the function name line is missing from this excerpt.
 */
623 	struct super_block	*sb)
625 	vfs_t	*vfsp = LINVFS_GET_VFS(sb);
628 	VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_WAIT, NULL, error);
/*
 * super_operations.statfs: delegate to VFS_STATVFS.
 * NOTE(review): the function name line is missing from this excerpt.
 */
635 	struct super_block	*sb,
636 	struct statfs		*statp)
638 	vfs_t	*vfsp = LINVFS_GET_VFS(sb);
641 	VFS_STATVFS(vfsp, statp, NULL, error);
/*
 * super_operations.remount_fs: re-parse mount options into a fresh
 * args structure and apply them via VFS_MNTUPDATE.
 * NOTE(review): name line and error handling are missing from this
 * excerpt.
 */
647 	struct super_block	*sb,
651 	vfs_t	*vfsp = LINVFS_GET_VFS(sb);
652 	struct xfs_mount_args *args = xfs_args_allocate(sb);
655 	VFS_PARSEARGS(vfsp, options, args, 1, error);
657 		VFS_MNTUPDATE(vfsp, flags, args, error);
658 	kmem_free(args, sizeof(*args));
/*
 * Freeze the filesystem on @bdev: stop new writers, flush refcache,
 * delalloc/delwri data and remaining inodes, pause the transaction
 * subsystem, and push all buffers out.  Returns the super_block;
 * thaw_bdev() later releases sb->s_umount.
 * NOTE(review): several lines (locking, sync_supers_sb) are missing
 * from this excerpt.
 */
662 struct super_block *freeze_bdev(struct block_device *bdev)
664 	struct super_block *sb;
668 	sb = get_super(to_kdev_t(bdev->bd_dev));
669 	if (sb && !(sb->s_flags & MS_RDONLY)) {
670 		vfsp = LINVFS_GET_VFS(sb);
672 		/* Stop new writers */
673 		vfsp->vfs_frozen = SB_FREEZE_WRITE;
676 		/* Flush the refcache */
677 		VFS_SYNC(vfsp, SYNC_REFCACHE|SYNC_WAIT, NULL, error);
679 		/* Flush delalloc and delwri data */
680 		VFS_SYNC(vfsp, SYNC_DELWRI|SYNC_WAIT, NULL, error);
682 		/* Pause transaction subsystem */
683 		vfsp->vfs_frozen = SB_FREEZE_TRANS;
686 		/* Flush any remaining inodes into buffers */
687 		VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error);
689 		/* Push all buffers out to disk */
690 		sync_buffers(sb->s_dev, 1);
692 	/* Push the superblock and write an unmount record */
696 	sync_buffers(to_kdev_t(bdev->bd_dev), 1);
697 	return sb;	/* thaw_bdev releases sb->s_umount */
/*
 * Undo freeze_bdev(): mark the vfs unfrozen and wake anyone blocked
 * waiting for the thaw.
 * NOTE(review): the s_umount release implied by freeze_bdev's comment
 * is not visible in this excerpt.
 */
700 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
703 		struct vfs *vfsp = LINVFS_GET_VFS(sb);
705 		BUG_ON(sb->s_bdev != bdev);
707 		vfsp->vfs_frozen = SB_UNFROZEN;
709 		wake_up(&vfsp->vfs_wait_unfrozen);
/*
 * super_operations.write_super_lockfs: freeze via freeze_bdev unless
 * the mount is already read-only.
 * NOTE(review): the function name line is missing from this excerpt.
 */
717 	struct super_block	*sb)
719 	if (sb->s_flags & MS_RDONLY)
721 	freeze_bdev(sb->s_bdev);
/* super_operations.unlockfs: thaw the frozen device. */
726 	struct super_block	*sb)
728 	thaw_bdev(sb->s_bdev, sb);
/*
 * Export op: encode an NFS filehandle for @dentry -- inode number and
 * generation in data[0..1], optionally the parent's in data[2..3].
 * NOTE(review): name/return lines and the maxlen check for the
 * two-word case are missing from this excerpt.
 */
733 	struct dentry	*dentry,
738 	struct inode	*inode = dentry->d_inode ;
739 	vnode_t		*vp = LINVFS_GET_VP(inode);
747 	VOP_FID2(vp, (struct fid *)&fid, error);
748 	data[0] = (__u32)fid.fid_ino;	/* 32 bits of inode is OK */
749 	data[1] = fid.fid_gen;
/* caller did not ask for (or has no room for) the parent handle */
752 	if (maxlen < 4 || ! need_parent)
755 	inode = dentry->d_parent->d_inode ;
756 	vp = LINVFS_GET_VP(inode);
758 	VOP_FID2(vp, (struct fid *)&fid, error);
759 	data[2] = (__u32)fid.fid_ino;	/* 32 bits of inode is OK */
763 	data[3] = fid.fid_gen;
768 STATIC struct dentry *
/*
 * Export op: decode an NFS filehandle back to a dentry.  Handles with
 * no parent generation information are rejected as stale; otherwise
 * the inode is looked up via VFS_VGET and wrapped in an anonymous
 * dentry.
 * NOTE(review): the function name line and the len-based dispatch
 * lines are missing from this excerpt.
 */
770 	struct super_block	*sb,
777 	struct inode		*inode = NULL;
778 	struct dentry		*result;
780 	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
783 	xfid.fid_len = sizeof(xfs_fid2_t) - sizeof(xfid.fid_len);
787 		xfid.fid_gen = data[1];
788 		xfid.fid_ino = (__u64)data[0];
792 			"XFS: detected filehandle without "
793 			"parent inode generation information.");
794 		return ERR_PTR(-ESTALE);
/* parent-handle case: ino/gen live in data[2..3] */
797 		xfid.fid_gen = data[3];
798 		xfid.fid_ino = (__u64)data[2];
801 	VFS_VGET(vfsp, &vp, (fid_t *)&xfid, error);
802 	if (error || vp == NULL)
803 		return ERR_PTR(-ESTALE) ;
805 	inode = LINVFS_GET_IP(vp);
807 	result = d_alloc_anon(inode);
808 	if (unlikely(result == NULL)) {
810 		return ERR_PTR(-ENOMEM);
/*
 * super_operations.show_options: print mount options via VFS_SHOWARGS.
 * NOTE(review): name/signature lines are missing from this excerpt.
 */
818 	struct vfsmount		*mnt)
820 	struct vfs	*vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
823 	VFS_SHOWARGS(vfsp, m, error);
/*
 * quotactl_ops.get_xstate: fetch XFS quota state (Q_XGETQSTAT).
 * NOTE(review): the function name line is missing from this excerpt.
 */
829 	struct super_block	*sb,
830 	struct fs_quota_stat	*fqs)
832 	struct vfs	*vfsp = LINVFS_GET_VFS(sb);
835 	VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
/*
 * quotactl_ops.set_xstate: change quota state via VFS_QUOTACTL.
 * NOTE(review): name line and the op-selection lines are missing from
 * this excerpt.
 */
841 	struct super_block	*sb,
845 	struct vfs	*vfsp = LINVFS_GET_VFS(sb);
848 	VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
/*
 * quotactl_ops.get_xquota: read one user/group disk quota; the opcode
 * depends on whether @type is GRPQUOTA.
 * NOTE(review): the function name line is missing from this excerpt.
 */
854 	struct super_block	*sb,
857 	struct fs_disk_quota	*fdq)
859 	struct vfs	*vfsp = LINVFS_GET_VFS(sb);
862 	getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA;
863 	VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
/*
 * quotactl_ops.set_xquota: set one user/group quota limit; opcode
 * chosen by @type as in linvfs_getxquota.
 * NOTE(review): the function name line is missing from this excerpt.
 */
869 	struct super_block	*sb,
872 	struct fs_disk_quota	*fdq)
874 	struct vfs	*vfsp = LINVFS_GET_VFS(sb);
877 	setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM;
878 	VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
882 STATIC struct super_block *
/*
 * Mount entry point: allocate and wire up a vfs for @sb, parse the
 * mount options, run VFS_MOUNT, populate super_block geometry from
 * statvfs, create the root dentry, and start the sync daemon.  Error
 * paths (mostly elided in this excerpt) unwind via VFS_UNMOUNT and
 * vfs_deallocate.
 * NOTE(review): this excerpt omits the name line, most labels and
 * error branches -- do not restructure without the full source.
 */
884 	struct super_block	*sb,
889 	struct vfs		*vfsp = vfs_allocate();
890 	struct xfs_mount_args	*args = xfs_args_allocate(sb);
891 	struct statfs		statvfs;
894 	vfsp->vfs_super = sb;
895 	LINVFS_SET_VFS(sb, vfsp);
896 	if (sb->s_flags & MS_RDONLY)
897 		vfsp->vfs_flag |= VFS_RDONLY;
898 	bhv_insert_all_vfsops(vfsp);
900 	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
902 		bhv_remove_all_vfsops(vfsp, 1);
906 	sb_min_blocksize(sb, BBSIZE);
907 	sb->s_qcop = &linvfs_qops;
908 	sb->s_op = &linvfs_sops;
910 	VFS_MOUNT(vfsp, args, NULL, error);
912 		bhv_remove_all_vfsops(vfsp, 1);
916 	VFS_STATVFS(vfsp, &statvfs, NULL, error);
/* mirror filesystem geometry into the generic super_block */
921 	sb->s_magic = statvfs.f_type;
922 	sb->s_blocksize = statvfs.f_bsize;
923 	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
924 	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
925 	set_posix_acl_flag(sb);
927 	VFS_ROOT(vfsp, &rootvp, error);
931 	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
934 	if (is_bad_inode(sb->s_root->d_inode))
936 	if (linvfs_start_syncd(vfsp))
938 	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
940 	kmem_free(args, sizeof(*args));
/* error unwind: tear down the mount and free everything */
952 	VFS_UNMOUNT(vfsp, 0, NULL, error);
955 	vfs_deallocate(vfsp);
956 	kmem_free(args, sizeof(*args));
/* Superblock operations table handed to the VFS for XFS mounts. */
961 STATIC struct super_operations linvfs_sops = {
962 	.alloc_inode		= linvfs_alloc_inode,
963 	.destroy_inode		= linvfs_destroy_inode,
964 	.write_inode		= linvfs_write_inode,
965 	.clear_inode		= linvfs_clear_inode,
966 	.put_super		= linvfs_put_super,
967 	.write_super		= linvfs_write_super,
968 	.sync_fs		= linvfs_sync_super,
969 	.write_super_lockfs	= linvfs_freeze_fs,
970 	.unlockfs		= linvfs_unfreeze_fs,
971 	.statfs			= linvfs_statfs,
972 	.remount_fs		= linvfs_remount,
973 	.fh_to_dentry		= linvfs_fh_to_dentry,
974 	.dentry_to_fh		= linvfs_dentry_to_fh,
975 	.show_options		= linvfs_show_options,
/* XFS extended-quota (XQM) quotactl operations table. */
978 STATIC struct quotactl_ops linvfs_qops = {
979 	.get_xstate		= linvfs_getxstate,
980 	.set_xstate		= linvfs_setxstate,
981 	.get_xquota		= linvfs_getxquota,
982 	.set_xquota		= linvfs_setxquota,
/* Filesystem type registration record; requires a backing device.
 * NOTE(review): the .name initializer line is missing from this excerpt. */
985 STATIC struct file_system_type xfs_fs_type = {
986 	.owner			= THIS_MODULE,
988 	.read_super		= linvfs_read_super,
989 	.fs_flags		= FS_REQUIRES_DEV,
/*
 * Module init: print the banner, record physical memory, set up the
 * inode cache, pagebuf subsystem, the inode shaker, and finally
 * register the filesystem type.  Unwind labels (partially elided in
 * this excerpt) release each in reverse order on failure.
 */
998 	static char		message[] __initdata = KERN_INFO \
999 		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
1004 	xfs_physmem = si.totalram;
1008 	error = init_inodecache();
1010 		goto undo_inodecache;
1012 	error = pagebuf_init();
1022 	xfs_inode_shaker = kmem_shake_register(xfs_inode_shake);
1023 	if (!xfs_inode_shaker) {
1028 	error = register_filesystem(&xfs_fs_type);
1031 	XFS_DM_INIT(&xfs_fs_type);
/* error unwind: deregister/terminate in reverse order of setup */
1035 	kmem_shake_deregister(xfs_inode_shaker);
1038 	pagebuf_terminate();
1041 	destroy_inodecache();
/*
 * Module exit: tear down in reverse order of init_xfs_fs.
 * NOTE(review): the function header lines are missing from this
 * excerpt.
 */
1050 	XFS_DM_EXIT(&xfs_fs_type);
1051 	unregister_filesystem(&xfs_fs_type);
1052 	kmem_shake_deregister(xfs_inode_shaker);
1056 	pagebuf_terminate();
1057 	destroy_inodecache();
/* Standard module entry/exit hooks and metadata. */
1061 module_init(init_xfs_fs);
1062 module_exit(exit_xfs_fs);
1064 MODULE_AUTHOR("Silicon Graphics, Inc.");
1065 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1066 MODULE_LICENSE("GPL");