2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
28 * For further information regarding this notice, see:
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
38 #include "xfs_trans.h"
42 #include "xfs_alloc.h"
43 #include "xfs_dmapi.h"
44 #include "xfs_mount.h"
45 #include "xfs_alloc_btree.h"
46 #include "xfs_bmap_btree.h"
47 #include "xfs_ialloc_btree.h"
48 #include "xfs_btree.h"
49 #include "xfs_ialloc.h"
50 #include "xfs_attr_sf.h"
51 #include "xfs_dir_sf.h"
52 #include "xfs_dir2_sf.h"
53 #include "xfs_dinode.h"
54 #include "xfs_inode.h"
57 #include "xfs_rtalloc.h"
58 #include "xfs_error.h"
59 #include "xfs_itable.h"
65 #include "xfs_buf_item.h"
66 #include "xfs_utils.h"
67 #include "xfs_dfrag.h"
68 #include "xfs_fsops.h"
70 #include <linux/dcache.h>
71 #include <linux/mount.h>
72 #include <linux/namei.h>
73 #include <linux/pagemap.h>
76 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
77 * a file or fs handle.
79 * XFS_IOC_PATH_TO_FSHANDLE
80 * returns fs handle for a mount point or path within that mount point
81 * XFS_IOC_FD_TO_HANDLE
82 * returns full handle for a FD opened in user space
83 * XFS_IOC_PATH_TO_HANDLE
84 * returns full handle for a path
/*
 * NOTE(review): partial view of the handle-mapping handler described in the
 * comment block above (xfs_find_handle).  The function signature, switch
 * header, and several cleanup paths are missing from this extraction; the
 * comments below describe only the visible statements.
 */
93 xfs_fsop_handlereq_t hreq;
/* Copy the request descriptor in from userspace; EFAULT on a bad pointer. */
97 if (copy_from_user(&hreq, arg, sizeof(hreq)))
98 return -XFS_ERROR(EFAULT);
100 memset((char *)&handle, 0, sizeof(handle));
/* Path-based variants: resolve the user-supplied path to an inode. */
103 case XFS_IOC_PATH_TO_FSHANDLE:
104 case XFS_IOC_PATH_TO_HANDLE: {
108 error = user_path_walk_link((const char __user *)hreq.path, &nd);
113 ASSERT(nd.dentry->d_inode);
114 inode = igrab(nd.dentry->d_inode);
/* FD-based variant: take the inode from an already-open file descriptor. */
119 case XFS_IOC_FD_TO_HANDLE: {
122 file = fget(hreq.fd);
126 ASSERT(file->f_dentry);
127 ASSERT(file->f_dentry->d_inode);
128 inode = igrab(file->f_dentry->d_inode);
135 return -XFS_ERROR(EINVAL);
/* Reject inodes that do not belong to an XFS superblock. */
138 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
139 /* we're not in XFS anymore, Toto */
141 return -XFS_ERROR(EINVAL);
144 /* we need the vnode */
145 vp = LINVFS_GET_VP(inode);
/* Handles are only issued for regular files, directories and symlinks. */
146 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
148 return -XFS_ERROR(EBADF);
151 /* now we can grab the fsid */
152 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
153 hsize = sizeof(xfs_fsid_t);
/* Full file handles additionally carry the fid (inode # + generation). */
155 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
160 /* need to get access to the xfs_inode to read the generation */
161 bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
163 ip = XFS_BHVTOI(bhv);
/* Shared ilock so di_gen/i_ino are read consistently. */
165 lock_mode = xfs_ilock_map_shared(ip);
167 /* fill in fid section of handle from inode */
168 handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
169 sizeof(handle.ha_fid.xfs_fid_len);
170 handle.ha_fid.xfs_fid_pad = 0;
171 handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
172 handle.ha_fid.xfs_fid_ino = ip->i_ino;
174 xfs_iunlock_map_shared(ip, lock_mode);
176 hsize = XFS_HSIZE(handle);
179 /* now copy our handle into the user buffer & write out the size */
180 if (copy_to_user(hreq.ohandle, &handle, hsize) ||
181 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
183 return -XFS_ERROR(EFAULT);
192 * Convert userspace handle data into vnode (and inode).
193 * We [ab]use the fact that all the fsop_handlereq ioctl calls
194 * have a data structure argument whose first component is always
195 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
196 * This allows us to optimise the copy_from_user calls and gives
197 * a handy, shared routine.
199 * If no error, caller must always VN_RELE the returned vp.
/*
 * NOTE(review): partial view; declarations of mp/hanp/hlen/xfid/ino/igen/ip
 * and some control-flow lines are missing from this extraction.  Unlike the
 * ioctl entry points, errors here are returned as positive XFS_ERROR codes;
 * presumably the callers negate them — verify against the full file.
 */
202 xfs_vget_fsop_handlereq(
204 struct inode *parinode, /* parent inode pointer */
205 int cap, /* capability level for op */
206 void __user *arg, /* userspace data pointer */
207 unsigned long size, /* size of expected struct */
208 /* output arguments */
209 xfs_fsop_handlereq_t *hreq,
211 struct inode **inode)
216 xfs_handle_t *handlep;
219 struct inode *inodep;
/* Caller must hold the capability named by 'cap' (check elided here). */
226 return XFS_ERROR(EPERM);
229 * Only allow handle opens under a directory.
231 if (!S_ISDIR(parinode->i_mode))
232 return XFS_ERROR(ENOTDIR);
235 * Copy the handle down from the user and validate
236 * that it looks to be in the correct format.
238 if (copy_from_user(hreq, arg, size))
239 return XFS_ERROR(EFAULT);
241 hanp = hreq->ihandle;
242 hlen = hreq->ihandlen;
/* Handle must be at least an fsid and at most a full xfs_handle_t. */
245 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
246 return XFS_ERROR(EINVAL);
247 if (copy_from_user(handlep, hanp, hlen))
248 return XFS_ERROR(EFAULT);
/* Zero-fill the tail so a short handle has a well-defined fid section. */
249 if (hlen < sizeof(*handlep))
250 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
/* If a fid is present, its length and pad fields must be consistent. */
251 if (hlen > sizeof(handlep->ha_fsid)) {
252 if (handlep->ha_fid.xfs_fid_len !=
253 (hlen - sizeof(handlep->ha_fsid)
254 - sizeof(handlep->ha_fid.xfs_fid_len))
255 || handlep->ha_fid.xfs_fid_pad)
256 return XFS_ERROR(EINVAL);
260 * Crack the handle, obtain the inode # & generation #
262 xfid = (struct xfs_fid *)&handlep->ha_fid;
263 if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
264 ino = xfid->xfs_fid_ino;
265 igen = xfid->xfs_fid_gen;
267 return XFS_ERROR(EINVAL);
271 * Get the XFS inode, building a vnode to go with it.
273 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
277 return XFS_ERROR(EIO);
/* Stale handle: inode freed (mode 0) or reused (generation mismatch). */
278 if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
279 xfs_iput_new(ip, XFS_ILOCK_SHARED);
280 return XFS_ERROR(ENOENT);
284 inodep = LINVFS_GET_IP(vpp);
285 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * NOTE(review): partial view of xfs_open_by_handle (named in the comment at
 * original line 314).  The signature head, several local declarations, and
 * cleanup paths (iput on error, etc.) are missing from this extraction.
 */
296 struct file *parfilp,
297 struct inode *parinode)
304 struct dentry *dentry;
306 xfs_fsop_handlereq_t hreq;
/* Crack the user handle into an inode; requires CAP_SYS_ADMIN. */
308 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
309 sizeof(xfs_fsop_handlereq_t),
314 /* Restrict xfs_open_by_handle to directories & regular files. */
315 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
317 return -XFS_ERROR(EINVAL);
320 #if BITS_PER_LONG != 32
321 hreq.oflags |= O_LARGEFILE;
323 /* Put open permission in namei format. */
324 permflag = hreq.oflags;
/* (oflags+1) maps O_RDONLY/O_WRONLY/O_RDWR onto FMODE_READ/FMODE_WRITE. */
325 if ((permflag+1) & O_ACCMODE)
327 if (permflag & O_TRUNC)
/* Append-only files may only be opened for append (and never O_TRUNC). */
330 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
331 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
333 return -XFS_ERROR(EPERM);
336 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
338 return -XFS_ERROR(EACCES);
341 /* Can't write directories. */
342 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
344 return -XFS_ERROR(EISDIR);
347 if ((new_fd = get_unused_fd()) < 0) {
/* Anonymous dentry: the inode was reached by handle, not by name. */
352 dentry = d_alloc_anon(inode);
353 if (dentry == NULL) {
355 put_unused_fd(new_fd);
356 return -XFS_ERROR(ENOMEM);
359 /* Ensure umount returns EBUSY on umounts while this file is open. */
360 mntget(parfilp->f_vfsmnt);
362 /* Create file pointer. */
363 filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
365 put_unused_fd(new_fd);
366 return -XFS_ERROR(-PTR_ERR(filp));
/* Handle-opened regular files use the "invisible" fops (no DMAPI events). */
368 if (inode->i_mode & S_IFREG)
369 filp->f_op = &linvfs_invis_file_operations;
371 fd_install(new_fd, filp);
/*
 * Read a symlink target identified by an XFS file handle.
 * NOTE(review): partial view; signature head, olen/aiov/auio declarations,
 * the remaining uio setup, and the VN_RELE cleanup are missing here.
 */
376 xfs_readlink_by_handle(
379 struct file *parfilp,
380 struct inode *parinode)
386 xfs_fsop_handlereq_t hreq;
390 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
391 sizeof(xfs_fsop_handlereq_t),
396 /* Restrict this handle operation to symlinks only. */
397 if (vp->v_type != VLNK) {
399 return -XFS_ERROR(EINVAL);
/* Caller passes the output buffer length via the ohandlen pointer. */
402 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
404 return -XFS_ERROR(EFAULT);
/* Build a single-segment userspace uio over the caller's buffer. */
407 aiov.iov_base = hreq.ohandle;
409 auio.uio_iov = &aiov;
412 auio.uio_segflg = UIO_USERSPACE;
413 auio.uio_resid = olen;
415 VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
/* Bytes actually copied = requested length minus residual. */
418 return (olen - auio.uio_resid);
/*
 * Set DMAPI attributes (event mask + state) on a file named by handle.
 * NOTE(review): partial view; signature head, bdp declaration, and the
 * VN_RELE/return tail are missing from this extraction.
 */
422 xfs_fssetdm_by_handle(
425 struct file *parfilp,
426 struct inode *parinode)
429 struct fsdmidata fsd;
430 xfs_fsop_setdm_handlereq_t dmhreq;
/* CAP_MKNOD (not CAP_SYS_ADMIN) gates this DMAPI operation. */
435 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_MKNOD, arg,
436 sizeof(xfs_fsop_setdm_handlereq_t),
437 (xfs_fsop_handlereq_t *)&dmhreq,
/* Refuse to touch immutable or append-only inodes. */
442 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
444 return -XFS_ERROR(EPERM);
447 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
449 return -XFS_ERROR(EFAULT);
452 bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
453 error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
/*
 * List extended attributes of a file named by handle.
 * NOTE(review): partial view; signature head and the VN_RELE/return tail
 * are missing from this extraction.
 */
462 xfs_attrlist_by_handle(
465 struct file *parfilp,
466 struct inode *parinode)
469 attrlist_cursor_kern_t *cursor;
470 xfs_fsop_attrlist_handlereq_t al_hreq;
474 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
475 sizeof(xfs_fsop_attrlist_handlereq_t),
476 (xfs_fsop_handlereq_t *)&al_hreq,
/* The cursor lives inside the request so userspace can resume iteration. */
481 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
482 VOP_ATTR_LIST(vp, al_hreq.buffer, al_hreq.buflen, al_hreq.flags,
483 cursor, NULL, error);
/*
 * Perform a batch of extended-attribute get/set/remove operations on a
 * file named by handle.  Per-op errors are reported in each op's am_error
 * field; the ops array is copied back to userspace at the end.
 * NOTE(review): partial view; signature head, case labels for the opcode
 * switch, kfree() of ops, and the VN_RELE tail are missing here.
 */
491 xfs_attrmulti_by_handle(
494 struct file *parfilp,
495 struct inode *parinode)
498 xfs_attr_multiop_t *ops;
499 xfs_fsop_attrmulti_handlereq_t am_hreq;
502 unsigned int i, size;
504 error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
505 sizeof(xfs_fsop_attrmulti_handlereq_t),
506 (xfs_fsop_handlereq_t *)&am_hreq,
/*
 * NOTE(review): opcount * sizeof() can wrap for huge opcount before the
 * range check below; verify opcount is bounded upstream, or check it
 * against 16*PAGE_SIZE/sizeof(attr_multiop_t) before multiplying.
 */
511 size = am_hreq.opcount * sizeof(attr_multiop_t);
512 if (!size || size > 16 * PAGE_SIZE) {
514 return -XFS_ERROR(E2BIG);
517 ops = (xfs_attr_multiop_t *)kmalloc(size, GFP_KERNEL);
520 return -XFS_ERROR(ENOMEM);
523 if (copy_from_user(ops, am_hreq.ops, size)) {
526 return -XFS_ERROR(EFAULT);
/* Dispatch each op; failures are recorded per-op, not returned early. */
529 for (i = 0; i < am_hreq.opcount; i++) {
530 switch(ops[i].am_opcode) {
532 VOP_ATTR_GET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
533 &ops[i].am_length, ops[i].am_flags,
534 NULL, ops[i].am_error);
/* Sets and removes are refused on immutable/append-only inodes. */
537 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
538 ops[i].am_error = EPERM;
541 VOP_ATTR_SET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
542 ops[i].am_length, ops[i].am_flags,
543 NULL, ops[i].am_error);
546 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
547 ops[i].am_error = EPERM;
550 VOP_ATTR_REMOVE(vp, ops[i].am_attrname, ops[i].am_flags,
551 NULL, ops[i].am_error);
554 ops[i].am_error = EINVAL;
/* Copy per-op results (lengths, error codes) back to the caller. */
558 if (copy_to_user(am_hreq.ops, ops, size))
559 error = -XFS_ERROR(EFAULT);
566 /* prototypes for a few of the stack-hungry cases that have
567 * their own functions. Functions are defined after their use
568 * so gcc doesn't get fancy and inline them with -O3 */
586 xfs_ioc_fsgeometry_v1(
/*
 * NOTE(review): partial view of the main xfs_ioctl dispatch routine.  The
 * signature, the switch(cmd) header, most closing braces, the EPERM
 * returns after the capable() checks, and the -error returns after each
 * helper call are missing from this extraction; comments describe only
 * the visible case arms.
 */
630 vp = LINVFS_GET_VP(inode);
632 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
634 ip = XFS_BHVTOI(bdp);
/* Space preallocation / reservation family, 32- and 64-bit variants. */
639 case XFS_IOC_ALLOCSP:
642 case XFS_IOC_UNRESVSP:
643 case XFS_IOC_ALLOCSP64:
644 case XFS_IOC_FREESP64:
645 case XFS_IOC_RESVSP64:
646 case XFS_IOC_UNRESVSP64:
648 * Only allow the sys admin to reserve space unless
649 * unwritten extents are enabled.
651 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
652 !capable(CAP_SYS_ADMIN))
655 return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
/* Direct-I/O alignment info: realtime files report the rt device. */
657 case XFS_IOC_DIOINFO: {
659 xfs_buftarg_t *target =
660 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
661 mp->m_rtdev_targp : mp->m_ddev_targp;
663 da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
664 /* The size dio will do in one go */
665 da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
667 if (copy_to_user(arg, &da, sizeof(da)))
668 return -XFS_ERROR(EFAULT);
672 case XFS_IOC_FSBULKSTAT_SINGLE:
673 case XFS_IOC_FSBULKSTAT:
674 case XFS_IOC_FSINUMBERS:
675 return xfs_ioc_bulkstat(mp, cmd, arg);
677 case XFS_IOC_FSGEOMETRY_V1:
678 return xfs_ioc_fsgeometry_v1(mp, arg);
680 case XFS_IOC_FSGEOMETRY:
681 return xfs_ioc_fsgeometry(mp, arg);
/* Per-inode flag/attribute ioctls are handled by a common helper. */
683 case XFS_IOC_GETVERSION:
684 case XFS_IOC_GETXFLAGS:
685 case XFS_IOC_SETXFLAGS:
686 case XFS_IOC_FSGETXATTR:
687 case XFS_IOC_FSSETXATTR:
688 case XFS_IOC_FSGETXATTRA:
689 return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
691 case XFS_IOC_FSSETDM: {
692 struct fsdmidata dmi;
694 if (copy_from_user(&dmi, arg, sizeof(dmi)))
695 return -XFS_ERROR(EFAULT);
697 error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
702 case XFS_IOC_GETBMAP:
703 case XFS_IOC_GETBMAPA:
704 return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
706 case XFS_IOC_GETBMAPX:
707 return xfs_ioc_getbmapx(bdp, arg);
/* Handle-based operations delegate to the helpers defined above. */
709 case XFS_IOC_FD_TO_HANDLE:
710 case XFS_IOC_PATH_TO_HANDLE:
711 case XFS_IOC_PATH_TO_FSHANDLE:
712 return xfs_find_handle(cmd, arg);
714 case XFS_IOC_OPEN_BY_HANDLE:
715 return xfs_open_by_handle(mp, arg, filp, inode);
717 case XFS_IOC_FSSETDM_BY_HANDLE:
718 return xfs_fssetdm_by_handle(mp, arg, filp, inode);
720 case XFS_IOC_READLINK_BY_HANDLE:
721 return xfs_readlink_by_handle(mp, arg, filp, inode);
723 case XFS_IOC_ATTRLIST_BY_HANDLE:
724 return xfs_attrlist_by_handle(mp, arg, filp, inode);
726 case XFS_IOC_ATTRMULTI_BY_HANDLE:
727 return xfs_attrmulti_by_handle(mp, arg, filp, inode);
729 case XFS_IOC_SWAPEXT: {
730 error = xfs_swapext((struct xfs_swapext __user *)arg);
734 case XFS_IOC_FSCOUNTS: {
735 xfs_fsop_counts_t out;
737 error = xfs_fs_counts(mp, &out);
741 if (copy_to_user(arg, &out, sizeof(out)))
742 return -XFS_ERROR(EFAULT);
/* Reserved-block pool: set takes a value in and returns the new state. */
746 case XFS_IOC_SET_RESBLKS: {
747 xfs_fsop_resblks_t inout;
750 if (!capable(CAP_SYS_ADMIN))
753 if (copy_from_user(&inout, arg, sizeof(inout)))
754 return -XFS_ERROR(EFAULT);
756 /* input parameter is passed in resblks field of structure */
758 error = xfs_reserve_blocks(mp, &in, &inout);
762 if (copy_to_user(arg, &inout, sizeof(inout)))
763 return -XFS_ERROR(EFAULT);
767 case XFS_IOC_GET_RESBLKS: {
768 xfs_fsop_resblks_t out;
770 if (!capable(CAP_SYS_ADMIN))
/* NULL input pointer means "query only, don't change the reservation". */
773 error = xfs_reserve_blocks(mp, NULL, &out);
777 if (copy_to_user(arg, &out, sizeof(out)))
778 return -XFS_ERROR(EFAULT);
/* Online grow operations (data, log, realtime), all admin-only. */
783 case XFS_IOC_FSGROWFSDATA: {
784 xfs_growfs_data_t in;
786 if (!capable(CAP_SYS_ADMIN))
789 if (copy_from_user(&in, arg, sizeof(in)))
790 return -XFS_ERROR(EFAULT);
792 error = xfs_growfs_data(mp, &in);
796 case XFS_IOC_FSGROWFSLOG: {
799 if (!capable(CAP_SYS_ADMIN))
802 if (copy_from_user(&in, arg, sizeof(in)))
803 return -XFS_ERROR(EFAULT);
805 error = xfs_growfs_log(mp, &in);
809 case XFS_IOC_FSGROWFSRT: {
812 if (!capable(CAP_SYS_ADMIN))
815 if (copy_from_user(&in, arg, sizeof(in)))
816 return -XFS_ERROR(EFAULT);
818 error = xfs_growfs_rt(mp, &in);
/* Freeze/thaw the block device; both guarded by the current frozen state. */
823 if (!capable(CAP_SYS_ADMIN))
826 if (inode->i_sb->s_frozen == SB_UNFROZEN)
827 freeze_bdev(inode->i_sb->s_bdev);
831 if (!capable(CAP_SYS_ADMIN))
833 if (inode->i_sb->s_frozen != SB_UNFROZEN)
834 thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
/* Forced shutdown with a userspace-selected mode flag. */
837 case XFS_IOC_GOINGDOWN: {
840 if (!capable(CAP_SYS_ADMIN))
843 if (get_user(in, (__uint32_t __user *)arg))
844 return -XFS_ERROR(EFAULT);
846 error = xfs_fs_goingdown(mp, in);
/* Error-injection test hooks, admin-only. */
850 case XFS_IOC_ERROR_INJECTION: {
851 xfs_error_injection_t in;
853 if (!capable(CAP_SYS_ADMIN))
856 if (copy_from_user(&in, arg, sizeof(in)))
857 return -XFS_ERROR(EFAULT);
859 error = xfs_errortag_add(in.errtag, mp);
863 case XFS_IOC_ERROR_CLEARALL:
864 if (!capable(CAP_SYS_ADMIN))
867 error = xfs_errortag_clearall(mp);
/*
 * NOTE(review): fragment of the space-reservation helper (xfs_ioc_space,
 * called from the ALLOCSP/RESVSP cases above); signature, bf/attr_flags
 * declarations, and the final return are missing from this extraction.
 */
/* No changing space on immutable or append-only files. */
888 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
889 return -XFS_ERROR(EPERM);
/*
 * NOTE(review): FMODE_WRITE is an f_mode bit but is tested against
 * f_flags here — looks like a flags/mode mix-up; confirm against the
 * intended check (filp->f_mode & FMODE_WRITE) before relying on it.
 */
891 if (!(filp->f_flags & FMODE_WRITE))
892 return -XFS_ERROR(EBADF);
/* Only regular files can have their space changed. */
894 if (vp->v_type != VREG)
895 return -XFS_ERROR(EINVAL);
897 if (copy_from_user(&bf, arg, sizeof(bf)))
898 return -XFS_ERROR(EFAULT);
/* Propagate non-blocking and invisible-I/O hints to the attr layer. */
900 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
901 attr_flags |= ATTR_NONBLOCK;
902 if (ioflags & IO_INVIS)
903 attr_flags |= ATTR_DMI;
905 error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
/*
 * NOTE(review): fragment of xfs_ioc_bulkstat, the handler for the
 * FSBULKSTAT / FSBULKSTAT_SINGLE / FSINUMBERS ioctls.  The signature,
 * the 'done' declaration, the EPERM return after capable(), and parts of
 * the result copy-out (including the lastip size argument) are missing.
 */
916 xfs_fsop_bulkreq_t bulkreq;
917 int count; /* # of records returned */
918 xfs_ino_t inlast; /* last inode number */
922 /* done = 1 if there are more stats to get and if bulkstat */
923 /* should be called again (unused here, but used in dmapi) */
925 if (!capable(CAP_SYS_ADMIN))
928 if (XFS_FORCED_SHUTDOWN(mp))
929 return -XFS_ERROR(EIO);
930 /* Two-stage copy-in: the request header, then the starting inode. */
931 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
932 return -XFS_ERROR(EFAULT);
934 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
935 return -XFS_ERROR(EFAULT);
937 if ((count = bulkreq.icount) <= 0)
938 return -XFS_ERROR(EINVAL);
940 if (cmd == XFS_IOC_FSINUMBERS)
941 error = xfs_inumbers(mp, &inlast, &count,
943 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
944 error = xfs_bulkstat_single(mp, &inlast,
945 bulkreq.ubuffer, &done);
946 else { /* XFS_IOC_FSBULKSTAT */
/* Single-inode fast path when exactly one record is requested. */
947 if (count == 1 && inlast != 0) {
949 error = xfs_bulkstat_single(mp, &inlast,
950 bulkreq.ubuffer, &done);
952 error = xfs_bulkstat(mp, &inlast, &count,
953 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
954 sizeof(xfs_bstat_t), bulkreq.ubuffer,
955 BULKSTAT_FG_QUICK, &done);
/* Write back the resume cookie and record count if the caller asked. */
962 if (bulkreq.ocount != NULL) {
963 if (copy_to_user(bulkreq.lastip, &inlast,
965 return -XFS_ERROR(EFAULT);
967 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
968 return -XFS_ERROR(EFAULT);
/*
 * Return v1 (version 3) filesystem geometry to userspace.
 * NOTE(review): the cast feeds a smaller v1 struct to xfs_fs_geometry();
 * presumably version 3 only fills the v1-compatible fields — confirm in
 * xfs_fs_geometry before trusting the cast.  Signature tail and error
 * check on 'error' are missing from this extraction.
 */
975 xfs_ioc_fsgeometry_v1(
979 xfs_fsop_geom_v1_t fsgeo;
982 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
986 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
987 return -XFS_ERROR(EFAULT);
/*
 * Return current (version 4) filesystem geometry to userspace.
 * NOTE(review): fragment — the function signature and the error check
 * between the two calls are missing from this extraction.
 */
996 xfs_fsop_geom_t fsgeo;
999 error = xfs_fs_geometry(mp, &fsgeo, 4);
1003 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1004 return -XFS_ERROR(EFAULT);
1009 * Linux extended inode flags interface.
1011 #define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
1012 #define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
1013 #define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
1014 #define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
1015 #define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
1016 #define LINUX_XFLAG_BARRIER 0x00004000 /* chroot() barrier */
1017 #define LINUX_XFLAG_IUNLINK 0x00008000 /* immutable unlink */
/*
 * Merge Linux FS_IOC_SETFLAGS-style bits into an existing XFS xflags word:
 * each supported Linux flag sets or clears the corresponding XFS_XFLAG_*
 * bit in 'start'; unrelated bits in 'start' are preserved.
 * NOTE(review): fragment — parameter list, the else branches, and the
 * 'return xflags;' tail are missing from this extraction.
 */
1020 xfs_merge_ioc_xflags(
1024 unsigned int xflags = start;
1026 if (flags & LINUX_XFLAG_IMMUTABLE)
1027 xflags |= XFS_XFLAG_IMMUTABLE;
1029 xflags &= ~XFS_XFLAG_IMMUTABLE;
1030 if (flags & LINUX_XFLAG_APPEND)
1031 xflags |= XFS_XFLAG_APPEND;
1033 xflags &= ~XFS_XFLAG_APPEND;
1034 if (flags & LINUX_XFLAG_SYNC)
1035 xflags |= XFS_XFLAG_SYNC;
1037 xflags &= ~XFS_XFLAG_SYNC;
1038 if (flags & LINUX_XFLAG_NOATIME)
1039 xflags |= XFS_XFLAG_NOATIME;
1041 xflags &= ~XFS_XFLAG_NOATIME;
1042 if (flags & LINUX_XFLAG_NODUMP)
1043 xflags |= XFS_XFLAG_NODUMP;
1045 xflags &= ~XFS_XFLAG_NODUMP;
/*
 * Translate on-disk XFS_DIFLAG_* bits into the Linux LINUX_XFLAG_* set
 * (inverse direction of xfs_merge_ioc_xflags above).
 * NOTE(review): fragment — the function name line and the 'return flags;'
 * tail are missing from this extraction.
 */
1052 __uint16_t di_flags)
1054 unsigned int flags = 0;
1056 if (di_flags & XFS_DIFLAG_IMMUTABLE)
1057 flags |= LINUX_XFLAG_IMMUTABLE;
1058 if (di_flags & XFS_DIFLAG_IUNLINK)
1059 flags |= LINUX_XFLAG_IUNLINK;
1060 if (di_flags & XFS_DIFLAG_BARRIER)
1061 flags |= LINUX_XFLAG_BARRIER;
1062 if (di_flags & XFS_DIFLAG_APPEND)
1063 flags |= LINUX_XFLAG_APPEND;
1064 if (di_flags & XFS_DIFLAG_SYNC)
1065 flags |= LINUX_XFLAG_SYNC;
1066 if (di_flags & XFS_DIFLAG_NOATIME)
1067 flags |= LINUX_XFLAG_NOATIME;
1068 if (di_flags & XFS_DIFLAG_NODUMP)
1069 flags |= LINUX_XFLAG_NODUMP;
/*
 * NOTE(review): fragment of xfs_ioc_xattr, the per-inode flag/attribute
 * handler dispatched from xfs_ioctl.  The signature, va/fa/flags/attr_flags
 * declarations, switch header, error checks after the VOP calls, and the
 * return statements at the end of each case are missing here.
 */
/* Read xflags + extent size + data-fork extent count via VOP_GETATTR. */
1088 case XFS_IOC_FSGETXATTR: {
1089 va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
1090 VOP_GETATTR(vp, &va, 0, NULL, error);
1094 fa.fsx_xflags = va.va_xflags;
1095 fa.fsx_extsize = va.va_extsize;
1096 fa.fsx_nextents = va.va_nextents;
1098 if (copy_to_user(arg, &fa, sizeof(fa)))
1099 return -XFS_ERROR(EFAULT);
1103 case XFS_IOC_FSSETXATTR: {
1104 if (copy_from_user(&fa, arg, sizeof(fa)))
1105 return -XFS_ERROR(EFAULT);
1108 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1109 attr_flags |= ATTR_NONBLOCK;
1111 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
1112 va.va_xflags = fa.fsx_xflags;
1113 va.va_extsize = fa.fsx_extsize;
1115 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1117 vn_revalidate(vp); /* update Linux inode flags */
/* Same as FSGETXATTR but reports the attribute-fork extent count. */
1121 case XFS_IOC_FSGETXATTRA: {
1122 va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
1123 VOP_GETATTR(vp, &va, 0, NULL, error);
1127 fa.fsx_xflags = va.va_xflags;
1128 fa.fsx_extsize = va.va_extsize;
1129 fa.fsx_nextents = va.va_anextents;
1131 if (copy_to_user(arg, &fa, sizeof(fa)))
1132 return -XFS_ERROR(EFAULT);
/* Linux-style flag bits, derived from the on-disk di_flags. */
1136 case XFS_IOC_GETXFLAGS: {
1137 flags = xfs_di2lxflags(ip->i_d.di_flags);
1138 if (copy_to_user(arg, &flags, sizeof(flags)))
1139 return -XFS_ERROR(EFAULT);
1143 case XFS_IOC_SETXFLAGS: {
1144 if (copy_from_user(&flags, arg, sizeof(flags)))
1145 return -XFS_ERROR(EFAULT);
/* Only the flags XFS can store are accepted; anything else is rejected. */
1147 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
1148 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
1150 return -XFS_ERROR(EOPNOTSUPP);
1153 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1154 attr_flags |= ATTR_NONBLOCK;
1156 va.va_mask = XFS_AT_XFLAGS;
/* Merge the new Linux bits into the inode's current xflags. */
1157 va.va_xflags = xfs_merge_ioc_xflags(flags,
1158 xfs_dic2xflags(&ip->i_d, ARCH_NOCONVERT));
1160 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1162 vn_revalidate(vp); /* update Linux inode flags */
1166 case XFS_IOC_GETVERSION: {
1167 flags = LINVFS_GET_IP(vp)->i_generation;
1168 if (copy_to_user(arg, &flags, sizeof(flags)))
1169 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragment of xfs_ioc_getbmap (GETBMAP/GETBMAPA handler);
 * the signature, bm/iflags declarations, and the error check after
 * xfs_getbmap are missing from this extraction.
 */
1190 if (copy_from_user(&bm, arg, sizeof(bm)))
1191 return -XFS_ERROR(EFAULT);
/* Caller must provide room for the header entry plus at least one extent. */
1193 if (bm.bmv_count < 2)
1194 return -XFS_ERROR(EINVAL);
/* GETBMAPA maps the attribute fork instead of the data fork. */
1196 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
1197 if (ioflags & IO_INVIS)
1198 iflags |= BMV_IF_NO_DMAPI_READ;
/* Extent records are written to user memory just past the header. */
1200 error = xfs_getbmap(bdp, &bm, (struct getbmap __user *)arg+1, iflags);
1204 if (copy_to_user(arg, &bm, sizeof(bm)))
1205 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): fragment of xfs_ioc_getbmapx, the extended-bmap handler;
 * the signature, bm/iflags declarations, and the error check after
 * xfs_getbmap are missing from this extraction.
 */
1214 struct getbmapx bmx;
1219 if (copy_from_user(&bmx, arg, sizeof(bmx)))
1220 return -XFS_ERROR(EFAULT);
/* Header entry plus at least one extent slot is required. */
1222 if (bmx.bmv_count < 2)
1223 return -XFS_ERROR(EINVAL);
1226 * Map input getbmapx structure to a getbmap
1227 * structure for xfs_getbmap.
1229 GETBMAP_CONVERT(bmx, bm);
1231 iflags = bmx.bmv_iflags;
/* Reject any caller-supplied flag bits outside the valid set. */
1233 if (iflags & (~BMV_IF_VALID))
1234 return -XFS_ERROR(EINVAL);
/* Internal marker: tells xfs_getbmap to emit extended (getbmapx) records. */
1236 iflags |= BMV_IF_EXTENDED;
1238 error = xfs_getbmap(bdp, &bm, (struct getbmapx __user *)arg+1, iflags);
/* Convert the updated header back to getbmapx form for the caller. */
1242 GETBMAP_CONVERT(bm, bmx);
1244 if (copy_to_user(arg, &bmx, sizeof(bmx)))
1245 return -XFS_ERROR(EFAULT);