#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
+#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
+#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
(void *)((unsigned long)ioflags),
(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
+ (void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)((unsigned long)(first & 0xffffffff)),
(void *)((unsigned long)((last >> 32) & 0xffffffff)),
(void *)((unsigned long)(last & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
+ (void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
- bhv_vnode_t *vp;
+ vnode_t *vp;
unsigned long seg;
ip = XFS_BHVTOI(bdp);
if (n < size)
size = n;
- if (XFS_FORCED_SHUTDOWN(mp))
+ if (XFS_FORCED_SHUTDOWN(mp)) {
return -EIO;
+ }
if (unlikely(ioflags & IO_ISDIRECT))
mutex_lock(&inode->i_mutex);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(ioflags & IO_INVIS)) {
- bhv_vrwlock_t locktype = VRWLOCK_READ;
+ vrwlock_t locktype = VRWLOCK_READ;
int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
dmflags, &locktype);
if (ret) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- if (unlikely(ioflags & IO_ISDIRECT))
- mutex_unlock(&inode->i_mutex);
- return ret;
+ goto unlock_isem;
}
}
- if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
- bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
- -1, FI_REMAPF_LOCKED);
-
- if (unlikely(ioflags & IO_ISDIRECT))
- mutex_unlock(&inode->i_mutex);
-
xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
(void *)iovp, segs, *offset, ioflags);
ret = __generic_file_aio_read(iocb, iovp, segs, offset);
XFS_STATS_ADD(xs_read_bytes, ret);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
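+ /*
+  * Both the normal path and the DMAPI error path fall through here
+  * to drop the i_mutex taken for direct I/O reads.
+  */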
+unlock_isem:
+ if (unlikely(ioflags & IO_ISDIRECT))
+ mutex_unlock(&inode->i_mutex);
return ret;
}
void *target,
cred_t *credp)
{
- xfs_inode_t *ip = XFS_BHVTOI(bdp);
- xfs_mount_t *mp = ip->i_mount;
ssize_t ret;
+ xfs_fsize_t n;
+ xfs_inode_t *ip;
+ xfs_mount_t *mp;
+ vnode_t *vp;
+
+ ip = XFS_BHVTOI(bdp);
+ vp = BHV_TO_VNODE(bdp);
+ mp = ip->i_mount;
XFS_STATS_INC(xs_read_calls);
- if (XFS_FORCED_SHUTDOWN(mp))
+
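+ /*
+  * Trim the request so it never reads past the largest supported
+  * file offset; a request starting at or beyond it returns 0.
+  */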
+ n = XFS_MAXIOFFSET(mp) - *offset;
+ if ((n <= 0) || (count == 0))
+ return 0;
+
+ if (n < count)
+ count = n;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
+ if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
(!(ioflags & IO_INVIS))) {
- bhv_vrwlock_t locktype = VRWLOCK_READ;
+ vrwlock_t locktype = VRWLOCK_READ;
int error;
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
- *offset, count,
+ error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *offset, count,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
(void *)(unsigned long)target, count, *offset, ioflags);
ret = generic_file_sendfile(filp, offset, count, actor, target);
- if (ret > 0)
- XFS_STATS_ADD(xs_read_bytes, ret);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return ret;
-}
-
-ssize_t
-xfs_splice_read(
- bhv_desc_t *bdp,
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t count,
- int flags,
- int ioflags,
- cred_t *credp)
-{
- xfs_inode_t *ip = XFS_BHVTOI(bdp);
- xfs_mount_t *mp = ip->i_mount;
- ssize_t ret;
- XFS_STATS_INC(xs_read_calls);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
- if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
- (!(ioflags & IO_INVIS))) {
- bhv_vrwlock_t locktype = VRWLOCK_READ;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
- *ppos, count,
- FILP_DELAY_FLAG(infilp), &locktype);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return -error;
- }
- }
- xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
- pipe, count, *ppos, ioflags);
- ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return ret;
-}
-
-ssize_t
-xfs_splice_write(
- bhv_desc_t *bdp,
- struct pipe_inode_info *pipe,
- struct file *outfilp,
- loff_t *ppos,
- size_t count,
- int flags,
- int ioflags,
- cred_t *credp)
-{
- xfs_inode_t *ip = XFS_BHVTOI(bdp);
- xfs_mount_t *mp = ip->i_mount;
- ssize_t ret;
- struct inode *inode = outfilp->f_mapping->host;
- xfs_fsize_t isize;
-
- XFS_STATS_INC(xs_write_calls);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
- if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
- (!(ioflags & IO_INVIS))) {
- bhv_vrwlock_t locktype = VRWLOCK_WRITE;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
- *ppos, count,
- FILP_DELAY_FLAG(outfilp), &locktype);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return -error;
- }
- }
- xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
- pipe, count, *ppos, ioflags);
- ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
- if (ret > 0)
- XFS_STATS_ADD(xs_write_bytes, ret);
-
- isize = i_size_read(inode);
- if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
- *ppos = isize;
-
- if (*ppos > ip->i_d.di_size) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (*ppos > ip->i_d.di_size) {
- ip->i_d.di_size = *ppos;
- i_size_write(inode, *ppos);
- ip->i_update_core = 1;
- ip->i_update_size = 1;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
xfs_fsize_t end_size)
{
xfs_fileoff_t last_fsb;
- xfs_mount_t *mp = io->io_mount;
+ xfs_mount_t *mp;
int nimaps;
int zero_offset;
int zero_len;
ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+ mp = io->io_mount;
+
zero_offset = XFS_B_FSB_OFFSET(mp, isize);
if (zero_offset == 0) {
/*
last_fsb = XFS_B_TO_FSBT(mp, isize);
nimaps = 1;
error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
- &nimaps, NULL, NULL);
+ &nimaps, NULL);
if (error) {
return error;
}
* don't deadlock when the buffer cache calls back to us.
*/
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
-
loff = XFS_FSB_TO_B(mp, last_fsb);
+
zero_len = mp->m_sb.sb_blocksize - zero_offset;
+
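+ /* Zero from isize to the end of the block containing it. */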
error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
int /* error (positive) */
xfs_zero_eof(
- bhv_vnode_t *vp,
+ vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset, /* starting I/O offset */
xfs_fsize_t isize, /* current inode size */
xfs_fsize_t end_size) /* terminal inode size */
{
- struct inode *ip = vn_to_inode(vp);
+ struct inode *ip = LINVFS_GET_IP(vp);
xfs_fileoff_t start_zero_fsb;
xfs_fileoff_t end_zero_fsb;
xfs_fileoff_t zero_count_fsb;
xfs_fileoff_t last_fsb;
- xfs_mount_t *mp = io->io_mount;
+ xfs_extlen_t buf_len_fsb;
+ xfs_mount_t *mp;
int nimaps;
int error = 0;
xfs_bmbt_irec_t imap;
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
ASSERT(offset > isize);
+ mp = io->io_mount;
+
/*
* First handle zeroing the block on which isize resides.
* We only zero a part of that block so it is handled specially.
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
- 0, NULL, 0, &imap, &nimaps, NULL, NULL);
+ 0, NULL, 0, &imap, &nimaps, NULL);
if (error) {
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
}
/*
- * There are blocks we need to zero.
+ * There are blocks in the range requested.
+ * Zero them one write at a time. We don't
+ * necessarily zero the entire range returned if it
+ * is too big; we simply loop around to get the rest.
+ * That is not the most efficient approach, but it is
+ * simple and this path should not be exercised often.
+ */
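+ /* The cap below is 256 * m_writeio_blocks filesystem blocks per pass. */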
+ buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
+ mp->m_writeio_blocks << 8);
+ /*
* Drop the inode lock while we're doing the I/O.
* We'll still have the iolock to protect us.
*/
error = xfs_iozero(ip,
XFS_FSB_TO_B(mp, start_zero_fsb),
- XFS_FSB_TO_B(mp, imap.br_blockcount),
+ XFS_FSB_TO_B(mp, buf_len_fsb),
end_size);
+
if (error) {
goto out_lock;
}
- start_zero_fsb = imap.br_startoff + imap.br_blockcount;
+ start_zero_fsb = imap.br_startoff + buf_len_fsb;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
ssize_t ret = 0, error = 0;
xfs_fsize_t isize, new_size;
xfs_iocore_t *io;
- bhv_vnode_t *vp;
+ vnode_t *vp;
unsigned long seg;
int iolock;
int eventsent = 0;
- bhv_vrwlock_t locktype;
+ vrwlock_t locktype;
size_t ocount = 0, count;
loff_t pos;
- int need_i_mutex = 1, need_flush = 0;
+ int need_isem = 1, need_flush = 0;
XFS_STATS_INC(xs_write_calls);
io = &xip->i_iocore;
mp = io->io_mount;
- vfs_wait_for_freeze(vp->v_vfsp, SB_FREEZE_WRITE);
-
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
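+ /* Block here while the filesystem is frozen for writes. */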
+ fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);
+
if (ioflags & IO_ISDIRECT) {
xfs_buftarg_t *target =
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
return XFS_ERROR(-EINVAL);
if (!VN_CACHED(vp) && pos < i_size_read(inode))
- need_i_mutex = 0;
+ need_isem = 0;
if (VN_CACHED(vp))
need_flush = 1;
}
relock:
- if (need_i_mutex) {
+ if (need_isem) {
iolock = XFS_IOLOCK_EXCL;
locktype = VRWLOCK_WRITE;
S_ISBLK(inode->i_mode));
if (error) {
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
- goto out_unlock_mutex;
+ goto out_unlock_isem;
}
new_size = pos + count;
loff_t savedsize = pos;
int dmflags = FILP_DELAY_FLAG(file);
- if (need_i_mutex)
+ if (need_isem)
dmflags |= DM_FLAGS_IMUX;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
dmflags, &locktype);
if (error) {
xfs_iunlock(xip, iolock);
- goto out_unlock_mutex;
+ goto out_unlock_isem;
}
xfs_ilock(xip, XFS_ILOCK_EXCL);
eventsent = 1;
/*
* The iolock was dropped and reacquired in XFS_SEND_DATA
* so we have to recheck the size when appending.
* We will only "goto start;" once, since having sent the
* event prevents another call to XFS_SEND_DATA, which is
isize, pos + count);
if (error) {
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
- goto out_unlock_mutex;
+ goto out_unlock_isem;
}
}
xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = -remove_suid(file->f_dentry);
if (unlikely(error)) {
xfs_iunlock(xip, iolock);
- goto out_unlock_mutex;
+ goto out_unlock_isem;
}
}
if (need_flush) {
xfs_inval_cached_trace(io, pos, -1,
ctooff(offtoct(pos)), -1);
- bhv_vop_flushinval_pages(vp, ctooff(offtoct(pos)),
+ VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
-1, FI_REMAPF_LOCKED);
}
- if (need_i_mutex) {
+ if (need_isem) {
/* demote the lock now the cached pages are gone */
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
mutex_unlock(&inode->i_mutex);
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
- need_i_mutex = 0;
+ need_isem = 0;
}
xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
pos += ret;
count -= ret;
- need_i_mutex = 1;
+ need_isem = 1;
ioflags &= ~IO_ISDIRECT;
xfs_iunlock(xip, iolock);
goto relock;
!(ioflags & IO_INVIS)) {
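+ /*
+  * Drop all locks around the DMAPI NOSPACE event, then retake
+  * them and continue from the current on-disk file size.
+  */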
xfs_rwunlock(bdp, locktype);
- if (need_i_mutex)
+ if (need_isem)
mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_nounlocks;
- if (need_i_mutex)
+ if (need_isem)
mutex_lock(&inode->i_mutex);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
/* Handle various SYNC-type writes */
if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
- error = xfs_write_sync_logforce(mp, xip);
- if (error)
- goto out_unlock_internal;
+ /*
+ * If we're treating this as O_DSYNC and we have not updated the
+ * size, force the log.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
+ !(xip->i_update_size)) {
+ xfs_inode_log_item_t *iip = xip->i_itemp;
+
+ /*
+ * If an allocation transaction occurred
+ * without extending the size, then we have to force
+ * the log up the proper point to ensure that the
+ * allocation is permanent. We can't count on
+ * the fact that buffered writes lock out direct I/O
+ * writes - the direct I/O write could have extended
+ * the size nontransactionally, then finished before
+ * we started. xfs_write_file will think that the file
+ * didn't grow but the update isn't safe unless the
+ * size change is logged.
+ *
+ * Force the log if we've committed a transaction
+ * against the inode or if someone else has and
+ * the commit record hasn't gone to disk (e.g.
+ * the inode is pinned). This guarantees that
+ * all changes affecting the inode are permanent
+ * when we return.
+ */
+ if (iip && iip->ili_last_lsn) {
+ xfs_log_force(mp, iip->ili_last_lsn,
+ XFS_LOG_FORCE | XFS_LOG_SYNC);
+ } else if (xfs_ipincount(xip) > 0) {
+ xfs_log_force(mp, (xfs_lsn_t)0,
+ XFS_LOG_FORCE | XFS_LOG_SYNC);
+ }
+ } else {
+ xfs_trans_t *tp;
+
+ /*
+ * O_SYNC or O_DSYNC _with_ a size update are handled
+ * the same way.
+ *
+ * If the write was synchronous then we need to make
+ * sure that the inode modification time is permanent.
+ * We'll have updated the timestamp above, so here
+ * we use a synchronous transaction to log the inode.
+ * It's not fast, but it's necessary.
+ *
+ * If this is a dsync write and the size got changed
+ * non-transactionally, then we need to ensure that
+ * the size change gets logged in a synchronous
+ * transaction.
+ */
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+ if ((error = xfs_trans_reserve(tp, 0,
+ XFS_SWRITE_LOG_RES(mp),
+ 0, 0, 0))) {
+ /* Transaction reserve failed */
+ xfs_trans_cancel(tp, 0);
+ } else {
+ /* Transaction reserve successful */
+ xfs_ilock(xip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, xip);
+ xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0, NULL);
+ xfs_iunlock(xip, XFS_ILOCK_EXCL);
+ }
+ if (error)
+ goto out_unlock_internal;
+ }
+
xfs_rwunlock(bdp, locktype);
- if (need_i_mutex)
+ if (need_isem)
mutex_unlock(&inode->i_mutex);
error = sync_page_range(inode, mapping, pos, ret);
out_unlock_internal:
xfs_rwunlock(bdp, locktype);
- out_unlock_mutex:
- if (need_i_mutex)
+ out_unlock_isem:
+ if (need_isem)
mutex_unlock(&inode->i_mutex);
out_nounlocks:
return -error;