#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
+#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
+#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
int mask)
{
xfs_inode_t *ip;
- bhv_vnode_t *vp = vn_from_inode(inode);
+ vnode_t *vp = vn_from_inode(inode);
loff_t isize = i_size_read(inode);
loff_t offset = page_offset(page);
int delalloc = -1, unmapped = -1, unwritten = -1;
for (bh = ioend->io_buffer_head; bh; bh = next) {
next = bh->b_private;
- bh->b_end_io(bh, !ioend->io_error);
+ bh->b_end_io(bh, ioend->io_uptodate);
}
- if (unlikely(ioend->io_error))
- vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
+
vn_iowake(ioend->io_vnode);
mempool_free(ioend, xfs_ioend_pool);
}
void *data)
{
xfs_ioend_t *ioend = data;
- bhv_vnode_t *vp = ioend->io_vnode;
+ vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
+ int error;
- if (likely(!ioend->io_error))
- bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
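+ /*
+ * The data made it to disk, so convert any unwritten extents
+ * covering this range to regular written extents.
+ */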
+ if (ioend->io_uptodate)
+ VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
xfs_destroy_ioend(ioend);
}
* all the I/O from calling the completion routine too early.
*/
atomic_set(&ioend->io_remaining, 1);
- ioend->io_error = 0;
+ ioend->io_uptodate = 1; /* cleared if any I/O fails */
ioend->io_list = NULL;
ioend->io_type = type;
ioend->io_vnode = vn_from_inode(inode);
xfs_iomap_t *mapp,
int flags)
{
- bhv_vnode_t *vp = vn_from_inode(inode);
+ vnode_t *vp = vn_from_inode(inode);
int error, nmaps = 1;
- error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
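+ /* VOP_BMAP hands its return code back through the last argument */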
+ VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
VMODIFY(vp);
return -error;
if (bio->bi_size)
return 1;
+ ASSERT(ioend);
ASSERT(atomic_read(&bio->bi_cnt) >= 1);
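+ /* a single failed bio marks the whole ioend as not up to date */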
- ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ ioend->io_uptodate = 0;
/* Toss bio and pass work off to an xfsdatad thread */
bio->bi_private = NULL;
bio->bi_end_io = NULL;
bio_put(bio);
xfs_finish_ioend(ioend);
return 0;
}
* then mark the page dirty again and leave the page
* as is.
*/
- if (current_test_flags(PF_FSTRANS) && need_trans)
+ if (PFLAGS_TEST_FSTRANS() && need_trans)
goto out_fail;
/*
return error;
}
-STATIC int
-xfs_vm_writepages(
- struct address_space *mapping,
- struct writeback_control *wbc)
-{
- struct bhv_vnode *vp = vn_from_inode(mapping->host);
-
- if (VN_TRUNC(vp))
- VUNTRUNCATE(vp);
- return generic_writepages(mapping, wbc);
-}
-
/*
* Called to move a page into cleanable state - and from there
* to be released. Possibly the page is already clean. We always
/* If we are already inside a transaction or the thread cannot
* do I/O, we cannot release this page.
*/
- if (current_test_flags(PF_FSTRANS))
+ if (PFLAGS_TEST_FSTRANS())
return 0;
/*
int direct,
bmapi_flags_t flags)
{
- bhv_vnode_t *vp = vn_from_inode(inode);
+ vnode_t *vp = vn_from_inode(inode);
xfs_iomap_t iomap;
xfs_off_t offset;
ssize_t size;
offset = (xfs_off_t)iblock << inode->i_blkbits;
ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
size = bh_result->b_size;
- error = bhv_vop_bmap(vp, offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap);
+ VOP_BMAP(vp, offset, size,
+ create ? flags : BMAPI_READ, &iomap, &niomap, error);
if (error)
return -error;
if (niomap == 0)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- bhv_vnode_t *vp = vn_from_inode(inode);
+ vnode_t *vp = vn_from_inode(inode);
xfs_iomap_t iomap;
int maps = 1;
int error;
ssize_t ret;
- error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
+ VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
if (error)
return -error;
iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
- if (rw == WRITE) {
- ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- iomap.iomap_target->bt_bdev,
- iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- } else {
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- iomap.iomap_target->bt_bdev,
- iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- }
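+ /*
+ * XFS does its own inode locking around direct I/O, so the
+ * "own locking" variant of the generic path is used for both
+ * reads and writes.
+ */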
+ ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+ iomap.iomap_target->bt_bdev,
+ iov, offset, nr_segs,
+ xfs_get_blocks_direct,
+ xfs_end_io_direct);
if (unlikely(ret <= 0 && iocb->private))
xfs_destroy_ioend(iocb->private);
sector_t block)
{
struct inode *inode = (struct inode *)mapping->host;
- bhv_vnode_t *vp = vn_from_inode(inode);
+ vnode_t *vp = vn_from_inode(inode);
+ int error;
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- bhv_vop_rwlock(vp, VRWLOCK_READ);
- bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
- bhv_vop_rwunlock(vp, VRWLOCK_READ);
+
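+ /* flush dirty and delalloc pages first so bmap sees real disk blocks */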
+ VOP_RWLOCK(vp, VRWLOCK_READ);
+ VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
+ VOP_RWUNLOCK(vp, VRWLOCK_READ);
return generic_block_bmap(mapping, block, xfs_get_blocks);
}
block_invalidatepage(page, offset);
}
-const struct address_space_operations xfs_address_space_operations = {
+struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
- .writepages = xfs_vm_writepages,
.sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,