X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fxfs%2Flinux-2.6%2Fxfs_aops.c;h=7b54461695e2de979924c14cfe79a1c56d2587ef;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=a98073629b48723fbd862a99dba6563d901bb7c2;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index a98073629..7b5446169 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,7 +21,6 @@
 #include "xfs_inum.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_trans.h"
 #include "xfs_dmapi.h"
@@ -29,7 +28,6 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
@@ -43,7 +41,29 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
+STATIC void
+xfs_count_page_state(
+	struct page		*page,
+	int			*delalloc,
+	int			*unmapped,
+	int			*unwritten)
+{
+	struct buffer_head	*bh, *head;
+
+	*delalloc = *unmapped = *unwritten = 0;
+
+	bh = head = page_buffers(page);
+	do {
+		if (buffer_uptodate(bh) && !buffer_mapped(bh))
+			(*unmapped) = 1;
+		else if (buffer_unwritten(bh) && !buffer_delay(bh))
+			clear_buffer_unwritten(bh);
+		else if (buffer_unwritten(bh))
+			(*unwritten) = 1;
+		else if (buffer_delay(bh))
+			(*delalloc) = 1;
+	} while ((bh = bh->b_this_page) != head);
+}
 
 #if defined(XFS_RW_TRACE)
 void
@@ -51,10 +71,10 @@ xfs_page_trace(
 	int		tag,
 	struct inode	*inode,
 	struct page	*page,
-	int		mask)
+	unsigned long	pgoff)
 {
 	xfs_inode_t	*ip;
-	vnode_t		*vp = LINVFS_GET_VP(inode);
+	bhv_vnode_t	*vp = vn_from_inode(inode);
 	loff_t		isize = i_size_read(inode);
 	loff_t		offset = page_offset(page);
 	int		delalloc = -1, unmapped = -1, unwritten = -1;
@@ -71,7 +91,7 @@ xfs_page_trace(
 		(void *)ip,
 		(void *)inode,
 		(void *)page,
-		(void *)((unsigned long)mask),
+		(void *)pgoff,
 		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
 		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
 		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
@@ -81,11 +101,11 @@ xfs_page_trace(
 		(void *)((unsigned long)delalloc),
 		(void *)((unsigned long)unmapped),
 		(void *)((unsigned long)unwritten),
-		(void *)NULL,
+		(void *)((unsigned long)current_pid()),
 		(void *)NULL);
 }
 #else
-#define xfs_page_trace(tag, inode, page, mask)
+#define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
 /*
@@ -114,9 +134,10 @@ xfs_destroy_ioend(
 
 	for (bh = ioend->io_buffer_head; bh; bh = next) {
 		next = bh->b_private;
-		bh->b_end_io(bh, ioend->io_uptodate);
+		bh->b_end_io(bh, !ioend->io_error);
 	}
-
+	if (unlikely(ioend->io_error))
+		vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
 	vn_iowake(ioend->io_vnode);
 	mempool_free(ioend, xfs_ioend_pool);
 }
@@ -128,9 +149,10 @@ xfs_destroy_ioend(
  */
STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -140,9 +162,10 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
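The handler conversions above track the 2.6.20 workqueue API change: INIT_WORK() lost its data argument, so a handler now receives the struct work_struct pointer itself and recovers the embedding object with container_of(). A standalone sketch of that pointer arithmetic (the types here are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct ioend {
	int			error;
	struct work_struct	io_work;	/* embedded, like io_work in xfs_ioend_t */
};

static void end_io_worker(struct work_struct *work)
{
	/* Recover the embedding object from the member pointer. */
	struct ioend *ioend = container_of(work, struct ioend, io_work);

	printf("ioend->error = %d\n", ioend->error);
}

int main(void)
{
	struct ioend io = { .error = -5 };

	end_io_worker(&io.io_work);	/* prints: ioend->error = -5 */
	return 0;
}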
@@ -155,16 +178,16 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
-	vnode_t			*vp = ioend->io_vnode;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
+	bhv_vnode_t		*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
-	int			error;
 
-	if (ioend->io_uptodate)
-		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+	if (likely(!ioend->io_error))
+		bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -189,10 +212,10 @@ xfs_alloc_ioend(
 	 * all the I/O from calling the completion routine too early.
 	 */
 	atomic_set(&ioend->io_remaining, 1);
-	ioend->io_uptodate = 1; /* cleared if any I/O fails */
+	ioend->io_error = 0;
 	ioend->io_list = NULL;
 	ioend->io_type = type;
-	ioend->io_vnode = LINVFS_GET_VP(inode);
+	ioend->io_vnode = vn_from_inode(inode);
 	ioend->io_buffer_head = NULL;
 	ioend->io_buffer_tail = NULL;
 	atomic_inc(&ioend->io_vnode->v_iocount);
@@ -200,11 +223,11 @@ xfs_alloc_ioend(
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
@@ -217,10 +240,10 @@ xfs_map_blocks(
 	xfs_iomap_t		*mapp,
 	int			flags)
 {
-	vnode_t			*vp = LINVFS_GET_VP(inode);
+	bhv_vnode_t		*vp = vn_from_inode(inode);
 	int			error, nmaps = 1;
 
-	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+	error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
 	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
 		VMODIFY(vp);
 	return -error;
@@ -249,16 +272,14 @@ xfs_end_bio(
 	if (bio->bi_size)
 		return 1;
 
-	ASSERT(ioend);
 	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 
 	/* Toss bio and pass work off to an xfsdatad thread */
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		ioend->io_uptodate = 0;
 	bio->bi_private = NULL;
 	bio->bi_end_io = NULL;
-
 	bio_put(bio);
+
 	xfs_finish_ioend(ioend);
 	return 0;
 }
@@ -320,9 +341,9 @@ xfs_start_page_writeback(
 {
 	ASSERT(PageLocked(page));
 	ASSERT(!PageWriteback(page));
-	set_page_writeback(page);
 	if (clear_dirty)
-		clear_page_dirty(page);
+		clear_page_dirty_for_io(page);
+	set_page_writeback(page);
 	unlock_page(page);
 	if (!buffers) {
 		end_page_writeback(page);
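As the comment retained in xfs_alloc_ioend() above notes, io_remaining starts at 1: the submitter holds a bias reference so the ioend cannot complete while bios are still being built. A minimal single-threaded model of that pattern (plain ints for clarity; the kernel uses atomic_t and queues the completion on a workqueue):

#include <stdio.h>

struct ioend { int remaining; };

static void finish(struct ioend *io)
{
	/* Completion runs only when the last reference is dropped. */
	if (--io->remaining == 0)
		printf("all I/O done, run completion\n");
}

int main(void)
{
	struct ioend io = { .remaining = 1 };	/* bias ref held by submitter */
	int i;

	for (i = 0; i < 3; i++)
		io.remaining++;		/* one reference per bio submitted */
	for (i = 0; i < 3; i++)
		finish(&io);		/* bios complete: no premature finish */
	finish(&io);			/* submitter drops the bias ref: prints */
	return 0;
}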
@@ -350,7 +371,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
 */
 STATIC void
 xfs_submit_ioend(
@@ -461,6 +482,26 @@ xfs_add_to_ioend(
 	ioend->io_size += bh->b_size;
 }
 
+STATIC void
+xfs_map_buffer(
+	struct buffer_head	*bh,
+	xfs_iomap_t		*mp,
+	xfs_off_t		offset,
+	uint			block_bits)
+{
+	sector_t		bn;
+
+	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+
+	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
+	      ((offset - mp->iomap_offset) >> block_bits);
+
+	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+
+	bh->b_blocknr = bn;
+	set_buffer_mapped(bh);
+}
+
 STATIC void
 xfs_map_at_offset(
 	struct buffer_head	*bh,
@@ -468,22 +509,11 @@ xfs_map_at_offset(
 	int			block_bits,
 	xfs_iomap_t		*iomapp)
 {
-	xfs_daddr_t		bn;
-	int			sector_shift;
-
 	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
 	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
-	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
-
-	sector_shift = block_bits - BBSHIFT;
-	bn = (iomapp->iomap_bn >> sector_shift) +
-	      ((offset - iomapp->iomap_offset) >> block_bits);
-
-	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
-	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
 	lock_buffer(bh);
-	bh->b_blocknr = bn;
+	xfs_map_buffer(bh, iomapp, offset, block_bits);
 	bh->b_bdev = iomapp->iomap_target->bt_bdev;
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
@@ -668,7 +698,7 @@ xfs_convert_page(
 
 	/*
 	 * page_dirty is initially a count of buffers on the page before
-	 * EOF and is decrememted as we move each into a cleanable state.
+	 * EOF and is decremented as we move each into a cleanable state.
 	 *
 	 * Derivation:
 	 *
@@ -811,7 +841,7 @@ xfs_cluster_write(
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate. Some layers
+ * only valid if the page itself isn't completely uptodate. Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
@@ -839,12 +869,14 @@ xfs_page_state_convert(
 	pgoff_t			end_index, last_index, tlast;
 	ssize_t			size, len;
 	int			flags, err, iomap_valid = 0, uptodate = 1;
-	int			page_dirty, count = 0, trylock_flag = 0;
+	int			page_dirty, count = 0;
+	int			trylock = 0;
 	int			all_bh = unmapped;
 
-	/* wait for other IO threads? */
-	if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
-		trylock_flag |= BMAPI_TRYLOCK;
+	if (startio) {
+		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+			trylock |= BMAPI_TRYLOCK;
+	}
 
 	/* Is this page beyond the end of the file? */
 	offset = i_size_read(inode);
@@ -861,7 +893,7 @@ xfs_page_state_convert(
 
 	/*
 	 * page_dirty is initially a count of buffers on the page before
-	 * EOF and is decrememted as we move each into a cleanable state.
+	 * EOF and is decremented as we move each into a cleanable state.
 	 *
 	 * Derivation:
 	 *
@@ -925,15 +957,13 @@ xfs_page_state_convert(
 
 			if (buffer_unwritten(bh)) {
 				type = IOMAP_UNWRITTEN;
-				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
 			} else if (buffer_delay(bh)) {
 				type = IOMAP_DELAY;
-				flags = BMAPI_ALLOCATE;
-				if (!startio)
-					flags |= trylock_flag;
+				flags = BMAPI_ALLOCATE | trylock;
 			} else {
 				type = IOMAP_NEW;
-				flags = BMAPI_WRITE|BMAPI_MMAP;
+				flags = BMAPI_WRITE | BMAPI_MMAP;
 			}
 
 			if (!iomap_valid) {
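The arithmetic consolidated into the new xfs_map_buffer() helper above converts an extent start kept in 512-byte "basic blocks" (BBSHIFT is 9) into filesystem-block units, then adds the buffer's block index within the extent. A worked example with assumed illustrative values (4 KiB filesystem blocks):

#include <stdio.h>
#include <stdint.h>

#define BBSHIFT 9	/* XFS basic blocks are 512 bytes */

int main(void)
{
	unsigned int block_bits = 12;		/* 4096-byte fs blocks */
	int64_t iomap_bn     = 8192;		/* extent start, in 512B units */
	int64_t iomap_offset = 0x100000;	/* extent start, as a file offset */
	int64_t offset       = 0x103000;	/* this buffer's file offset */
	int64_t bn;

	/* Scale the extent start down to fs blocks (8192 >> 3 = 1024),
	 * then add the buffer's block offset within the extent (3). */
	bn = (iomap_bn >> (block_bits - BBSHIFT)) +
	     ((offset - iomap_offset) >> block_bits);

	printf("b_blocknr = %lld\n", (long long)bn);	/* prints 1027 */
	return 0;
}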
@@ -1040,54 +1070,203 @@ error:
 	return err;
 }
 
+/*
+ * writepage: Called from one of two places:
+ *
+ * 1. we are flushing a delalloc buffer head.
+ *
+ * 2. we are writing out a dirty page. Typically the page dirty
+ *    state is cleared before we get here. In this case is it
+ *    conceivable we have no buffer heads.
+ *
+ * For delalloc space on the page we need to allocate space and
+ * flush it. For unmapped buffer heads on the page we should
+ * allocate space if the page is uptodate. For any other dirty
+ * buffer heads on the page we should flush them.
+ *
+ * If we detect that a transaction would be required to flush
+ * the page, we have to check the process flags first, if we
+ * are already in a transaction or disk I/O during allocations
+ * is off, we need to fail the writepage and redirty the page.
+ */
+
+STATIC int
+xfs_vm_writepage(
+	struct page		*page,
+	struct writeback_control *wbc)
+{
+	int			error;
+	int			need_trans;
+	int			delalloc, unmapped, unwritten;
+	struct inode		*inode = page->mapping->host;
+
+	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+
+	/*
+	 * We need a transaction if:
+	 *  1. There are delalloc buffers on the page
+	 *  2. The page is uptodate and we have unmapped buffers
+	 *  3. The page is uptodate and we have no buffers
+	 *  4. There are unwritten buffers on the page
+	 */
+
+	if (!page_has_buffers(page)) {
+		unmapped = 1;
+		need_trans = 1;
+	} else {
+		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+		if (!PageUptodate(page))
+			unmapped = 0;
+		need_trans = delalloc + unmapped + unwritten;
+	}
+
+	/*
+	 * If we need a transaction and the process flags say
+	 * we are already in a transaction, or no IO is allowed
+	 * then mark the page dirty again and leave the page
+	 * as is.
+	 */
+	if (current_test_flags(PF_FSTRANS) && need_trans)
+		goto out_fail;
+
+	/*
+	 * Delay hooking up buffer heads until we have
+	 * made our go/no-go decision.
+	 */
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+	/*
+	 * Convert delayed allocate, unwritten or unmapped space
+	 * to real space and flush out to disk.
+	 */
+	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
+	if (error == -EAGAIN)
+		goto out_fail;
+	if (unlikely(error < 0))
+		goto out_unlock;
+
+	return 0;
+
+out_fail:
+	redirty_page_for_writepage(wbc, page);
+	unlock_page(page);
+	return 0;
+out_unlock:
+	unlock_page(page);
+	return error;
+}
+
+STATIC int
+xfs_vm_writepages(
+	struct address_space	*mapping,
+	struct writeback_control *wbc)
+{
+	struct bhv_vnode	*vp = vn_from_inode(mapping->host);
+
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
+	return generic_writepages(mapping, wbc);
+}
+
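xfs_vm_writepage() above collapses its four transaction conditions into the sum delalloc + unmapped + unwritten, after forcing unmapped to zero on pages that are not uptodate. Restated as a standalone predicate purely for illustration (not kernel code):

#include <stdio.h>

static int need_trans(int has_buffers, int uptodate,
		      int delalloc, int unmapped, int unwritten)
{
	if (!has_buffers)
		return 1;	/* case 3: no buffer heads at all */
	if (!uptodate)
		unmapped = 0;	/* unmapped only counts on uptodate pages */
	return delalloc + unmapped + unwritten;	/* cases 1, 2 and 4 */
}

int main(void)
{
	printf("%d\n", need_trans(1, 1, 0, 0, 0));	/* 0: plain mapped write */
	printf("%d\n", need_trans(1, 1, 1, 0, 0));	/* 1: delalloc present */
	printf("%d\n", need_trans(1, 0, 0, 1, 0));	/* 0: page not uptodate */
	printf("%d\n", need_trans(0, 1, 0, 0, 0));	/* 1: no buffer heads */
	return 0;
}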
+/*
+ * Called to move a page into cleanable state - and from there
+ * to be released. Possibly the page is already clean. We always
+ * have buffer heads in this call.
+ *
+ * Returns 0 if the page is ok to release, 1 otherwise.
+ *
+ * Possible scenarios are:
+ *
+ * 1. We are being called to release a page which has been written
+ *    to via regular I/O. buffer heads will be dirty and possibly
+ *    delalloc. If no delalloc buffer heads in this case then we
+ *    can just return zero.
+ *
+ * 2. We are called to release a page which has been written via
+ *    mmap, all we need to do is ensure there is no delalloc
+ *    state in the buffer heads, if not we can let the caller
+ *    free them and we should come back later via writepage.
+ */
+STATIC int
+xfs_vm_releasepage(
+	struct page		*page,
+	gfp_t			gfp_mask)
+{
+	struct inode		*inode = page->mapping->host;
+	int			dirty, delalloc, unmapped, unwritten;
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = 1,
+	};
+
+	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+
+	if (!page_has_buffers(page))
+		return 0;
+
+	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+	if (!delalloc && !unwritten)
+		goto free_buffers;
+
+	if (!(gfp_mask & __GFP_FS))
+		return 0;
+
+	/* If we are already inside a transaction or the thread cannot
+	 * do I/O, we cannot release this page.
+	 */
+	if (current_test_flags(PF_FSTRANS))
+		return 0;
+
+	/*
+	 * Convert delalloc space to real space, do not flush the
+	 * data out to disk, that will be done by the caller.
+	 * Never need to allocate space here - we will always
+	 * come back to writepage in that case.
+	 */
+	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
+	if (dirty == 0 && !unwritten)
+		goto free_buffers;
+	return 0;
+
+free_buffers:
+	return try_to_free_buffers(page);
+}
+
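In the rework of __linvfs_get_block() that follows, the explicit blocks argument disappears: with the newer get_blocks interface the caller passes the size of the mapping it wants in bh_result->b_size, and the filesystem clamps that to the contiguous extent it can actually map. A toy model of the handshake, with made-up types and a one-extent "filesystem" (not the kernel's API):

#include <stdio.h>
#include <stdint.h>

struct bh_model {
	uint64_t b_blocknr;
	size_t	 b_size;	/* in: bytes wanted, out: bytes mapped */
};

/* Pretend block 0 of the file starts a 16 KiB contiguous extent
 * placed at filesystem block 100. */
static int get_blocks_model(uint64_t iblock, struct bh_model *bh)
{
	size_t contiguous = 16 * 1024;

	bh->b_blocknr = 100 + iblock;
	if (bh->b_size > contiguous)
		bh->b_size = contiguous;	/* clamp to the extent */
	return 0;
}

int main(void)
{
	struct bh_model bh = { .b_size = 64 * 1024 };	/* caller asks for 64 KiB */

	get_blocks_model(0, &bh);
	printf("mapped %zu bytes at block %llu\n",
	       bh.b_size, (unsigned long long)bh.b_blocknr);
	return 0;
}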
 STATIC int
-__linvfs_get_block(
+__xfs_get_blocks(
 	struct inode		*inode,
 	sector_t		iblock,
-	unsigned long		blocks,
 	struct buffer_head	*bh_result,
 	int			create,
 	int			direct,
 	bmapi_flags_t		flags)
 {
-	vnode_t			*vp = LINVFS_GET_VP(inode);
+	bhv_vnode_t		*vp = vn_from_inode(inode);
 	xfs_iomap_t		iomap;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			retpbbm = 1;
+	int			niomap = 1;
 	int			error;
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
-	if (blocks)
-		size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
-					(xfs_off_t)blocks << inode->i_blkbits);
-	else
-		size = 1 << inode->i_blkbits;
-
-	VOP_BMAP(vp, offset, size,
-		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+	size = bh_result->b_size;
+	error = bhv_vop_bmap(vp, offset, size,
+			     create ? flags : BMAPI_READ, &iomap, &niomap);
 	if (error)
 		return -error;
-
-	if (retpbbm == 0)
+	if (niomap == 0)
 		return 0;
 
 	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
-		xfs_daddr_t	bn;
-		xfs_off_t	delta;
-
-		/* For unwritten extents do not report a disk address on
+		/*
+		 * For unwritten extents do not report a disk address on
 		 * the read case (treat as if we're reading into a hole).
 		 */
 		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
-			delta = offset - iomap.iomap_offset;
-			delta >>= inode->i_blkbits;
-
-			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
-			bn += delta;
-			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
-			bh_result->b_blocknr = bn;
-			set_buffer_mapped(bh_result);
+			xfs_map_buffer(bh_result, &iomap, offset,
+				       inode->i_blkbits);
 		}
 		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
 			if (direct)
@@ -1097,12 +1276,16 @@ __linvfs_get_block(
 		}
 	}
 
-	/* If this is a realtime file, data might be on a new device */
+	/*
+	 * If this is a realtime file, data may be on a different device.
+	 * to that pointed to from the buffer_head b_bdev currently.
+	 */
 	bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
-	/* If we previously allocated a block out beyond eof and
-	 * we are now coming back to use it then we will need to
-	 * flag it as new even if it has a disk address.
+	/*
+	 * If we previously allocated a block out beyond eof and we are
+	 * now coming back to use it then we will need to flag it as new
+	 * even if it has a disk address.
 	 */
 	if (create &&
 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1118,42 +1301,40 @@ __linvfs_get_block(
 		}
 	}
 
-	if (blocks) {
+	if (direct || size > (1 << inode->i_blkbits)) {
 		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
 		offset = min_t(xfs_off_t,
-				iomap.iomap_bsize - iomap.iomap_delta,
-				(xfs_off_t)blocks << inode->i_blkbits);
-		bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+				iomap.iomap_bsize - iomap.iomap_delta, size);
+		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
 	}
 
 	return 0;
 }
 
 int
-linvfs_get_block(
+xfs_get_blocks(
 	struct inode		*inode,
 	sector_t		iblock,
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __linvfs_get_block(inode, iblock, 0, bh_result,
-				   create, 0, BMAPI_WRITE);
+	return __xfs_get_blocks(inode, iblock,
+				bh_result, create, 0, BMAPI_WRITE);
 }
 
 STATIC int
-linvfs_get_blocks_direct(
+xfs_get_blocks_direct(
 	struct inode		*inode,
 	sector_t		iblock,
-	unsigned long		max_blocks,
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
-				   create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+	return __xfs_get_blocks(inode, iblock,
+				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
 STATIC void
-linvfs_end_io_direct(
+xfs_end_io_direct(
 	struct kiocb	*iocb,
 	loff_t		offset,
 	ssize_t		size,
@@ -1164,9 +1345,9 @@
 	/*
 	 * Non-NULL private data means we need to issue a transaction to
 	 * convert a range from unwritten to written extents. This needs
-	 * to happen from process contect but aio+dio I/O completion
+	 * to happen from process context but aio+dio I/O completion
 	 * happens from irq context so we need to defer it to a workqueue.
-	 * This is not nessecary for synchronous direct I/O, but we do
+	 * This is not necessary for synchronous direct I/O, but we do
 	 * it anyway to keep the code uniform and simpler.
 	 *
 	 * The core direct I/O code might be changed to always call the
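The comment fixed above describes deferring the unwritten-to-written extent conversion from irq context to a worker. A toy single-threaded model of that split (a stand-in for the xfsdatad workqueue, not kernel code):

#include <stdio.h>

#define MAX_WORK 8

typedef void (*work_fn)(long long offset, long long size);

static struct { work_fn fn; long long off, len; } queue[MAX_WORK];
static int queued;

/* "Interrupt context": just record what must happen later. */
static void end_io(long long off, long long len, work_fn fn)
{
	queue[queued].fn = fn;
	queue[queued].off = off;
	queue[queued].len = len;
	queued++;
}

/* "Process context": safe to run a transaction here. */
static void convert_unwritten(long long off, long long len)
{
	printf("convert [%lld, %lld) to written\n", off, off + len);
}

int main(void)
{
	int i;

	end_io(0, 4096, convert_unwritten);	/* completion defers the work */
	end_io(8192, 4096, convert_unwritten);
	for (i = 0; i < queued; i++)		/* worker drains the queue */
		queue[i].fn(queue[i].off, queue[i].len);
	return 0;
}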
@@ -1178,12 +1359,11 @@
 		ioend->io_size = size;
 		xfs_finish_ioend(ioend);
 	} else {
-		ASSERT(size >= 0);
 		xfs_destroy_ioend(ioend);
 	}
 
 	/*
-	 * blockdev_direct_IO can return an error even afer the I/O
+	 * blockdev_direct_IO can return an error even after the I/O
 	 * completion handler was called. Thus we need to protect
 	 * against double-freeing.
 	 */
@@ -1191,7 +1371,7 @@
 }
 
 STATIC ssize_t
-linvfs_direct_IO(
+xfs_vm_direct_IO(
 	int			rw,
 	struct kiocb		*iocb,
 	const struct iovec	*iov,
@@ -1200,268 +1380,101 @@
 {
 	struct file	*file = iocb->ki_filp;
 	struct inode	*inode = file->f_mapping->host;
-	vnode_t		*vp = LINVFS_GET_VP(inode);
+	bhv_vnode_t	*vp = vn_from_inode(inode);
 	xfs_iomap_t	iomap;
 	int		maps = 1;
 	int		error;
 	ssize_t		ret;
 
-	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
+	error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
 	if (error)
 		return -error;
 
 	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 
-	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-		iomap.iomap_target->bt_bdev,
-		iov, offset, nr_segs,
-		linvfs_get_blocks_direct,
-		linvfs_end_io_direct);
+	if (rw == WRITE) {
+		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+			iomap.iomap_target->bt_bdev,
+			iov, offset, nr_segs,
+			xfs_get_blocks_direct,
+			xfs_end_io_direct);
+	} else {
+		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+			iomap.iomap_target->bt_bdev,
+			iov, offset, nr_segs,
+			xfs_get_blocks_direct,
+			xfs_end_io_direct);
+	}
 
-	if (unlikely(ret <= 0 && iocb->private))
+	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
 		xfs_destroy_ioend(iocb->private);
 	return ret;
 }
 
+STATIC int
+xfs_vm_prepare_write(
+	struct file		*file,
+	struct page		*page,
+	unsigned int		from,
+	unsigned int		to)
+{
+	return block_prepare_write(page, from, to, xfs_get_blocks);
+}
+
 STATIC sector_t
-linvfs_bmap(
+xfs_vm_bmap(
 	struct address_space	*mapping,
 	sector_t		block)
 {
 	struct inode		*inode = (struct inode *)mapping->host;
-	vnode_t			*vp = LINVFS_GET_VP(inode);
-	int			error;
+	bhv_vnode_t		*vp = vn_from_inode(inode);
 
-	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
-
-	VOP_RWLOCK(vp, VRWLOCK_READ);
-	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
-	VOP_RWUNLOCK(vp, VRWLOCK_READ);
-	return generic_block_bmap(mapping, block, linvfs_get_block);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+	bhv_vop_rwlock(vp, VRWLOCK_READ);
+	bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
+	bhv_vop_rwunlock(vp, VRWLOCK_READ);
+	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpage(
+xfs_vm_readpage(
 	struct file		*unused,
 	struct page		*page)
 {
-	return mpage_readpage(page, linvfs_get_block);
+	return mpage_readpage(page, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpages(
+xfs_vm_readpages(
 	struct file		*unused,
 	struct address_space	*mapping,
 	struct list_head	*pages,
 	unsigned		nr_pages)
 {
-	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
+	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
 STATIC void
-xfs_count_page_state(
-	struct page		*page,
-	int			*delalloc,
-	int			*unmapped,
-	int			*unwritten)
-{
-	struct buffer_head	*bh, *head;
-
-	*delalloc = *unmapped = *unwritten = 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (buffer_uptodate(bh) && !buffer_mapped(bh))
-			(*unmapped) = 1;
-		else if (buffer_unwritten(bh) && !buffer_delay(bh))
-			clear_buffer_unwritten(bh);
-		else if (buffer_unwritten(bh))
-			(*unwritten) = 1;
-		else if (buffer_delay(bh))
-			(*delalloc) = 1;
-	} while ((bh = bh->b_this_page) != head);
-}
-
-
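The cleanup guard rewritten in xfs_vm_direct_IO() above now keys on -EIOCBQUEUED: when the aio layer queues the request, the completion handler owns the ioend, so the submitter must not tear it down. A toy userspace model of that ownership rule (stand-in types; EIOCBQUEUED's value is assumed here, and the real code frees an ioend, not a malloc'd buffer):

#include <stdio.h>
#include <stdlib.h>

#define EIOCBQUEUED 529

struct iocb_model { void *private; };

static void end_io(struct iocb_model *iocb)
{
	free(iocb->private);	/* completion consumes the ioend... */
	iocb->private = NULL;	/* ...and marks it consumed */
}

static long direct_io(struct iocb_model *iocb, int async)
{
	if (async)
		return -EIOCBQUEUED;	/* end_io will run later */
	end_io(iocb);			/* sync path: completion runs now */
	return 4096;
}

int main(void)
{
	struct iocb_model iocb = { .private = malloc(64) };
	long ret = direct_io(&iocb, 1);		/* async: queued, end_io later */

	/* The old "ret <= 0" test also matched -EIOCBQUEUED and would free
	 * the ioend here while the aio was still in flight; keying on
	 * -EIOCBQUEUED frees only when no completion will ever run. */
	if (ret != -EIOCBQUEUED && iocb.private)
		free(iocb.private);

	end_io(&iocb);		/* async completion arrives safely */
	return 0;
}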
-/*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- *    state is cleared before we get here. In this case is it
- *    conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
-
-STATIC int
-linvfs_writepage(
-	struct page		*page,
-	struct writeback_control *wbc)
-{
-	int			error;
-	int			need_trans;
-	int			delalloc, unmapped, unwritten;
-	struct inode		*inode = page->mapping->host;
-
-	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
-
-	/*
-	 * We need a transaction if:
-	 *  1. There are delalloc buffers on the page
-	 *  2. The page is uptodate and we have unmapped buffers
-	 *  3. The page is uptodate and we have no buffers
-	 *  4. There are unwritten buffers on the page
-	 */
-
-	if (!page_has_buffers(page)) {
-		unmapped = 1;
-		need_trans = 1;
-	} else {
-		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-		if (!PageUptodate(page))
-			unmapped = 0;
-		need_trans = delalloc + unmapped + unwritten;
-	}
-
-	/*
-	 * If we need a transaction and the process flags say
-	 * we are already in a transaction, or no IO is allowed
-	 * then mark the page dirty again and leave the page
-	 * as is.
-	 */
-	if (PFLAGS_TEST_FSTRANS() && need_trans)
-		goto out_fail;
-
-	/*
-	 * Delay hooking up buffer heads until we have
-	 * made our go/no-go decision.
-	 */
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-
-	/*
-	 * Convert delayed allocate, unwritten or unmapped space
-	 * to real space and flush out to disk.
-	 */
-	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
-	if (error == -EAGAIN)
-		goto out_fail;
-	if (unlikely(error < 0))
-		goto out_unlock;
-
-	return 0;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return 0;
-out_unlock:
-	unlock_page(page);
-	return error;
-}
-
-STATIC int
-linvfs_invalidate_page(
+xfs_vm_invalidatepage(
 	struct page		*page,
 	unsigned long		offset)
 {
 	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
 			page->mapping->host, page, offset);
-	return block_invalidatepage(page, offset);
-}
-
-/*
- * Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
- * have buffer heads in this call.
- *
- * Returns 0 if the page is ok to release, 1 otherwise.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- *    to via regular I/O. buffer heads will be dirty and possibly
- *    delalloc. If no delalloc buffer heads in this case then we
- *    can just return zero.
- *
- * 2. We are called to release a page which has been written via
- *    mmap, all we need to do is ensure there is no delalloc
- *    state in the buffer heads, if not we can let the caller
- *    free them and we should come back later via writepage.
- */
-STATIC int
-linvfs_release_page(
-	struct page		*page,
-	gfp_t			gfp_mask)
-{
-	struct inode		*inode = page->mapping->host;
-	int			dirty, delalloc, unmapped, unwritten;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = 1,
-	};
-
-	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
-
-	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-	if (!delalloc && !unwritten)
-		goto free_buffers;
-
-	if (!(gfp_mask & __GFP_FS))
-		return 0;
-
-	/* If we are already inside a transaction or the thread cannot
-	 * do I/O, we cannot release this page.
-	 */
-	if (PFLAGS_TEST_FSTRANS())
-		return 0;
-
-	/*
-	 * Convert delalloc space to real space, do not flush the
-	 * data out to disk, that will be done by the caller.
-	 * Never need to allocate space here - we will always
-	 * come back to writepage in that case.
-	 */
-	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
-	if (dirty == 0 && !unwritten)
-		goto free_buffers;
-	return 0;
-
-free_buffers:
-	return try_to_free_buffers(page);
-}
-
-STATIC int
-linvfs_prepare_write(
-	struct file		*file,
-	struct page		*page,
-	unsigned int		from,
-	unsigned int		to)
-{
-	return block_prepare_write(page, from, to, linvfs_get_block);
+	block_invalidatepage(page, offset);
 }
 
-struct address_space_operations linvfs_aops = {
-	.readpage		= linvfs_readpage,
-	.readpages		= linvfs_readpages,
-	.writepage		= linvfs_writepage,
+const struct address_space_operations xfs_address_space_operations = {
+	.readpage		= xfs_vm_readpage,
+	.readpages		= xfs_vm_readpages,
+	.writepage		= xfs_vm_writepage,
+	.writepages		= xfs_vm_writepages,
 	.sync_page		= block_sync_page,
-	.releasepage		= linvfs_release_page,
-	.invalidatepage		= linvfs_invalidate_page,
-	.prepare_write		= linvfs_prepare_write,
+	.releasepage		= xfs_vm_releasepage,
+	.invalidatepage		= xfs_vm_invalidatepage,
+	.prepare_write		= xfs_vm_prepare_write,
 	.commit_write		= generic_commit_write,
-	.bmap			= linvfs_bmap,
-	.direct_IO		= linvfs_direct_IO,
+	.bmap			= xfs_vm_bmap,
+	.direct_IO		= xfs_vm_direct_IO,
 	.migratepage		= buffer_migrate_page,
 };
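The renamed table at the end is what the rest of XFS plugs into each inode's page cache. A kernel-context sketch of how such a table is typically attached (not standalone and not XFS's actual inode-setup path; example_iget() is a hypothetical caller):

#include <linux/fs.h>

extern const struct address_space_operations xfs_address_space_operations;

/* Hypothetical lookup helper: every page-cache operation on the
 * returned inode now routes through the table defined above. */
static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (inode && (inode->i_state & I_NEW)) {
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		unlock_new_inode(inode);
	}
	return inode;
}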