linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 200159f..a980736 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1,39 +1,26 @@
 /*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation.
  *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like.         Any license provided herein, whether implied or
- * otherwise, applies only to this software file.  Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA  94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-
 #include "xfs.h"
-#include "xfs_inum.h"
+#include "xfs_bit.h"
 #include "xfs_log.h"
+#include "xfs_inum.h"
 #include "xfs_sb.h"
+#include "xfs_ag.h"
 #include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_trans.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
+#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
 #include "xfs_error.h"
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
-STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
-               struct writeback_control *wbc, void *, int, int);
 
 #if defined(XFS_RW_TRACE)
 void
@@ -68,17 +54,15 @@ xfs_page_trace(
        int             mask)
 {
        xfs_inode_t     *ip;
-       bhv_desc_t      *bdp;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
-       loff_t          offset = page->index << PAGE_CACHE_SHIFT;
+       loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;
 
        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-       bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-       ip = XFS_BHVTOI(bdp);
+       ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;
 
@@ -104,67 +88,125 @@ xfs_page_trace(
 #define xfs_page_trace(tag, inode, page, mask)
 #endif
 
-void
-linvfs_unwritten_done(
-       struct buffer_head      *bh,
-       int                     uptodate)
+/*
+ * Schedule IO completion handling on an xfsdatad if this was
+ * the final hold on this ioend.
+ */
+STATIC void
+xfs_finish_ioend(
+       xfs_ioend_t             *ioend)
 {
-       xfs_buf_t               *pb = (xfs_buf_t *)bh->b_private;
+       if (atomic_dec_and_test(&ioend->io_remaining))
+               queue_work(xfsdatad_workqueue, &ioend->io_work);
+}
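
The io_remaining counter is a plain completion count; a condensed view of its lifecycle, pieced together from the call sites elsewhere in this patch (the ordering is the illustrative part):

        /* xfs_alloc_ioend(): bias the count so no early bio completion
         * can queue the work item while submission is still in flight */
        atomic_set(&ioend->io_remaining, 1);

        /* xfs_submit_ioend_bio(): one extra hold per outstanding bio */
        atomic_inc(&ioend->io_remaining);
        submit_bio(WRITE, bio);

        /* xfs_end_bio() on each bio completion, and xfs_submit_ioend()
         * once after the submission loop: drop holds; whoever drops the
         * last one queues the work item on an xfsdatad */
        xfs_finish_ioend(ioend);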
 
-       ASSERT(buffer_unwritten(bh));
-       bh->b_end_io = NULL;
-       clear_buffer_unwritten(bh);
-       if (!uptodate)
-               pagebuf_ioerror(pb, EIO);
-       if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
-               pagebuf_iodone(pb, 1, 1);
+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release the hold on the vnode, and finally free
+ * up memory.  Do not use the ioend after this.
+ */
+STATIC void
+xfs_destroy_ioend(
+       xfs_ioend_t             *ioend)
+{
+       struct buffer_head      *bh, *next;
+
+       for (bh = ioend->io_buffer_head; bh; bh = next) {
+               next = bh->b_private;
+               bh->b_end_io(bh, ioend->io_uptodate);
        }
-       end_buffer_async_write(bh, uptodate);
+
+       vn_iowake(ioend->io_vnode);
+       mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
- * Issue transactions to convert a buffer range from unwritten
- * to written extents (buffered IO).
+ * Buffered IO write completion for delayed allocate extents.
+ * TODO: Update ondisk isize now that we know the file data
+ * has been flushed (i.e. the notorious "NULL file" problem).
  */
 STATIC void
-linvfs_unwritten_convert(
-       xfs_buf_t       *bp)
+xfs_end_bio_delalloc(
+       void                    *data)
 {
-       vnode_t         *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
-       int             error;
+       xfs_ioend_t             *ioend = data;
+
+       xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Buffered IO write completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_written(
+       void                    *data)
+{
+       xfs_ioend_t             *ioend = data;
 
-       BUG_ON(atomic_read(&bp->pb_hold) < 1);
-       VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
-                       BMAPI_UNWRITTEN, NULL, NULL, error);
-       XFS_BUF_SET_FSPRIVATE(bp, NULL);
-       XFS_BUF_CLR_IODONE_FUNC(bp);
-       XFS_BUF_UNDATAIO(bp);
-       iput(LINVFS_GET_IP(vp));
-       pagebuf_iodone(bp, 0, 0);
+       xfs_destroy_ioend(ioend);
 }
 
 /*
+ * IO write completion for unwritten extents.
+ *
  * Issue transactions to convert a buffer range from unwritten
- * to written extents (direct IO).
+ * to written extents.
  */
 STATIC void
-linvfs_unwritten_convert_direct(
-       struct inode    *inode,
-       loff_t          offset,
-       ssize_t         size,
-       void            *private)
+xfs_end_bio_unwritten(
+       void                    *data)
+{
+       xfs_ioend_t             *ioend = data;
+       vnode_t                 *vp = ioend->io_vnode;
+       xfs_off_t               offset = ioend->io_offset;
+       size_t                  size = ioend->io_size;
+       int                     error;
+
+       if (ioend->io_uptodate)
+               VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+       xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Allocate and initialise an IO completion structure.
+ * We need to track unwritten extent write completion here initially.
+ * We'll need to extend this for updating the ondisk inode size later
+ * (vs. incore size).
+ */
+STATIC xfs_ioend_t *
+xfs_alloc_ioend(
+       struct inode            *inode,
+       unsigned int            type)
 {
-       ASSERT(!private || inode == (struct inode *)private);
+       xfs_ioend_t             *ioend;
+
+       ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
 
-       /* private indicates an unwritten extent lay beneath this IO,
-        * see linvfs_get_block_core.
+       /*
+        * Set the count to 1 initially, so that an I/O completion
+        * arriving before we have submitted all the I/O cannot call
+        * the completion routine too early.
         */
-       if (private && size > 0) {
-               vnode_t *vp = LINVFS_GET_VP(inode);
-               int     error;
+       atomic_set(&ioend->io_remaining, 1);
+       ioend->io_uptodate = 1; /* cleared if any I/O fails */
+       ioend->io_list = NULL;
+       ioend->io_type = type;
+       ioend->io_vnode = LINVFS_GET_VP(inode);
+       ioend->io_buffer_head = NULL;
+       ioend->io_buffer_tail = NULL;
+       atomic_inc(&ioend->io_vnode->v_iocount);
+       ioend->io_offset = 0;
+       ioend->io_size = 0;
+
+       if (type == IOMAP_UNWRITTEN)
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+       else if (type == IOMAP_DELAY)
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+       else
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
 
-               VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-       }
+       return ioend;
 }
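
For reference, the fields initialised above imply a completion structure along these lines (a sketch reconstructed from the accesses in this patch; the real definition lives in xfs_aops.h):

        typedef struct xfs_ioend {
                struct xfs_ioend        *io_list;       /* next ioend in chain */
                unsigned int            io_type;        /* delalloc / unwritten */
                unsigned int            io_uptodate;    /* I/O status */
                atomic_t                io_remaining;   /* hold count */
                struct vnode            *io_vnode;      /* file being written */
                struct buffer_head      *io_buffer_head;/* buffer list head */
                struct buffer_head      *io_buffer_tail;/* buffer list tail */
                size_t                  io_size;        /* size of the extent */
                xfs_off_t               io_offset;      /* offset in the file */
                struct work_struct      io_work;        /* xfsdatad work item */
        } xfs_ioend_t;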
 
 STATIC int
@@ -172,162 +214,295 @@ xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
-       xfs_iomap_t             *iomapp,
+       xfs_iomap_t             *mapp,
        int                     flags)
 {
        vnode_t                 *vp = LINVFS_GET_VP(inode);
-       int                     error, niomaps = 1;
-
-       if (((flags & (BMAPI_DIRECT|BMAPI_SYNC)) == BMAPI_DIRECT) &&
-           (offset >= i_size_read(inode)))
-               count = max_t(ssize_t, count, XFS_WRITE_IO_LOG);
-retry:
-       VOP_BMAP(vp, offset, count, flags, iomapp, &niomaps, error);
-       if ((error == EAGAIN) || (error == EIO))
-               return -error;
-       if (unlikely((flags & (BMAPI_WRITE|BMAPI_DIRECT)) ==
-                                       (BMAPI_WRITE|BMAPI_DIRECT) && niomaps &&
-                                       (iomapp->iomap_flags & IOMAP_DELAY))) {
-               flags = BMAPI_ALLOCATE;
-               goto retry;
-       }
-       if (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
+       int                     error, nmaps = 1;
+
+       VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+       if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
-       }
        return -error;
 }
 
+STATIC inline int
+xfs_iomap_valid(
+       xfs_iomap_t             *iomapp,
+       loff_t                  offset)
+{
+       return offset >= iomapp->iomap_offset &&
+               offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+}
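
This predicate is what lets one VOP_BMAP result be reused across many consecutive buffers. For example, a mapping with iomap_offset = 0 and iomap_bsize = 65536 stays valid for every offset below 64 KiB (illustrative values):

        xfs_iomap_t map = { .iomap_offset = 0, .iomap_bsize = 65536 };

        xfs_iomap_valid(&map, 61440);   /* 1: 60 KiB is inside the mapping */
        xfs_iomap_valid(&map, 65536);   /* 0: first byte past the mapping  */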
+
 /*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
+ * BIO completion handler for buffered IO.
  */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
+STATIC int
+xfs_end_bio(
+       struct bio              *bio,
+       unsigned int            bytes_done,
+       int                     error)
+{
+       xfs_ioend_t             *ioend = bio->bi_private;
+
+       if (bio->bi_size)
+               return 1;
+
+       ASSERT(ioend);
+       ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+
+       /* Toss bio and pass work off to an xfsdatad thread */
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+               ioend->io_uptodate = 0;
+       bio->bi_private = NULL;
+       bio->bi_end_io = NULL;
+
+       bio_put(bio);
+       xfs_finish_ioend(ioend);
+       return 0;
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+       xfs_ioend_t     *ioend,
+       struct bio      *bio)
+{
+       atomic_inc(&ioend->io_remaining);
+
+       bio->bi_private = ioend;
+       bio->bi_end_io = xfs_end_bio;
+
+       submit_bio(WRITE, bio);
+       ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
+       bio_put(bio);
+}
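
The bio reference counting here pairs with xfs_alloc_ioend_bio() below: the extra bio_get() taken at allocation keeps the bio alive across submit_bio(), so the BIO_EOPNOTSUPP check is safe even if the I/O completes (and xfs_end_bio() runs) before submit_bio() returns. The pairing, condensed:

        bio_get(bio);           /* in xfs_alloc_ioend_bio(): extra hold */
        submit_bio(WRITE, bio); /* may complete before this returns */
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));  /* bio still valid */
        bio_put(bio);           /* drop our hold; the completion path's
                                 * bio_put() in xfs_end_bio() frees it */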
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+       struct buffer_head      *bh)
+{
+       struct bio              *bio;
+       int                     nvecs = bio_get_nr_vecs(bh->b_bdev);
+
+       do {
+               bio = bio_alloc(GFP_NOIO, nvecs);
+               nvecs >>= 1;
+       } while (!bio);
+
+       ASSERT(bio->bi_private == NULL);
+       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_bdev = bh->b_bdev;
+       bio_get(bio);
+       return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+       struct buffer_head      *bh)
+{
+       ASSERT(buffer_mapped(bh));
+       ASSERT(buffer_locked(bh));
+       ASSERT(!buffer_delay(bh));
+       ASSERT(!buffer_unwritten(bh));
+
+       mark_buffer_async_write(bh);
+       set_buffer_uptodate(bh);
+       clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
        struct page             *page,
-       xfs_iomap_t             *iomapp,
-       unsigned long           offset)
+       struct writeback_control *wbc,
+       int                     clear_dirty,
+       int                     buffers)
 {
-       loff_t                  full_offset;    /* offset from start of file */
+       ASSERT(PageLocked(page));
+       ASSERT(!PageWriteback(page));
+       set_page_writeback(page);
+       if (clear_dirty)
+               clear_page_dirty(page);
+       unlock_page(page);
+       if (!buffers) {
+               end_page_writeback(page);
+               wbc->pages_skipped++;   /* We didn't write this page */
+       }
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+       return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we go, we can end up with a page that only has some of its
+ * buffers marked async write, and I/O completion on those can occur before
+ * we mark the remaining buffers async write.
+ *
+ * The end result of this is that we trip a bug in end_page_writeback() because
+ * we call it twice for the one page as the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on the
+ * bufferheads, and a second to submit them for I/O.
+ */
+STATIC void
+xfs_submit_ioend(
+       xfs_ioend_t             *ioend)
+{
+       xfs_ioend_t             *head = ioend;
+       xfs_ioend_t             *next;
+       struct buffer_head      *bh;
+       struct bio              *bio;
+       sector_t                lastblock = 0;
+
+       /* Pass 1 - start writeback */
+       do {
+               next = ioend->io_list;
+               for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+                       xfs_start_buffer_writeback(bh);
+               }
+       } while ((ioend = next) != NULL);
+
+       /* Pass 2 - submit I/O */
+       ioend = head;
+       do {
+               next = ioend->io_list;
+               bio = NULL;
+
+               for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+
+                       if (!bio) {
+ retry:
+                               bio = xfs_alloc_ioend_bio(bh);
+                       } else if (bh->b_blocknr != lastblock + 1) {
+                               xfs_submit_ioend_bio(ioend, bio);
+                               goto retry;
+                       }
+
+                       if (bio_add_buffer(bio, bh) != bh->b_size) {
+                               xfs_submit_ioend_bio(ioend, bio);
+                               goto retry;
+                       }
 
-       ASSERT(offset < PAGE_CACHE_SIZE);
+                       lastblock = bh->b_blocknr;
+               }
+               if (bio)
+                       xfs_submit_ioend_bio(ioend, bio);
+               xfs_finish_ioend(ioend);
+       } while ((ioend = next) != NULL);
+}
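
The interleaving the two passes avoid, spelled out (a sketch; A and B are two ioends with buffers on the same page):

        /*
         * Single pass (buggy):
         *   1. mark A's buffers async write, submit A's bio
         *   2. A's bio completes; end_buffer_async_write() finds no other
         *      async write buffers on the page -> end_page_writeback()
         *   3. mark B's buffers async write, submit B's bio
         *   4. B's bio completes -> end_page_writeback() again -> BUG
         *
         * Two passes: steps 1 and 3 both happen before any bio is
         * submitted, so the page completes exactly once, at step 4.
         */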
+
+/*
+ * Cancel submission of all buffer_heads so far in this ioend.
+ * Toss the ioend too.  Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+       xfs_ioend_t             *ioend)
+{
+       xfs_ioend_t             *next;
+       struct buffer_head      *bh, *next_bh;
+
+       do {
+               next = ioend->io_list;
+               bh = ioend->io_buffer_head;
+               do {
+                       next_bh = bh->b_private;
+                       clear_buffer_async_write(bh);
+                       unlock_buffer(bh);
+               } while ((bh = next_bh) != NULL);
+
+               vn_iowake(ioend->io_vnode);
+               mempool_free(ioend, xfs_ioend_pool);
+       } while ((ioend = next) != NULL);
+}
 
-       full_offset = page->index;              /* NB: using 64bit number */
-       full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
-       full_offset += offset;                  /* offset from page start */
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we close off the current ioend and chain a new
+ * one onto it via io_list.
+ */
+STATIC void
+xfs_add_to_ioend(
+       struct inode            *inode,
+       struct buffer_head      *bh,
+       xfs_off_t               offset,
+       unsigned int            type,
+       xfs_ioend_t             **result,
+       int                     need_ioend)
+{
+       xfs_ioend_t             *ioend = *result;
+
+       if (!ioend || need_ioend || type != ioend->io_type) {
+               xfs_ioend_t     *previous = *result;
+
+               ioend = xfs_alloc_ioend(inode, type);
+               ioend->io_offset = offset;
+               ioend->io_buffer_head = bh;
+               ioend->io_buffer_tail = bh;
+               if (previous)
+                       previous->io_list = ioend;
+               *result = ioend;
+       } else {
+               ioend->io_buffer_tail->b_private = bh;
+               ioend->io_buffer_tail = bh;
+       }
 
-       if (full_offset < iomapp->iomap_offset)
-               return NULL;
-       if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
-               return iomapp;
-       return NULL;
+       bh->b_private = NULL;
+       ioend->io_size += bh->b_size;
 }
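
The net effect of repeated calls is a list of ioends chained through io_list, each owning a run of buffer_heads chained through b_private; this is exactly the shape xfs_submit_ioend() and xfs_destroy_ioend() walk. Schematically:

        /*
         *   iohead --io_list--> ioend(UNWRITTEN) --io_list--> ioend(DELAY)
         *                        |                             |
         *                        bh -> bh -> bh -> NULL        bh -> NULL
         *                             (chained via b_private)
         */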
 
 STATIC void
 xfs_map_at_offset(
-       struct page             *page,
        struct buffer_head      *bh,
-       unsigned long           offset,
+       loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
 {
        xfs_daddr_t             bn;
-       loff_t                  delta;
        int                     sector_shift;
 
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
        ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
 
-       delta = page->index;
-       delta <<= PAGE_CACHE_SHIFT;
-       delta += offset;
-       delta -= iomapp->iomap_offset;
-       delta >>= block_bits;
-
        sector_shift = block_bits - BBSHIFT;
-       bn = iomapp->iomap_bn >> sector_shift;
-       bn += delta;
+       bn = (iomapp->iomap_bn >> sector_shift) +
+             ((offset - iomapp->iomap_offset) >> block_bits);
+
+       ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
        ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
        lock_buffer(bh);
        bh->b_blocknr = bn;
-       bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+       bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
+       clear_buffer_unwritten(bh);
 }
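
A worked example of the block-number arithmetic, assuming 4 KiB filesystem blocks (block_bits = 12) and iomap_bn expressed in 512-byte basic blocks (BBSHIFT = 9, so sector_shift = 3):

        /*
         * iomap_bn = 800, iomap_offset = 0, offset = 8192:
         *
         *   bn = (800 >> 3) + ((8192 - 0) >> 12)
         *      = 100 + 2
         *      = 102
         *
         * i.e. the third block of the mapping lands on filesystem
         * block 102 of the underlying device.
         */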
 
 /*
- * Look for a page at index which is unlocked and contains our
- * unwritten extent flagged buffers at its head.  Returns page
- * locked and with an extra reference count, and length of the
- * unwritten extent component on this page that we can write,
- * in units of filesystem blocks.
- */
-STATIC struct page *
-xfs_probe_unwritten_page(
-       struct address_space    *mapping,
-       pgoff_t                 index,
-       xfs_iomap_t             *iomapp,
-       xfs_buf_t               *pb,
-       unsigned long           max_offset,
-       unsigned long           *fsbs,
-       unsigned int            bbits)
-{
-       struct page             *page;
-
-       page = find_trylock_page(mapping, index);
-       if (!page)
-               return 0;
-       if (PageWriteback(page))
-               goto out;
-
-       if (page->mapping && page_has_buffers(page)) {
-               struct buffer_head      *bh, *head;
-               unsigned long           p_offset = 0;
-
-               *fsbs = 0;
-               bh = head = page_buffers(page);
-               do {
-                       if (!buffer_unwritten(bh))
-                               break;
-                       if (!xfs_offset_to_map(page, iomapp, p_offset))
-                               break;
-                       if (p_offset >= max_offset)
-                               break;
-                       xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
-                       set_buffer_unwritten_io(bh);
-                       bh->b_private = pb;
-                       p_offset += bh->b_size;
-                       (*fsbs)++;
-               } while ((bh = bh->b_this_page) != head);
-
-               if (p_offset)
-                       return page;
-       }
-
-out:
-       unlock_page(page);
-       return NULL;
-}
-
-/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
-       struct address_space    *mapping,
-       pgoff_t                 index,
-       unsigned int            pg_offset)
+xfs_probe_page(
+       struct page             *page,
+       unsigned int            pg_offset,
+       int                     mapped)
 {
-       struct page             *page;
        int                     ret = 0;
 
-       page = find_trylock_page(mapping, index);
-       if (!page)
-               return 0;
        if (PageWriteback(page))
-               goto out;
+               return 0;
 
        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
@@ -335,79 +510,101 @@ xfs_probe_unmapped_page(
 
                        bh = head = page_buffers(page);
                        do {
-                               if (buffer_mapped(bh) || !buffer_uptodate(bh))
+                               if (!buffer_uptodate(bh))
+                                       break;
+                               if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
-                       ret = PAGE_CACHE_SIZE;
+                       ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }
 
-out:
-       unlock_page(page);
        return ret;
 }
 
-STATIC unsigned int
-xfs_probe_unmapped_cluster(
+STATIC size_t
+xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
-       struct buffer_head      *head)
+       struct buffer_head      *head,
+       int                     mapped)
 {
+       struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
-       unsigned int            pg_offset, len, total = 0;
-       struct address_space    *mapping = inode->i_mapping;
+       size_t                  total = 0;
+       int                     done = 0, i;
 
        /* First sum forwards in this page */
        do {
-               if (buffer_mapped(bh))
-                       break;
+               if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
+                       return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
 
-       /* If we reached the end of the page, sum forwards in
-        * following pages.
-        */
-       if (bh == head) {
-               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-               /* Prune this back to avoid pathological behavior */
-               tloff = min(tlast, startpage->index + 64);
-               for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-                       len = xfs_probe_unmapped_page(mapping, tindex,
-                                                       PAGE_CACHE_SIZE);
-                       if (!len)
-                               return total;
+       /* if we reached the end of the page, sum forwards in following pages */
+       tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+       tindex = startpage->index + 1;
+
+       /* Prune this back to avoid pathological behavior */
+       tloff = min(tlast, startpage->index + 64);
+
+       pagevec_init(&pvec, 0);
+       while (!done && tindex <= tloff) {
+               unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+               if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+                       break;
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       size_t pg_offset, len = 0;
+
+                       if (tindex == tlast) {
+                               pg_offset =
+                                   i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+                               if (!pg_offset) {
+                                       done = 1;
+                                       break;
+                               }
+                       } else
+                               pg_offset = PAGE_CACHE_SIZE;
+
+                       if (page->index == tindex && !TestSetPageLocked(page)) {
+                               len = xfs_probe_page(page, pg_offset, mapped);
+                               unlock_page(page);
+                       }
+
+                       if (!len) {
+                               done = 1;
+                               break;
+                       }
+
                        total += len;
+                       tindex++;
                }
-               if (tindex == tlast &&
-                   (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-                       total += xfs_probe_unmapped_page(mapping,
-                                                       tindex, pg_offset);
-               }
+
+               pagevec_release(&pvec);
+               cond_resched();
        }
+
        return total;
 }
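
The loop above is the stock batched page-cache walk of this era: pagevec_lookup() returns up to PAGEVEC_SIZE pages in index order, but the run may be sparse, which is why page->index is compared against tindex before a page is counted. The bare idiom, as a sketch (mapping and start are assumed inputs):

        struct pagevec pvec;
        pgoff_t index = start;
        int i, done = 0;

        pagevec_init(&pvec, 0);
        while (!done && pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index != index) {     /* hole in the run */
                                done = 1;
                                break;
                        }
                        /* per-page work goes here, on a referenced page */
                        index++;
                }
                pagevec_release(&pvec);
                cond_resched();
        }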
 
 /*
- * Probe for a given page (index) in the inode and test if it is delayed
- * and without unwritten buffers.  Returns page locked and with an extra
- * reference count.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delalloc_page(
-       struct inode            *inode,
-       pgoff_t                 index)
+STATIC int
+xfs_is_delayed_page(
+       struct page             *page,
+       unsigned int            type)
 {
-       struct page             *page;
-
-       page = find_trylock_page(inode->i_mapping, index);
-       if (!page)
-               return NULL;
        if (PageWriteback(page))
-               goto out;
+               return 0;
 
        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
@@ -415,259 +612,157 @@ xfs_probe_delalloc_page(
 
                bh = head = page_buffers(page);
                do {
-                       if (buffer_unwritten(bh)) {
-                               acceptable = 0;
+                       if (buffer_unwritten(bh))
+                               acceptable = (type == IOMAP_UNWRITTEN);
+                       else if (buffer_delay(bh))
+                               acceptable = (type == IOMAP_DELAY);
+                       else if (buffer_dirty(bh) && buffer_mapped(bh))
+                               acceptable = (type == 0);
+                       else
                                break;
-                       } else if (buffer_delay(bh)) {
-                               acceptable = 1;
-                       }
                } while ((bh = bh->b_this_page) != head);
 
                if (acceptable)
-                       return page;
-       }
-
-out:
-       unlock_page(page);
-       return NULL;
-}
-
-STATIC int
-xfs_map_unwritten(
-       struct inode            *inode,
-       struct page             *start_page,
-       struct buffer_head      *head,
-       struct buffer_head      *curr,
-       unsigned long           p_offset,
-       int                     block_bits,
-       xfs_iomap_t             *iomapp,
-       struct writeback_control *wbc,
-       int                     startio,
-       int                     all_bh)
-{
-       struct buffer_head      *bh = curr;
-       xfs_iomap_t             *tmp;
-       xfs_buf_t               *pb;
-       loff_t                  offset, size;
-       unsigned long           nblocks = 0;
-
-       offset = start_page->index;
-       offset <<= PAGE_CACHE_SHIFT;
-       offset += p_offset;
-
-       /* get an "empty" pagebuf to manage IO completion
-        * Proper values will be set before returning */
-       pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
-       if (!pb)
-               return -EAGAIN;
-
-       /* Take a reference to the inode to prevent it from
-        * being reclaimed while we have outstanding unwritten
-        * extent IO on it.
-        */
-       if ((igrab(inode)) != inode) {
-               pagebuf_free(pb);
-               return -EAGAIN;
-       }
-
-       /* Set the count to 1 initially, this will stop an I/O
-        * completion callout which happens before we have started
-        * all the I/O from calling pagebuf_iodone too early.
-        */
-       atomic_set(&pb->pb_io_remaining, 1);
-
-       /* First map forwards in the page consecutive buffers
-        * covering this unwritten extent
-        */
-       do {
-               if (!buffer_unwritten(bh))
-                       break;
-               tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
-               if (!tmp)
-                       break;
-               xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
-               set_buffer_unwritten_io(bh);
-               bh->b_private = pb;
-               p_offset += bh->b_size;
-               nblocks++;
-       } while ((bh = bh->b_this_page) != head);
-
-       atomic_add(nblocks, &pb->pb_io_remaining);
-
-       /* If we reached the end of the page, map forwards in any
-        * following pages which are also covered by this extent.
-        */
-       if (bh == head) {
-               struct address_space    *mapping = inode->i_mapping;
-               pgoff_t                 tindex, tloff, tlast;
-               unsigned long           bs;
-               unsigned int            pg_offset, bbits = inode->i_blkbits;
-               struct page             *page;
-
-               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-               tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
-               tloff = min(tlast, tloff);
-               for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
-                       page = xfs_probe_unwritten_page(mapping,
-                                               tindex, iomapp, pb,
-                                               PAGE_CACHE_SIZE, &bs, bbits);
-                       if (!page)
-                               break;
-                       nblocks += bs;
-                       atomic_add(bs, &pb->pb_io_remaining);
-                       xfs_convert_page(inode, page, iomapp, wbc, pb,
-                                                       startio, all_bh);
-                       /* stop if converting the next page might add
-                        * enough blocks that the corresponding byte
-                        * count won't fit in our ulong page buf length */
-                       if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-                               goto enough;
-               }
-
-               if (tindex == tlast &&
-                   (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
-                       page = xfs_probe_unwritten_page(mapping,
-                                                       tindex, iomapp, pb,
-                                                       pg_offset, &bs, bbits);
-                       if (page) {
-                               nblocks += bs;
-                               atomic_add(bs, &pb->pb_io_remaining);
-                               xfs_convert_page(inode, page, iomapp, wbc, pb,
-                                                       startio, all_bh);
-                               if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-                                       goto enough;
-                       }
-               }
-       }
-
-enough:
-       size = nblocks;         /* NB: using 64bit number here */
-       size <<= block_bits;    /* convert fsb's to byte range */
-
-       XFS_BUF_DATAIO(pb);
-       XFS_BUF_ASYNC(pb);
-       XFS_BUF_SET_SIZE(pb, size);
-       XFS_BUF_SET_COUNT(pb, size);
-       XFS_BUF_SET_OFFSET(pb, offset);
-       XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
-       XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
-
-       if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
-               pagebuf_iodone(pb, 1, 1);
+                       return 1;
        }
 
        return 0;
 }
 
-STATIC void
-xfs_submit_page(
-       struct page             *page,
-       struct buffer_head      *bh_arr[],
-       int                     cnt)
-{
-       struct buffer_head      *bh;
-       int                     i;
-
-       BUG_ON(PageWriteback(page));
-       set_page_writeback(page);
-       clear_page_dirty(page);
-       unlock_page(page);
-
-       if (cnt) {
-               for (i = 0; i < cnt; i++) {
-                       bh = bh_arr[i];
-                       mark_buffer_async_write(bh);
-                       if (buffer_unwritten(bh))
-                               set_buffer_unwritten_io(bh);
-                       set_buffer_uptodate(bh);
-                       clear_buffer_dirty(bh);
-               }
-
-               for (i = 0; i < cnt; i++)
-                       submit_bh(WRITE, bh_arr[i]);
-       } else
-               end_page_writeback(page);
-}
-
 /*
  * Allocate & map buffers for page given the extent map. Write it out.
  * Except for the original page of a writepage, this is called on
  * delalloc/unwritten pages only; for the original page it is possible
  * that the page has no mapping at all.
  */
-STATIC void
+STATIC int
 xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
-       xfs_iomap_t             *iomapp,
+       loff_t                  tindex,
+       xfs_iomap_t             *mp,
+       xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
-       void                    *private,
        int                     startio,
        int                     all_bh)
 {
-       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-       xfs_iomap_t             *mp = iomapp, *tmp;
-       unsigned long           end, offset;
-       pgoff_t                 end_index;
-       int                     i = 0, index = 0;
+       struct buffer_head      *bh, *head;
+       xfs_off_t               end_offset;
+       unsigned long           p_offset;
+       unsigned int            type;
        int                     bbits = inode->i_blkbits;
+       int                     len, page_dirty;
+       int                     count = 0, done = 0, uptodate = 1;
+       xfs_off_t               offset = page_offset(page);
+
+       if (page->index != tindex)
+               goto fail;
+       if (TestSetPageLocked(page))
+               goto fail;
+       if (PageWriteback(page))
+               goto fail_unlock_page;
+       if (page->mapping != inode->i_mapping)
+               goto fail_unlock_page;
+       if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+               goto fail_unlock_page;
+
+       /*
+        * page_dirty is initially a count of buffers on the page before
+        * EOF and is decremented as we move each into a cleanable state.
+        *
+        * Derivation:
+        *
+        * End offset is the highest offset that this page should represent.
+        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+        * hence give us the correct page_dirty count. On any other page,
+        * it will be zero and in that case we need page_dirty to be the
+        * count of buffers on the page.  For example, with 512-byte
+        * blocks and i_size ending 100 bytes into the page, p_offset
+        * rounds up to 512 and page_dirty starts at 1.
+        */
+       end_offset = min_t(unsigned long long,
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+                       i_size_read(inode));
+
+       len = 1 << inode->i_blkbits;
+       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+                                       PAGE_CACHE_SIZE);
+       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+       page_dirty = p_offset / len;
 
-       end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-       if (page->index < end_index) {
-               end = PAGE_CACHE_SIZE;
-       } else {
-               end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
-       }
        bh = head = page_buffers(page);
        do {
-               offset = i << bbits;
-               if (!(PageUptodate(page) || buffer_uptodate(bh)))
-                       continue;
-               if (buffer_mapped(bh) && all_bh &&
-                   !buffer_unwritten(bh) && !buffer_delay(bh)) {
-                       if (startio && (offset < end)) {
-                               lock_buffer(bh);
-                               bh_arr[index++] = bh;
-                       }
+               if (offset >= end_offset)
+                       break;
+               if (!buffer_uptodate(bh))
+                       uptodate = 0;
+               if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+                       done = 1;
                        continue;
                }
-               tmp = xfs_offset_to_map(page, mp, offset);
-               if (!tmp)
-                       continue;
-               ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
-               ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
 
-               /* If this is a new unwritten extent buffer (i.e. one
-                * that we haven't passed in private data for, we must
-                * now map this buffer too.
-                */
-               if (buffer_unwritten(bh) && !bh->b_end_io) {
-                       ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
-                       xfs_map_unwritten(inode, page, head, bh, offset,
-                                       bbits, tmp, wbc, startio, all_bh);
-               } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
-                       xfs_map_at_offset(page, bh, offset, bbits, tmp);
-                       if (buffer_unwritten(bh)) {
-                               set_buffer_unwritten_io(bh);
-                               bh->b_private = private;
-                               ASSERT(private);
+               if (buffer_unwritten(bh) || buffer_delay(bh)) {
+                       if (buffer_unwritten(bh))
+                               type = IOMAP_UNWRITTEN;
+                       else
+                               type = IOMAP_DELAY;
+
+                       if (!xfs_iomap_valid(mp, offset)) {
+                               done = 1;
+                               continue;
                        }
-               }
-               if (startio && (offset < end)) {
-                       bh_arr[index++] = bh;
+
+                       ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+                       ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+                       xfs_map_at_offset(bh, offset, bbits, mp);
+                       if (startio) {
+                               xfs_add_to_ioend(inode, bh, offset,
+                                               type, ioendp, done);
+                       } else {
+                               set_buffer_dirty(bh);
+                               unlock_buffer(bh);
+                               mark_buffer_dirty(bh);
+                       }
+                       page_dirty--;
+                       count++;
                } else {
-                       set_buffer_dirty(bh);
-                       unlock_buffer(bh);
-                       mark_buffer_dirty(bh);
+                       type = 0;
+                       if (buffer_mapped(bh) && all_bh && startio) {
+                               lock_buffer(bh);
+                               xfs_add_to_ioend(inode, bh, offset,
+                                               type, ioendp, done);
+                               count++;
+                               page_dirty--;
+                       } else {
+                               done = 1;
+                       }
                }
-       } while (i++, (bh = bh->b_this_page) != head);
+       } while (offset += len, (bh = bh->b_this_page) != head);
+
+       if (uptodate && bh == head)
+               SetPageUptodate(page);
 
        if (startio) {
-               wbc->nr_to_write--;
-               xfs_submit_page(page, bh_arr, index);
-       } else {
-               unlock_page(page);
+               if (count) {
+                       struct backing_dev_info *bdi;
+
+                       bdi = inode->i_mapping->backing_dev_info;
+                       wbc->nr_to_write--;
+                       if (bdi_write_congested(bdi)) {
+                               wbc->encountered_congestion = 1;
+                               done = 1;
+                       } else if (wbc->nr_to_write <= 0) {
+                               done = 1;
+                       }
+               }
+               xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }
+
+       return done;
+ fail_unlock_page:
+       unlock_page(page);
+ fail:
+       return 1;
 }
 
 /*
@@ -679,20 +774,31 @@ xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
+       xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
-       int                     all_bh)
+       int                     all_bh,
+       pgoff_t                 tlast)
 {
-       pgoff_t                 tlast;
-       struct page             *page;
+       struct pagevec          pvec;
+       int                     done = 0, i;
+
+       pagevec_init(&pvec, 0);
+       while (!done && tindex <= tlast) {
+               unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
 
-       tlast = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
-       for (; tindex < tlast; tindex++) {
-               page = xfs_probe_delalloc_page(inode, tindex);
-               if (!page)
+               if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;
-               xfs_convert_page(inode, page, iomapp, wbc, NULL,
-                               startio, all_bh);
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+                                       iomapp, ioendp, wbc, startio, all_bh);
+                       if (done)
+                               break;
+               }
+
+               pagevec_release(&pvec);
+               cond_resched();
        }
 }
 
@@ -723,164 +829,203 @@ xfs_page_state_convert(
        int             startio,
        int             unmapped) /* also implies page uptodate */
 {
-       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-       xfs_iomap_t             *iomp, iomap;
-       unsigned long           p_offset = 0;
-       pgoff_t                 end_index;
+       struct buffer_head      *bh, *head;
+       xfs_iomap_t             iomap;
+       xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
-       unsigned long long      end_offset;
-       int                     len, err, i, cnt = 0, uptodate = 1;
-       int                     flags = startio ? 0 : BMAPI_TRYLOCK;
-       int                     page_dirty = 1;
-
-
-       /* Are we off the end of the file ? */
-       end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+       unsigned long           p_offset = 0;
+       unsigned int            type;
+       __uint64_t              end_offset;
+       pgoff_t                 end_index, last_index, tlast;
+       ssize_t                 size, len;
+       int                     flags, err, iomap_valid = 0, uptodate = 1;
+       int                     page_dirty, count = 0, trylock_flag = 0;
+       int                     all_bh = unmapped;
+
+       /* wait for other IO threads? */
+       if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
+               trylock_flag |= BMAPI_TRYLOCK;
+
+       /* Is this page beyond the end of the file? */
+       offset = i_size_read(inode);
+       end_index = offset >> PAGE_CACHE_SHIFT;
+       last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-                       err = -EIO;
-                       goto error;
+                       if (startio)
+                               unlock_page(page);
+                       return 0;
                }
        }
 
-       offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       /*
+        * page_dirty is initially a count of buffers on the page before
+        * EOF and is decremented as we move each into a cleanable state.
+        *
+        * Derivation:
+        *
+        * End offset is the highest offset that this page should represent.
+        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+        * hence give us the correct page_dirty count. On any other page,
+        * it will be zero and in that case we need page_dirty to be the
+        * count of buffers on the page.
+        */
        end_offset = min_t(unsigned long long,
-                       offset + PAGE_CACHE_SIZE, i_size_read(inode));
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+       len = 1 << inode->i_blkbits;
+       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+                                       PAGE_CACHE_SIZE);
+       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+       page_dirty = p_offset / len;
 
        bh = head = page_buffers(page);
-       iomp = NULL;
+       offset = page_offset(page);
+       flags = -1;
+       type = 0;
+
+       /* TODO: cleanup count and page_dirty */
 
-       len = bh->b_size;
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
-               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
+                       /*
+                        * The iomap is actually still valid, but the ioend
+                        * isn't.  This shouldn't happen too often.
+                        */
+                       iomap_valid = 0;
                        continue;
-
-               if (iomp) {
-                       iomp = xfs_offset_to_map(page, &iomap, p_offset);
                }
 
+               if (iomap_valid)
+                       iomap_valid = xfs_iomap_valid(&iomap, offset);
+
                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
-                */
-               if (buffer_unwritten(bh)) {
-                       if (!iomp) {
-                               err = xfs_map_blocks(inode, offset, len, &iomap,
-                                               BMAPI_READ|BMAPI_IGNSTATE);
-                               if (err) {
-                                       goto error;
-                               }
-                               iomp = xfs_offset_to_map(page, &iomap,
-                                                               p_offset);
-                       }
-                       if (iomp && startio) {
-                               if (!bh->b_end_io) {
-                                       err = xfs_map_unwritten(inode, page,
-                                                       head, bh, p_offset,
-                                                       inode->i_blkbits, iomp,
-                                                       wbc, startio, unmapped);
-                                       if (err) {
-                                               goto error;
-                                       }
-                               }
-                               bh_arr[cnt++] = bh;
-                               page_dirty = 0;
-                       }
-               /*
+                *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
-                */
-               } else if (buffer_delay(bh)) {
-                       if (!iomp) {
-                               err = xfs_map_blocks(inode, offset, len, &iomap,
-                                               BMAPI_ALLOCATE | flags);
-                               if (err) {
-                                       goto error;
+                *
+                * Third case, an unmapped buffer was found, and we are
+                * in a path where we need to write the whole page out.
+                */
+               if (buffer_unwritten(bh) || buffer_delay(bh) ||
+                   ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                    !buffer_mapped(bh) && (unmapped || startio))) {
+                       /*
+                        * Make sure we don't use a read-only iomap
+                        */
+                       if (flags == BMAPI_READ)
+                               iomap_valid = 0;
+
+                       if (buffer_unwritten(bh)) {
+                               type = IOMAP_UNWRITTEN;
+                               flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+                       } else if (buffer_delay(bh)) {
+                               type = IOMAP_DELAY;
+                               flags = BMAPI_ALLOCATE;
+                               if (!startio)
+                                       flags |= trylock_flag;
+                       } else {
+                               type = IOMAP_NEW;
+                               flags = BMAPI_WRITE|BMAPI_MMAP;
+                       }
+
+                       if (!iomap_valid) {
+                               if (type == IOMAP_NEW) {
+                                       size = xfs_probe_cluster(inode,
+                                                       page, bh, head, 0);
+                               } else {
+                                       size = len;
                                }
-                               iomp = xfs_offset_to_map(page, &iomap,
-                                                               p_offset);
+
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
+                                       goto error;
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
-                       if (iomp) {
-                               xfs_map_at_offset(page, bh, p_offset,
-                                               inode->i_blkbits, iomp);
+                       if (iomap_valid) {
+                               xfs_map_at_offset(bh, offset,
+                                               inode->i_blkbits, &iomap);
                                if (startio) {
-                                       bh_arr[cnt++] = bh;
+                                       xfs_add_to_ioend(inode, bh, offset,
+                                                       type, &ioend,
+                                                       !iomap_valid);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
-                               page_dirty = 0;
+                               page_dirty--;
+                               count++;
+                       }
+               } else if (buffer_uptodate(bh) && startio) {
+                       /*
+                        * we got here because the buffer is already mapped.
+                        * That means it must already have extents allocated
+                        * underneath it. Map the extent by reading it.
+                        */
+                       if (!iomap_valid || type != 0) {
+                               flags = BMAPI_READ;
+                               size = xfs_probe_cluster(inode, page, bh,
+                                                               head, 1);
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
+                                       goto error;
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
-               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
-                          (unmapped || startio)) {
 
-                       if (!buffer_mapped(bh)) {
-                               int     size;
-
-                               /*
-                                * Getting here implies an unmapped buffer
-                                * was found, and we are in a path where we
-                                * need to write the whole page out.
-                                */
-                               if (!iomp) {
-                                       size = xfs_probe_unmapped_cluster(
-                                                       inode, page, bh, head);
-                                       err = xfs_map_blocks(inode, offset,
-                                                       size, &iomap,
-                                                       BMAPI_WRITE|BMAPI_MMAP);
-                                       if (err) {
-                                               goto error;
-                                       }
-                                       iomp = xfs_offset_to_map(page, &iomap,
-                                                                    p_offset);
-                               }
-                               if (iomp) {
-                                       xfs_map_at_offset(page,
-                                                       bh, p_offset,
-                                                       inode->i_blkbits, iomp);
-                                       if (startio) {
-                                               bh_arr[cnt++] = bh;
-                                       } else {
-                                               set_buffer_dirty(bh);
-                                               unlock_buffer(bh);
-                                               mark_buffer_dirty(bh);
-                                       }
-                                       page_dirty = 0;
-                               }
-                       } else if (startio) {
-                               if (buffer_uptodate(bh) &&
-                                   !test_and_set_bit(BH_Lock, &bh->b_state)) {
-                                       bh_arr[cnt++] = bh;
-                                       page_dirty = 0;
-                               }
+                       type = 0;
+                       if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+                               ASSERT(buffer_mapped(bh));
+                               if (iomap_valid)
+                                       all_bh = 1;
+                               xfs_add_to_ioend(inode, bh, offset, type,
+                                               &ioend, !iomap_valid);
+                               page_dirty--;
+                               count++;
+                       } else {
+                               iomap_valid = 0;
                        }
+               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                          (unmapped || startio)) {
+                       iomap_valid = 0;
                }
-       } while (offset += len, p_offset += len,
-               ((bh = bh->b_this_page) != head));
+
+               if (!iohead)
+                       iohead = ioend;
+
+       } while (offset += len, ((bh = bh->b_this_page) != head));
 
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
        if (startio)
-               xfs_submit_page(page, bh_arr, cnt);
-
-       if (iomp) {
-               xfs_cluster_write(inode, page->index + 1, iomp, wbc,
-                               startio, unmapped);
+               xfs_start_page_writeback(page, wbc, 1, count);
+
+       if (ioend && iomap_valid) {
+               offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
+                                       PAGE_CACHE_SHIFT;
+               tlast = min_t(pgoff_t, offset, last_index);
+               xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
+                                       wbc, startio, all_bh, tlast);
        }
 
+       if (iohead)
+               xfs_submit_ioend(iohead);
+
        return page_dirty;
 
 error:
-       for (i = 0; i < cnt; i++) {
-               unlock_buffer(bh_arr[i]);
-       }
+       if (iohead)
+               xfs_cancel_ioend(iohead);
 
        /*
         * If it's delalloc and we have nowhere to put it,
@@ -888,16 +1033,15 @@ error:
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
-               if (!unmapped) {
+               if (!unmapped)
                        block_invalidatepage(page, 0);
-               }
                ClearPageUptodate(page);
        }
        return err;
 }
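
The hunks above complete the switch from the old per-page bh_arr[] /
xfs_submit_page() scheme to chained ioends: xfs_add_to_ioend() links each
buffer into the current xfs_ioend, xfs_submit_ioend() pushes the whole
chain to the block layer once the page walk is done, and
xfs_cancel_ioend() tears the chain down on error. For orientation, a
sketch of the descriptor and the error-path teardown as this patch adds
them earlier in the file (reconstructed from the 2.6.16 sources, so
treat field comments and details as approximate):

typedef struct xfs_ioend {
        struct xfs_ioend        *io_list;       /* next ioend in chain */
        unsigned int            io_type;        /* delalloc / unwritten */
        unsigned int            io_uptodate;    /* I/O status */
        atomic_t                io_remaining;   /* hold count */
        struct vnode            *io_vnode;      /* file being written to */
        struct buffer_head      *io_buffer_head;/* buffer linked list head */
        struct buffer_head      *io_buffer_tail;/* buffer linked list end */
        size_t                  io_size;        /* size of the extent */
        xfs_off_t               io_offset;      /* offset in the file */
        struct work_struct      io_work;        /* xfsdatad work queue */
} xfs_ioend_t;

STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        /* walk every ioend in the chain, unlock the buffers queued on
         * it, and free the descriptors without issuing any I/O */
        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                vn_iowake(ioend->io_vnode);
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}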
 
 STATIC int
-linvfs_get_block_core(
+__linvfs_get_block(
        struct inode            *inode,
        sector_t                iblock,
        unsigned long           blocks,
@@ -908,13 +1052,15 @@ linvfs_get_block_core(
 {
        vnode_t                 *vp = LINVFS_GET_VP(inode);
        xfs_iomap_t             iomap;
+       xfs_off_t               offset;
+       ssize_t                 size;
        int                     retpbbm = 1;
        int                     error;
-       ssize_t                 size;
-       loff_t                  offset = (loff_t)iblock << inode->i_blkbits;
 
+       offset = (xfs_off_t)iblock << inode->i_blkbits;
        if (blocks)
-               size = blocks << inode->i_blkbits;
+               size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
+                                       (xfs_off_t)blocks << inode->i_blkbits);
        else
                size = 1 << inode->i_blkbits;
 
@@ -927,8 +1073,8 @@ linvfs_get_block_core(
                return 0;
 
        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
-               xfs_daddr_t             bn;
-               loff_t                  delta;
+               xfs_daddr_t     bn;
+               xfs_off_t       delta;
 
                /* For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
@@ -939,9 +1085,8 @@ linvfs_get_block_core(
 
                        bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
                        bn += delta;
-
+                       BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
                        bh_result->b_blocknr = bn;
-                       bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
                        set_buffer_mapped(bh_result);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
@@ -953,7 +1098,7 @@ linvfs_get_block_core(
        }
 
        /* If this is a realtime file, data might be on a new device */
-       bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+       bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
        /* If we previously allocated a block out beyond eof and
         * we are now coming back to use it then we will need to
@@ -961,26 +1106,24 @@ linvfs_get_block_core(
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
-            (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
+            (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
                set_buffer_new(bh_result);
-       }
 
        if (iomap.iomap_flags & IOMAP_DELAY) {
-               if (unlikely(direct))
-                       BUG();
+               BUG_ON(direct);
                if (create) {
-                       set_buffer_mapped(bh_result);
                        set_buffer_uptodate(bh_result);
+                       set_buffer_mapped(bh_result);
+                       set_buffer_delay(bh_result);
                }
-               bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
-               set_buffer_delay(bh_result);
        }
 
        if (blocks) {
-               loff_t iosize;
-               iosize = (iomap.iomap_bsize - iomap.iomap_delta);
-               bh_result->b_size =
-                   (ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
+               ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
+               offset = min_t(xfs_off_t,
+                               iomap.iomap_bsize - iomap.iomap_delta,
+                               (xfs_off_t)blocks << inode->i_blkbits);
+               bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
        }
 
        return 0;
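
The b_size computation just above is a two-step clamp: first to what is
left of the current extent beyond iomap_delta, then to what fits in the
32-bit b_size field. A worked example with hypothetical numbers, using
the local variables of __linvfs_get_block (4k blocks, 8 blocks
requested, 20k of extent remaining):

        offset = min_t(xfs_off_t,
                        iomap.iomap_bsize - iomap.iomap_delta,  /* 20480 */
                        (xfs_off_t)blocks << inode->i_blkbits); /* 32768 */
        /* offset is now 20480: the extent, not the request, limits us */
        bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
        /* the second clamp only matters for mappings past 4GB-1 */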
@@ -993,21 +1136,10 @@ linvfs_get_block(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return linvfs_get_block_core(inode, iblock, 0, bh_result,
+       return __linvfs_get_block(inode, iblock, 0, bh_result,
                                        create, 0, BMAPI_WRITE);
 }
 
-STATIC int
-linvfs_get_block_sync(
-       struct inode            *inode,
-       sector_t                iblock,
-       struct buffer_head      *bh_result,
-       int                     create)
-{
-       return linvfs_get_block_core(inode, iblock, 0, bh_result,
-                                       create, 0, BMAPI_SYNC|BMAPI_WRITE);
-}
-
 STATIC int
 linvfs_get_blocks_direct(
        struct inode            *inode,
@@ -1016,10 +1148,48 @@ linvfs_get_blocks_direct(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
+       return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
                                        create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
+STATIC void
+linvfs_end_io_direct(
+       struct kiocb    *iocb,
+       loff_t          offset,
+       ssize_t         size,
+       void            *private)
+{
+       xfs_ioend_t     *ioend = iocb->private;
+
+       /*
+        * Non-NULL private data means we need to issue a transaction to
+        * convert a range from unwritten to written extents.  This needs
+        * to happen from process context but aio+dio I/O completion
+        * happens from irq context so we need to defer it to a workqueue.
+        * This is not necessary for synchronous direct I/O, but we do
+        * it anyway to keep the code uniform and simpler.
+        *
+        * The core direct I/O code might be changed to always call the
+        * completion handler in the future, in which case all this can
+        * go away.
+        */
+       if (private && size > 0) {
+               ioend->io_offset = offset;
+               ioend->io_size = size;
+               xfs_finish_ioend(ioend);
+       } else {
+               ASSERT(size >= 0);
+               xfs_destroy_ioend(ioend);
+       }
+
+       /*
+        * blockdev_direct_IO can return an error even after the I/O
+        * completion handler was called.  Thus we need to protect
+        * against double-freeing.
+        */
+       iocb->private = NULL;
+}
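
xfs_finish_ioend() is what gets the conversion out of irq context: it
drops the ioend's hold count and, on the final put, queues the work item
on the xfsdatad workqueue, whose handler runs the unwritten-to-written
conversion in process context. Roughly, per the helpers this patch adds
earlier in the file (reconstructed from the 2.6.16 sources, details
approximate):

void
xfs_finish_ioend(
        xfs_ioend_t             *ioend)
{
        /* the last reference queues the completion work */
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
}

STATIC void
xfs_end_bio_unwritten(
        void                    *data)
{
        xfs_ioend_t             *ioend = data;
        vnode_t                 *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        int                     error;

        /* now in process context: safe to run a transaction */
        if (ioend->io_uptodate)
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN,
                         NULL, NULL, error);
        xfs_destroy_ioend(ioend);
}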
+
 STATIC ssize_t
 linvfs_direct_IO(
        int                     rw,
@@ -1034,16 +1204,23 @@ linvfs_direct_IO(
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
+       ssize_t         ret;
 
        VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
        if (error)
                return -error;
 
-       return blockdev_direct_IO_no_locking(rw, iocb, inode,
-               iomap.iomap_target->pbr_bdev,
+       iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
+
+       ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+               iomap.iomap_target->bt_bdev,
                iov, offset, nr_segs,
                linvfs_get_blocks_direct,
-               linvfs_unwritten_convert_direct);
+               linvfs_end_io_direct);
+
+       if (unlikely(ret <= 0 && iocb->private))
+               xfs_destroy_ioend(iocb->private);
+       return ret;
 }
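
Note the ownership protocol for iocb->private: the ioend is allocated up
front, a successful run has linvfs_end_io_direct() consume it and clear
iocb->private, and only an early failure, where the completion handler
may never have run, leaves it for the caller. Restating the cleanup
invariant as a comment (same code as above, annotated):

        /* if the completion handler ran, it cleared iocb->private; a
         * non-NULL value here means it never got called and the ioend
         * would otherwise leak, while the NULL check prevents a double
         * free when it did run */
        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);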
 
 
@@ -1186,7 +1363,7 @@ linvfs_writepage(
        return 0;
 
 out_fail:
-       set_page_dirty(page);
+       redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
 out_unlock:
@@ -1194,6 +1371,16 @@ out_unlock:
        return error;
 }
 
+STATIC int
+linvfs_invalidate_page(
+       struct page             *page,
+       unsigned long           offset)
+{
+       xfs_page_trace(XFS_INVALIDPAGE_ENTER,
+                       page->mapping->host, page, offset);
+       return block_invalidatepage(page, offset);
+}
+
 /*
  * Called to move a page into cleanable state - and from there
  * to be released. Possibly the page is already clean. We always
@@ -1216,7 +1403,7 @@ out_unlock:
 STATIC int
 linvfs_release_page(
        struct page             *page,
-       int                     gfp_mask)
+       gfp_t                   gfp_mask)
 {
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
@@ -1262,13 +1449,7 @@ linvfs_prepare_write(
        unsigned int            from,
        unsigned int            to)
 {
-       if (file && (file->f_flags & O_SYNC)) {
-               return block_prepare_write(page, from, to,
-                                               linvfs_get_block_sync);
-       } else {
-               return block_prepare_write(page, from, to,
-                                               linvfs_get_block);
-       }
+       return block_prepare_write(page, from, to, linvfs_get_block);
 }
 
 struct address_space_operations linvfs_aops = {
@@ -1277,8 +1458,10 @@ struct address_space_operations linvfs_aops = {
        .writepage              = linvfs_writepage,
        .sync_page              = block_sync_page,
        .releasepage            = linvfs_release_page,
+       .invalidatepage         = linvfs_invalidate_page,
        .prepare_write          = linvfs_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = linvfs_bmap,
        .direct_IO              = linvfs_direct_IO,
+       .migratepage            = buffer_migrate_page,
 };