/*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
bhv_desc_t *bdp;
vnode_t *vp = LINVFS_GET_VP(inode);
loff_t isize = i_size_read(inode);
- loff_t offset = page->index << PAGE_CACHE_SHIFT;
+ loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
int delalloc = -1, unmapped = -1, unwritten = -1;
if (page_has_buffers(page))
{
ASSERT(!private || inode == (struct inode *)private);
- /* private indicates an unwritten extent lay beneath this IO,
- * see linvfs_get_block_core.
- */
+ /* private indicates an unwritten extent lay beneath this IO */
if (private && size > 0) {
vnode_t *vp = LINVFS_GET_VP(inode);
int error;
sector_shift = block_bits - BBSHIFT;
bn = iomapp->iomap_bn >> sector_shift;
bn += delta;
+ BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
lock_buffer(bh);
page = find_trylock_page(mapping, index);
if (!page)
- return 0;
+ return NULL;
if (PageWriteback(page))
goto out;
STATIC void
xfs_submit_page(
struct page *page,
+ struct writeback_control *wbc,
struct buffer_head *bh_arr[],
- int cnt)
+ int bh_count,
+ int probed_page,
+ int clear_dirty)
{
struct buffer_head *bh;
int i;
BUG_ON(PageWriteback(page));
set_page_writeback(page);
- clear_page_dirty(page);
+ if (clear_dirty)
+ clear_page_dirty(page);
unlock_page(page);
- if (cnt) {
- for (i = 0; i < cnt; i++) {
+ if (bh_count) {
+ for (i = 0; i < bh_count; i++) {
bh = bh_arr[i];
mark_buffer_async_write(bh);
if (buffer_unwritten(bh))
clear_buffer_dirty(bh);
}
- for (i = 0; i < cnt; i++)
+ for (i = 0; i < bh_count; i++)
submit_bh(WRITE, bh_arr[i]);
- } else
+
+ if (probed_page && clear_dirty)
+ wbc->nr_to_write--; /* Wrote an "extra" page */
+ } else {
end_page_writeback(page);
+ wbc->pages_skipped++; /* We didn't write this page */
+ }
}
/*
bh = head = page_buffers(page);
do {
offset = i << bbits;
+ if (offset >= end)
+ break;
if (!(PageUptodate(page) || buffer_uptodate(bh)))
continue;
if (buffer_mapped(bh) && all_bh &&
- !buffer_unwritten(bh) && !buffer_delay(bh)) {
- if (startio && (offset < end)) {
+ !(buffer_unwritten(bh) || buffer_delay(bh))) {
+ if (startio) {
lock_buffer(bh);
bh_arr[index++] = bh;
}
ASSERT(private);
}
}
- if (startio && (offset < end)) {
+ if (startio) {
bh_arr[index++] = bh;
} else {
set_buffer_dirty(bh);
} while (i++, (bh = bh->b_this_page) != head);
if (startio) {
- wbc->nr_to_write--;
- xfs_submit_page(page, bh_arr, index);
+ xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
} else {
unlock_page(page);
}
pgoff_t end_index, last_index, tlast;
int len, err, i, cnt = 0, uptodate = 1;
int flags = startio ? 0 : BMAPI_TRYLOCK;
- int page_dirty = 1;
- int delalloc = 0;
-
+ int page_dirty, delalloc = 0;
- /* Are we off the end of the file ? */
+ /* Is this page beyond the end of the file? */
offset = i_size_read(inode);
end_index = offset >> PAGE_CACHE_SHIFT;
last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
iomp = NULL;
+ /*
+ * page_dirty is initially a count of buffers on the page and
+	 * is decremented as we move each into a cleanable state.
+ */
len = bh->b_size;
+ page_dirty = PAGE_CACHE_SIZE / len;
+
do {
if (offset >= end_offset)
break;
}
BUG_ON(!buffer_locked(bh));
bh_arr[cnt++] = bh;
- page_dirty = 0;
+ page_dirty--;
}
/*
* Second case, allocate space for a delalloc buffer.
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
- page_dirty = 0;
+ page_dirty--;
}
} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
(unmapped || startio)) {
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
- page_dirty = 0;
+ page_dirty--;
}
} else if (startio) {
if (buffer_uptodate(bh) &&
!test_and_set_bit(BH_Lock, &bh->b_state)) {
bh_arr[cnt++] = bh;
- page_dirty = 0;
+ page_dirty--;
}
}
}
SetPageUptodate(page);
if (startio)
- xfs_submit_page(page, bh_arr, cnt);
+ xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
if (iomp) {
tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
}
STATIC int
-linvfs_get_block_core(
+__linvfs_get_block(
struct inode *inode,
sector_t iblock,
unsigned long blocks,
bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
bn += delta;
-
+ BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
bh_result->b_blocknr = bn;
- bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
set_buffer_mapped(bh_result);
}
if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
}
if (iomap.iomap_flags & IOMAP_DELAY) {
- if (unlikely(direct))
- BUG();
+ BUG_ON(direct);
if (create) {
- set_buffer_mapped(bh_result);
set_buffer_uptodate(bh_result);
+ set_buffer_mapped(bh_result);
+ set_buffer_delay(bh_result);
}
- bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
- set_buffer_delay(bh_result);
}
if (blocks) {
- loff_t iosize;
- iosize = (iomap.iomap_bsize - iomap.iomap_delta);
- bh_result->b_size =
- (ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
+ bh_result->b_size = (ssize_t)min(
+ (loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
+ (loff_t)(blocks << inode->i_blkbits));
}
return 0;
struct buffer_head *bh_result,
int create)
{
- return linvfs_get_block_core(inode, iblock, 0, bh_result,
+ return __linvfs_get_block(inode, iblock, 0, bh_result,
create, 0, BMAPI_WRITE);
}
-STATIC int
-linvfs_get_block_sync(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- return linvfs_get_block_core(inode, iblock, 0, bh_result,
- create, 0, BMAPI_SYNC|BMAPI_WRITE);
-}
-
STATIC int
linvfs_get_blocks_direct(
struct inode *inode,
struct buffer_head *bh_result,
int create)
{
- return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
+ return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
if (error)
return -error;
- return blockdev_direct_IO_no_locking(rw, iocb, inode,
+ return blockdev_direct_IO_own_locking(rw, iocb, inode,
iomap.iomap_target->pbr_bdev,
iov, offset, nr_segs,
linvfs_get_blocks_direct,
return 0;
out_fail:
- set_page_dirty(page);
+ redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
out_unlock:
unsigned int from,
unsigned int to)
{
- if (file && (file->f_flags & O_SYNC)) {
- return block_prepare_write(page, from, to,
- linvfs_get_block_sync);
- } else {
- return block_prepare_write(page, from, to,
- linvfs_get_block);
- }
+ return block_prepare_write(page, from, to, linvfs_get_block);
}
struct address_space_operations linvfs_aops = {