alloc_new:
if (bio == NULL) {
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- nr_pages, GFP_KERNEL);
+ min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
+ GFP_KERNEL);
if (bio == NULL)
goto confused;
}
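
For reference: blocks[0] << (blkbits - 9) converts the first filesystem block
number into a 512-byte sector number, and bio_get_nr_vecs() reports the most
pages the device's request queue will accept in a single bio, so the vec count
is clamped to something the queue can actually take. A sketch of the sector
arithmetic, example values mine:

	/* 4K blocks: blkbits == 12, so a block spans 1 << (12 - 9) == 8
	 * sectors of 512 bytes; block 100 starts at sector 800. */
	sector_t first_sector = blocks[0] << (blkbits - 9);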
struct block_device *boundary_bdev = NULL;
int length;
struct buffer_head map_bh;
+ loff_t i_size = i_size_read(inode);
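
Caching i_size_read(inode) in a local is about consistency rather than speed:
every size-derived value in this pass now comes from one snapshot, so a
concurrent extend or truncate cannot make them disagree with each other in the
middle of a writepage. A sketch of the derived values, assuming the names used
in this function:

	loff_t i_size = i_size_read(inode);		  /* one snapshot */
	sector_t last_block = (i_size - 1) >> blkbits;	  /* last file block */
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;	  /* page straddling i_size */
	unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); /* bytes before EOF in it */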
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
	/*
	 * The page has no buffers: map it to disk
	 */
BUG_ON(!PageUptodate(page));
block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
- last_block = (i_size_read(inode) - 1) >> blkbits;
+ last_block = (i_size - 1) >> blkbits;
map_bh.b_page = page;
for (page_block = 0; page_block < blocks_per_page; ) {
first_unmapped = page_block;
- end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+page_is_mapped:
+ end_index = i_size >> PAGE_CACHE_SHIFT;
if (page->index >= end_index) {
- unsigned offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+	 * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining memory
+ * is zeroed when mapped, and writes to that region are not
+ * written out to the file."
+ */
+ unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
char *kaddr;
		if (page->index > end_index || !offset)
			goto confused;

		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
	}
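
A worked example of the arithmetic above, numbers mine: on 4K pages a file of
10000 bytes straddles page 2, and everything in that page past EOF is cleared
on every writepage, exactly as the quoted mmap semantics require.

	/* i_size    == 10000
	 * end_index == 10000 >> 12  == 2
	 * offset    == 10000 & 4095 == 1808
	 * memset() clears 4096 - 1808 == 2288 trailing bytes
	 */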
-page_is_mapped:
-
	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && *last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);
+ /*
+ * Must try to add the page before marking the buffer clean or
+ * the confused fail path above (OOM) will be very confused when
+ * it finds all bh marked clean (i.e. it will not write anything)
+ */
+ length = first_unmapped << blkbits;
+ if (bio_add_page(bio, page, length, 0) < length) {
+ bio = mpage_bio_submit(WRITE, bio);
+ goto alloc_new;
+ }
+
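
bio_add_page() returns the number of bytes it actually added, so a return
below the requested length means this bio cannot take the page and must be
sent off before a fresh one is built; the goto back to alloc_new does exactly
that. The same control flow written as a loop, a sketch only, where nr_vecs
stands in for the min_t() expression from the first hunk and which relies on
mpage_bio_submit() returning NULL after submission:

	for (;;) {
		if (bio == NULL) {
			bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					  nr_vecs, GFP_KERNEL);
			if (bio == NULL)
				break;	/* OOM: take the confused path */
		}
		if (bio_add_page(bio, page, length, 0) == length)
			break;		/* the mapped head of the page fits */
		bio = mpage_bio_submit(WRITE, bio);	/* full: flush, retry */
	}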
/*
* OK, we have our BIO, so we can now mark the buffers clean. Make
	 * sure to only clean buffers which we know we'll be writing.
	 */
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
} while (bh != head);
- if (buffer_heads_over_limit)
+		/*
+		 * We cannot drop the bh if the page is not uptodate, or else
+		 * a concurrent readpage would fail to serialize with the bh
+		 * and would read from disk before we reach the platter.
+		 */
+ if (buffer_heads_over_limit && PageUptodate(page))
try_to_free_buffers(page);
}
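
The race behind the new PageUptodate() test, as a timeline of my own
construction: when the page is not uptodate, readers must serialize on its
buffer_heads, and freeing them removes the only thing a reader can wait on.

	/*
	 * writepage (page !uptodate)      concurrent readpage
	 * --------------------------      -------------------
	 * try_to_free_buffers(page)
	 *                                 finds no bh to serialize on,
	 *                                 issues its own READ bio
	 * WRITE bio still in flight       READ may complete first and
	 *                                 return stale data from disk
	 */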
- length = first_unmapped << blkbits;
- if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit(WRITE, bio);
- goto alloc_new;
- }
-
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
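
The ordering of these three calls is the usual writeback handshake: raising
PG_writeback while PG_locked is still held means the page is never observable
as neither locked nor under writeback, and the bio completion handler
(mpage_end_io_write() in this file) later calls end_page_writeback() to
release anyone sleeping on the flag. The sleeping side, as used further down:

	if (wbc->sync_mode != WB_SYNC_NONE)
		wait_on_page_writeback(page);	/* until end_page_writeback() */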
struct pagevec pvec;
int nr_pages;
pgoff_t index;
+ pgoff_t end = -1; /* Inclusive */
int scanned = 0;
+ int is_range = 0;
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}
	if (wbc->sync_mode == WB_SYNC_NONE) {
		index = mapping->writeback_index; /* Start from prev offset */
	} else {
		index = 0;			/* whole-file sweep */
		scanned = 1;
	}
+ if (wbc->start || wbc->end) {
+ index = wbc->start >> PAGE_CACHE_SHIFT;
+ end = wbc->end >> PAGE_CACHE_SHIFT;
+ is_range = 1;
+ scanned = 1;
+ }
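
wbc->start and wbc->end arrive as byte offsets and become inclusive page
indices here; since end was initialised to -1, which as a pgoff_t is the
largest possible index, the no-range case still sweeps to the end of the file
through the same index <= end test below. A worked example, numbers mine:

	/* 4K pages, wbc->start == 8192, wbc->end == 20000:
	 *   index = 8192  >> 12 == 2
	 *   end   = 20000 >> 12 == 4	(page 4 only partially covered)
	 * so pages 2..4 are eligible for writeback.
	 */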
retry:
- while (!done && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
+ while (!done && (index <= end) &&
+ (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
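
The lookup size feeds both limits into pagevec_lookup_tag(), which also
advances index past the last page it returns: never more than PAGEVEC_SIZE
entries, and never more pages than the range still contains.

	/* Continuing the example, index == 2 and end == 4:
	 *   min(end - index, PAGEVEC_SIZE - 1) + 1 == min(2, 15) + 1 == 3
	 * (PAGEVEC_SIZE taken as 16 here), i.e. exactly pages 2, 3 and 4.
	 */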
		unsigned i;
		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ continue;
+ }
+
+ if (unlikely(is_range) && page->index > end) {
+ done = 1;
+ unlock_page(page);
+ continue;
+ }
+
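
Two subtleties in the added checks: the page->mapping recheck is only
meaningful because lock_page() precedes it, as truncate can clear the mapping
at any moment before the lock is held; and the index test is still needed
despite the capped lookup size, because pagevec_lookup_tag() bounds how many
pages it returns, not how far it walks.

	/* With sparse dirty pages a batch can reach past 'end'; the first
	 * such page means nothing dirty is left inside the range, so the
	 * scan can stop (done = 1) rather than just skip it. */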
if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(page);
- if (page->mapping != mapping || PageWriteback(page) ||
+ if (PageWriteback(page) ||
!clear_page_dirty_for_io(page)) {
unlock_page(page);
continue;
	if (!scanned && !done) {
		/* Hit the last page with more to do: wrap back to file start */
		scanned = 1;
		index = 0;
		goto retry;
	}
- mapping->writeback_index = index;
+ if (!is_range)
+ mapping->writeback_index = index;
if (bio)
mpage_bio_submit(WRITE, bio);
return ret;