X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fmpage.c;fp=fs%2Fmpage.c;h=e431cb3878d699561e668d9378009815cfb4256a;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=1e4598247d0b962f02eca7f390bf6683c9df3e4e;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git diff --git a/fs/mpage.c b/fs/mpage.c index 1e4598247..e431cb387 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -163,19 +163,9 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) } while (page_bh != head); } -/* - * This is the worker routine which does all the work of mapping the disk - * blocks and constructs largest possible bios, submits them for IO if the - * blocks are not contiguous on the disk. - * - * We pass a buffer_head back and forth and use its buffer_mapped() flag to - * represent the validity of its disk mapping and to decide when to do the next - * get_block() call. - */ static struct bio * do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, - sector_t *last_block_in_bio, struct buffer_head *map_bh, - unsigned long *first_logical_block, get_block_t get_block) + sector_t *last_block_in_bio, get_block_t get_block) { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -183,72 +173,33 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; - sector_t last_block_in_file; sector_t blocks[MAX_BUF_PER_PAGE]; unsigned page_block; unsigned first_hole = blocks_per_page; struct block_device *bdev = NULL; + struct buffer_head bh; int length; int fully_mapped = 1; - unsigned nblocks; - unsigned relative_block; if (page_has_buffers(page)) goto confused; block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); - last_block = block_in_file + nr_pages * blocks_per_page; - last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; - if (last_block > last_block_in_file) - last_block = last_block_in_file; - page_block = 0; - - /* - * Map blocks using the result from the previous get_blocks call first. - */ - nblocks = map_bh->b_size >> blkbits; - if (buffer_mapped(map_bh) && block_in_file > *first_logical_block && - block_in_file < (*first_logical_block + nblocks)) { - unsigned map_offset = block_in_file - *first_logical_block; - unsigned last = nblocks - map_offset; - - for (relative_block = 0; ; relative_block++) { - if (relative_block == last) { - clear_buffer_mapped(map_bh); - break; - } - if (page_block == blocks_per_page) - break; - blocks[page_block] = map_bh->b_blocknr + map_offset + - relative_block; - page_block++; - block_in_file++; - } - bdev = map_bh->b_bdev; - } - - /* - * Then do more get_blocks calls until we are done with this page. 
- */ - map_bh->b_page = page; - while (page_block < blocks_per_page) { - map_bh->b_state = 0; - map_bh->b_size = 0; + last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; + bh.b_page = page; + for (page_block = 0; page_block < blocks_per_page; + page_block++, block_in_file++) { + bh.b_state = 0; if (block_in_file < last_block) { - map_bh->b_size = (last_block-block_in_file) << blkbits; - if (get_block(inode, block_in_file, map_bh, 0)) + if (get_block(inode, block_in_file, &bh, 0)) goto confused; - *first_logical_block = block_in_file; } - if (!buffer_mapped(map_bh)) { + if (!buffer_mapped(&bh)) { fully_mapped = 0; if (first_hole == blocks_per_page) first_hole = page_block; - page_block++; - block_in_file++; - clear_buffer_mapped(map_bh); continue; } @@ -258,8 +209,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, * we just collected from get_block into the page's buffers * so readpage doesn't have to repeat the get_block call */ - if (buffer_uptodate(map_bh)) { - map_buffer_to_page(page, map_bh, page_block); + if (buffer_uptodate(&bh)) { + map_buffer_to_page(page, &bh, page_block); goto confused; } @@ -267,20 +218,10 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, goto confused; /* hole -> non-hole */ /* Contiguous blocks? */ - if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1) + if (page_block && blocks[page_block-1] != bh.b_blocknr-1) goto confused; - nblocks = map_bh->b_size >> blkbits; - for (relative_block = 0; ; relative_block++) { - if (relative_block == nblocks) { - clear_buffer_mapped(map_bh); - break; - } else if (page_block == blocks_per_page) - break; - blocks[page_block] = map_bh->b_blocknr+relative_block; - page_block++; - block_in_file++; - } - bdev = map_bh->b_bdev; + blocks[page_block] = bh.b_blocknr; + bdev = bh.b_bdev; } if (first_hole != blocks_per_page) { @@ -319,7 +260,7 @@ alloc_new: goto alloc_new; } - if (buffer_boundary(map_bh) || (first_hole != blocks_per_page)) + if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) bio = mpage_bio_submit(READ, bio); else *last_block_in_bio = blocks[blocks_per_page - 1]; @@ -390,10 +331,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned page_idx; sector_t last_block_in_bio = 0; struct pagevec lru_pvec; - struct buffer_head map_bh; - unsigned long first_logical_block = 0; - clear_buffer_mapped(&map_bh); pagevec_init(&lru_pvec, 0); for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_entry(pages->prev, struct page, lru); @@ -404,9 +342,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, page->index, GFP_KERNEL)) { bio = do_mpage_readpage(bio, page, nr_pages - page_idx, - &last_block_in_bio, &map_bh, - &first_logical_block, - get_block); + &last_block_in_bio, get_block); if (!pagevec_add(&lru_pvec, page)) __pagevec_lru_add(&lru_pvec); } else { @@ -428,12 +364,9 @@ int mpage_readpage(struct page *page, get_block_t get_block) { struct bio *bio = NULL; sector_t last_block_in_bio = 0; - struct buffer_head map_bh; - unsigned long first_logical_block = 0; - clear_buffer_mapped(&map_bh); - bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, - &map_bh, &first_logical_block, get_block); + bio = do_mpage_readpage(bio, page, 1, + &last_block_in_bio, get_block); if (bio) mpage_bio_submit(READ, bio); return 0; @@ -539,7 +472,6 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, for (page_block = 0; page_block < blocks_per_page; ) { 
map_bh.b_state = 0; - map_bh.b_size = 1 << blkbits; if (get_block(inode, block_in_file, &map_bh, 1)) goto confused; if (buffer_new(&map_bh)) @@ -707,9 +639,9 @@ mpage_writepages(struct address_space *mapping, struct pagevec pvec; int nr_pages; pgoff_t index; - pgoff_t end; /* Inclusive */ + pgoff_t end = -1; /* Inclusive */ int scanned = 0; - int range_whole = 0; + int is_range = 0; if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; @@ -721,14 +653,16 @@ mpage_writepages(struct address_space *mapping, writepage = mapping->a_ops->writepage; pagevec_init(&pvec, 0); - if (wbc->range_cyclic) { + if (wbc->sync_mode == WB_SYNC_NONE) { index = mapping->writeback_index; /* Start from prev offset */ - end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; + index = 0; /* whole-file sweep */ + scanned = 1; + } + if (wbc->start || wbc->end) { + index = wbc->start >> PAGE_CACHE_SHIFT; + end = wbc->end >> PAGE_CACHE_SHIFT; + is_range = 1; scanned = 1; } retry: @@ -757,7 +691,7 @@ retry: continue; } - if (!wbc->range_cyclic && page->index > end) { + if (unlikely(is_range) && page->index > end) { done = 1; unlock_page(page); continue; @@ -808,7 +742,7 @@ retry: index = 0; goto retry; } - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) + if (!is_range) mapping->writeback_index = index; if (bio) mpage_bio_submit(WRITE, bio);
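
The core of the change above is in do_mpage_readpage(): the multi-block
get_block() path (which carries a mapping across calls in a caller-supplied
buffer_head and tracks it via *first_logical_block) is replaced by the
simpler variant that issues one get_block() lookup per page block and jumps
to the block-at-a-time "confused" fallback as soon as a hole is followed by
data or the on-disk blocks stop being contiguous. As a rough illustration
of that decision logic only -- not kernel code -- the standalone userspace
sketch below uses invented stand-ins (BLOCKS_PER_PAGE, map_block_fn,
map_page_blocks, toy_map) for the real page and buffer_head machinery:

#include <stdio.h>
#include <stdbool.h>

#define BLOCKS_PER_PAGE 4   /* e.g. 4096-byte page with 1024-byte blocks */

/* Stand-in for get_block(): true + *blocknr when mapped, false on a hole */
typedef bool (*map_block_fn)(unsigned long logical, unsigned long *blocknr);

/*
 * Collect the disk blocks backing one page. Returns the number of blocks
 * mapped before the first hole (BLOCKS_PER_PAGE if fully mapped), or -1
 * when the page must go to the one-block-at-a-time fallback path.
 */
static int map_page_blocks(unsigned long first_logical, map_block_fn map,
			   unsigned long blocks[BLOCKS_PER_PAGE])
{
	unsigned first_hole = BLOCKS_PER_PAGE;
	unsigned pb;

	for (pb = 0; pb < BLOCKS_PER_PAGE; pb++) {
		unsigned long blocknr;

		if (!map(first_logical + pb, &blocknr)) {
			if (first_hole == BLOCKS_PER_PAGE)
				first_hole = pb;	/* start of hole */
			continue;
		}
		if (first_hole != BLOCKS_PER_PAGE)
			return -1;	/* hole -> non-hole: confused */
		if (pb && blocks[pb - 1] != blocknr - 1)
			return -1;	/* not contiguous on disk: confused */
		blocks[pb] = blocknr;
	}
	return (int)first_hole;
}

/* Toy mapping: logical block n sits at disk block 100 + n, hole at n == 5 */
static bool toy_map(unsigned long logical, unsigned long *blocknr)
{
	if (logical == 5)
		return false;
	*blocknr = 100 + logical;
	return true;
}

int main(void)
{
	unsigned long blocks[BLOCKS_PER_PAGE];
	int n;

	n = map_page_blocks(0, toy_map, blocks);	/* logical 0..3 */
	printf("page 0: %d contiguous block(s)\n", n);

	n = map_page_blocks(4, toy_map, blocks);	/* logical 4..7 */
	printf("page 1: %s\n", n < 0 ? "confused -> fallback" : "mapped");
	return 0;
}

Keeping a page's blocks contiguous is what lets the caller keep appending
pages to one large bio; the moment that assumption breaks, the page is
handed to the conservative block-at-a-time readpage path rather than
patching up a partially built bio, which matches the "constructs largest
possible bios" comment deleted in the first hunk.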