X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fbuffer.c;h=670a36e2c2aaa5b9e2314be9f80f775b19501724;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=81f31297b81e5c99057737ddf534d89c30f481ae;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git diff --git a/fs/buffer.c b/fs/buffer.c index 81f31297b..670a36e2c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -18,13 +18,14 @@ * async buffer flushing, 1999 Andrea Arcangeli */ -#include #include +#include #include #include #include #include #include +#include #include #include #include @@ -34,44 +35,19 @@ #include #include #include +#include #include #include #include -#include +#include +#include +#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); static void invalidate_bh_lrus(void); #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) -struct bh_wait_queue { - struct buffer_head *bh; - wait_queue_t wait; -}; - -#define __DEFINE_BH_WAIT(name, b, f) \ - struct bh_wait_queue name = { \ - .bh = b, \ - .wait = { \ - .task = current, \ - .flags = f, \ - .func = bh_wake_function, \ - .task_list = \ - LIST_HEAD_INIT(name.wait.task_list),\ - }, \ - } -#define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0) -#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \ - __DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE) - -/* - * Hashed waitqueue_head's for wait_on_buffer() - */ -#define BH_WAIT_TABLE_ORDER 7 -static struct bh_wait_queue_head { - wait_queue_head_t wqh; -} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<b_private = private; } -/* - * Return the address of the waitqueue_head to be used for this - * buffer_head - */ -wait_queue_head_t *bh_waitq_head(struct buffer_head *bh) -{ - return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh; -} -EXPORT_SYMBOL(bh_waitq_head); - -void wake_up_buffer(struct buffer_head *bh) -{ - wait_queue_head_t *wq = bh_waitq_head(bh); - - smp_mb(); - if (waitqueue_active(wq)) - __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh); -} -EXPORT_SYMBOL(wake_up_buffer); - -static int bh_wake_function(wait_queue_t *wait, unsigned mode, - int sync, void *key) -{ - struct buffer_head *bh = key; - struct bh_wait_queue *wq; - - wq = container_of(wait, struct bh_wait_queue, wait); - if (wq->bh != bh || buffer_locked(bh)) - return 0; - else - return autoremove_wake_function(wait, mode, sync, key); -} - -static void sync_buffer(struct buffer_head *bh) +static int sync_buffer(void *word) { struct block_device *bd; + struct buffer_head *bh + = container_of(word, struct buffer_head, b_state); smp_mb(); bd = bh->b_bdev; if (bd) blk_run_address_space(bd->bd_inode->i_mapping); + io_schedule(); + return 0; } void fastcall __lock_buffer(struct buffer_head *bh) { - wait_queue_head_t *wqh = bh_waitq_head(bh); - DEFINE_BH_WAIT_EXCLUSIVE(wait, bh); - - do { - prepare_to_wait_exclusive(wqh, &wait.wait, - TASK_UNINTERRUPTIBLE); - if (buffer_locked(bh)) { - sync_buffer(bh); - io_schedule(); - } - } while (test_set_buffer_locked(bh)); - finish_wait(wqh, &wait.wait); + wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, + TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__lock_buffer); void fastcall unlock_buffer(struct buffer_head *bh) { + smp_mb__before_clear_bit(); clear_buffer_locked(bh); smp_mb__after_clear_bit(); - wake_up_buffer(bh); + wake_up_bit(&bh->b_state, BH_Lock); } /* @@ -153,32 +91,14 @@ void fastcall unlock_buffer(struct buffer_head *bh) */ void __wait_on_buffer(struct buffer_head * bh) { - wait_queue_head_t *wqh = bh_waitq_head(bh); - DEFINE_BH_WAIT(wait, 
bh); - - do { - prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE); - if (buffer_locked(bh)) { - sync_buffer(bh); - io_schedule(); - } - } while (buffer_locked(bh)); - finish_wait(wqh, &wait.wait); -} - -static void -__set_page_buffers(struct page *page, struct buffer_head *head) -{ - page_cache_get(page); - SetPagePrivate(page); - page->private = (unsigned long)head; + wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); } static void __clear_page_buffers(struct page *page) { ClearPagePrivate(page); - page->private = 0; + set_page_private(page, 0); page_cache_release(page); } @@ -235,38 +155,13 @@ int sync_blockdev(struct block_device *bdev) { int ret = 0; - if (bdev) { - int err; - - ret = filemap_fdatawrite(bdev->bd_inode->i_mapping); - err = filemap_fdatawait(bdev->bd_inode->i_mapping); - if (!ret) - ret = err; - } + if (bdev) + ret = filemap_write_and_wait(bdev->bd_inode->i_mapping); return ret; } EXPORT_SYMBOL(sync_blockdev); -/* - * Write out and wait upon all dirty data associated with this - * superblock. Filesystem data as well as the underlying block - * device. Takes the superblock lock. - */ -int fsync_super(struct super_block *sb) -{ - sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); - lock_super(sb); - if (sb->s_dirt && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - sync_inodes_sb(sb, 1); - - return sync_blockdev(sb->s_bdev); -} +EXPORT_SYMBOL(fsync_super); /* * Write out and wait upon all dirty data associated with this @@ -301,24 +196,12 @@ struct super_block *freeze_bdev(struct block_device *bdev) sb = get_super(bdev); if (sb && !(sb->s_flags & MS_RDONLY)) { sb->s_frozen = SB_FREEZE_WRITE; - wmb(); - - sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); - - lock_super(sb); - if (sb->s_dirt && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); + smp_wmb(); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - - sync_blockdev(sb->s_bdev); - sync_inodes_sb(sb, 1); + __fsync_super(sb); sb->s_frozen = SB_FREEZE_TRANS; - wmb(); + smp_wmb(); sync_blockdev(sb->s_bdev); @@ -346,7 +229,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) if (sb->s_op->unlockfs) sb->s_op->unlockfs(sb); sb->s_frozen = SB_UNFROZEN; - wmb(); + smp_wmb(); wake_up(&sb->s_wait_unfrozen); drop_super(sb); } @@ -355,136 +238,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) } EXPORT_SYMBOL(thaw_bdev); -/* - * sync everything. Start out by waking pdflush, because that writes back - * all queues in parallel. - */ -static void do_sync(unsigned long wait) -{ - wakeup_bdflush(0); - sync_inodes(0); /* All mappings, inodes and their blockdevs */ - DQUOT_SYNC(NULL); - sync_supers(); /* Write the superblocks */ - sync_filesystems(0); /* Start syncing the filesystems */ - sync_filesystems(wait); /* Waitingly sync the filesystems */ - sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */ - if (!wait) - printk("Emergency Sync complete\n"); - if (unlikely(laptop_mode)) - laptop_sync_completion(); -} - -asmlinkage long sys_sync(void) -{ - do_sync(1); - return 0; -} - -void emergency_sync(void) -{ - pdflush_operation(do_sync, 0); -} - -/* - * Generic function to fsync a file. - * - * filp may be NULL if called via the msync of a vma. 
- */ - -int file_fsync(struct file *filp, struct dentry *dentry, int datasync) -{ - struct inode * inode = dentry->d_inode; - struct super_block * sb; - int ret; - - /* sync the inode to buffers */ - write_inode_now(inode, 0); - - /* sync the superblock to buffers */ - sb = inode->i_sb; - lock_super(sb); - if (sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - - /* .. finally sync the buffers to disk */ - ret = sync_blockdev(sb->s_bdev); - return ret; -} - -asmlinkage long sys_fsync(unsigned int fd) -{ - struct file * file; - struct address_space *mapping; - int ret, err; - - ret = -EBADF; - file = fget(fd); - if (!file) - goto out; - - mapping = file->f_mapping; - - ret = -EINVAL; - if (!file->f_op || !file->f_op->fsync) { - /* Why? We can still call filemap_fdatawrite */ - goto out_putf; - } - - /* We need to protect against concurrent writers.. */ - down(&mapping->host->i_sem); - current->flags |= PF_SYNCWRITE; - ret = filemap_fdatawrite(mapping); - err = file->f_op->fsync(file, file->f_dentry, 0); - if (!ret) - ret = err; - err = filemap_fdatawait(mapping); - if (!ret) - ret = err; - current->flags &= ~PF_SYNCWRITE; - up(&mapping->host->i_sem); - -out_putf: - fput(file); -out: - return ret; -} - -asmlinkage long sys_fdatasync(unsigned int fd) -{ - struct file * file; - struct address_space *mapping; - int ret, err; - - ret = -EBADF; - file = fget(fd); - if (!file) - goto out; - - ret = -EINVAL; - if (!file->f_op || !file->f_op->fsync) - goto out_putf; - - mapping = file->f_mapping; - - down(&mapping->host->i_sem); - current->flags |= PF_SYNCWRITE; - ret = filemap_fdatawrite(mapping); - err = file->f_op->fsync(file, file->f_dentry, 1); - if (!ret) - ret = err; - err = filemap_fdatawait(mapping); - if (!ret) - ret = err; - current->flags &= ~PF_SYNCWRITE; - up(&mapping->host->i_sem); - -out_putf: - fput(file); -out: - return ret; -} - /* * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. To get around this, @@ -497,7 +250,7 @@ out: * private_lock is contended then so is mapping->tree_lock). */ static struct buffer_head * -__find_get_block_slow(struct block_device *bdev, sector_t block, int unused) +__find_get_block_slow(struct block_device *bdev, sector_t block) { struct inode *bd_inode = bdev->bd_inode; struct address_space *bd_mapping = bd_inode->i_mapping; @@ -506,6 +259,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused) struct buffer_head *bh; struct buffer_head *head; struct page *page; + int all_mapped = 1; index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); page = find_get_page(bd_mapping, index); @@ -523,14 +277,25 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused) get_bh(bh); goto out_unlock; } + if (!buffer_mapped(bh)) + all_mapped = 0; bh = bh->b_this_page; } while (bh != head); - printk("__find_get_block_slow() failed. " - "block=%llu, b_blocknr=%llu\n", - (unsigned long long)block, (unsigned long long)bh->b_blocknr); - printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size); - printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); + /* we might be here because some of the buffers on this page are + * not mapped. This is due to various races between + * file io on the block device and getblk. It gets dealt with + * elsewhere, don't buffer_error if we had some unmapped buffers + */ + if (all_mapped) { + printk("__find_get_block_slow() failed. 
" + "block=%llu, b_blocknr=%llu\n", + (unsigned long long)block, + (unsigned long long)bh->b_blocknr); + printk("b_state=0x%08lx, b_size=%zu\n", + bh->b_state, bh->b_size); + printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); + } out_unlock: spin_unlock(&bd_mapping->private_lock); page_cache_release(page); @@ -572,13 +337,18 @@ out: pass does the actual I/O. */ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) { + struct address_space *mapping = bdev->bd_inode->i_mapping; + + if (mapping->nrpages == 0) + return; + invalidate_bh_lrus(); /* * FIXME: what about destroy_dirty_buffers? * We really want to use invalidate_inode_pages2() for * that, but not until that's cleaned up. */ - invalidate_inode_pages(bdev->bd_inode->i_mapping); + invalidate_inode_pages(mapping); } /* @@ -589,13 +359,13 @@ static void free_more_memory(void) struct zone **zones; pg_data_t *pgdat; - wakeup_bdflush(1024); + wakeup_pdflush(1024); yield(); - for_each_pgdat(pgdat) { - zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones; + for_each_online_pgdat(pgdat) { + zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; if (*zones) - try_to_free_pages(zones, GFP_NOFS, 0); + try_to_free_pages(zones, GFP_NOFS); } } @@ -605,8 +375,8 @@ static void free_more_memory(void) */ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) { - static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; + struct buffer_head *first; struct buffer_head *tmp; struct page *page; int page_uptodate = 1; @@ -618,7 +388,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) set_buffer_uptodate(bh); } else { clear_buffer_uptodate(bh); - buffer_io_error(bh); + if (printk_ratelimit()) + buffer_io_error(bh); SetPageError(page); } @@ -627,7 +398,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) * two buffer heads end IO at almost the same time and both * decide that the page is now completely done. */ - spin_lock_irqsave(&page_uptodate_lock, flags); + first = page_buffers(page); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -640,7 +413,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - spin_unlock_irqrestore(&page_uptodate_lock, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); /* * If none of the buffers had errors and they are all @@ -652,7 +426,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) return; still_busy: - spin_unlock_irqrestore(&page_uptodate_lock, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); return; } @@ -660,11 +435,11 @@ still_busy: * Completion handler for block_write_full_page() - pages which are unlocked * during I/O, and which have PageWriteback cleared upon I/O completion. 
*/ -void end_buffer_async_write(struct buffer_head *bh, int uptodate) +static void end_buffer_async_write(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; - static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; + struct buffer_head *first; struct buffer_head *tmp; struct page *page; @@ -681,11 +456,15 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) bdevname(bh->b_bdev, b)); } set_bit(AS_EIO, &page->mapping->flags); + set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); SetPageError(page); } - spin_lock_irqsave(&page_uptodate_lock, flags); + first = page_buffers(page); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + clear_buffer_async_write(bh); unlock_buffer(bh); tmp = bh->b_this_page; @@ -696,12 +475,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } - spin_unlock_irqrestore(&page_uptodate_lock, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); end_page_writeback(page); return; still_busy: - spin_unlock_irqrestore(&page_uptodate_lock, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); return; } @@ -795,6 +576,10 @@ EXPORT_SYMBOL(mark_buffer_async_write); static inline void __remove_assoc_queue(struct buffer_head *bh) { list_del_init(&bh->b_assoc_buffers); + WARN_ON(!bh->b_assoc_map); + if (buffer_write_io_error(bh)) + set_bit(AS_EIO, &bh->b_assoc_map->flags); + bh->b_assoc_map = NULL; } int inode_has_buffers(struct inode *inode) @@ -840,15 +625,14 @@ repeat: /** * sync_mapping_buffers - write out and wait upon a mapping's "associated" * buffers - * @buffer_mapping - the mapping which backs the buffers' data - * @mapping - the mapping which wants those buffers written + * @mapping: the mapping which wants those buffers written * * Starts I/O against the buffers at mapping->private_list, and waits upon * that I/O. * - * Basically, this is a convenience function for fsync(). @buffer_mapping is - * the blockdev which "owns" the buffers and @mapping is a file or directory - * which needs those buffers to be written for a successful fsync(). + * Basically, this is a convenience function for fsync(). + * @mapping is a file or directory which needs those buffers to be written for + * a successful fsync(). */ int sync_mapping_buffers(struct address_space *mapping) { @@ -888,13 +672,13 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) if (!mapping->assoc_mapping) { mapping->assoc_mapping = buffer_mapping; } else { - if (mapping->assoc_mapping != buffer_mapping) - BUG(); + BUG_ON(mapping->assoc_mapping != buffer_mapping); } if (list_empty(&bh->b_assoc_buffers)) { spin_lock(&buffer_mapping->private_lock); list_move_tail(&bh->b_assoc_buffers, &mapping->private_list); + bh->b_assoc_map = mapping; spin_unlock(&buffer_mapping->private_lock); } } @@ -927,7 +711,10 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); */ int __set_page_dirty_buffers(struct page *page) { - struct address_space * const mapping = page->mapping; + struct address_space * const mapping = page_mapping(page); + + if (unlikely(!mapping)) + return !TestSetPageDirty(page); spin_lock(&mapping->private_lock); if (page_has_buffers(page)) { @@ -941,20 +728,21 @@ int __set_page_dirty_buffers(struct page *page) } spin_unlock(&mapping->private_lock); - if (!TestSetPageDirty(page)) { - spin_lock_irq(&mapping->tree_lock); - if (page->mapping) { /* Race with truncate? 
*/ - if (!mapping->backing_dev_info->memory_backed) - inc_page_state(nr_dirty); - radix_tree_tag_set(&mapping->page_tree, - page_index(page), - PAGECACHE_TAG_DIRTY); + if (TestSetPageDirty(page)) + return 0; + + write_lock_irq(&mapping->tree_lock); + if (page->mapping) { /* Race with truncate? */ + if (mapping_cap_account_dirty(mapping)) { + __inc_zone_page_state(page, NR_FILE_DIRTY); + task_io_account_write(PAGE_CACHE_SIZE); } - spin_unlock_irq(&mapping->tree_lock); - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + radix_tree_tag_set(&mapping->page_tree, + page_index(page), PAGECACHE_TAG_DIRTY); } - - return 0; + write_unlock_irq(&mapping->tree_lock); + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return 1; } EXPORT_SYMBOL(__set_page_dirty_buffers); @@ -988,7 +776,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) spin_lock(lock); while (!list_empty(list)) { bh = BH_ENTRY(list->next); - list_del_init(&bh->b_assoc_buffers); + __remove_assoc_queue(bh); if (buffer_dirty(bh) || buffer_locked(bh)) { list_add(&bh->b_assoc_buffers, &tmp); if (buffer_dirty(bh)) { @@ -1000,8 +788,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) * contents - it is a noop if I/O is still in * flight on potentially older contents. */ - wait_on_buffer(bh); - ll_rw_block(WRITE, 1, &bh); + ll_rw_block(SWRITE, 1, &bh); brelse(bh); spin_lock(lock); } @@ -1010,7 +797,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) while (!list_empty(&tmp)) { bh = BH_ENTRY(tmp.prev); - __remove_assoc_queue(bh); + list_del_init(&bh->b_assoc_buffers); get_bh(bh); spin_unlock(lock); wait_on_buffer(bh); @@ -1089,8 +876,8 @@ int remove_inode_buffers(struct inode *inode) * The retry flag is used to differentiate async IO (paging, swapping) * which may not fail from ordinary buffer allocations. 
*/ -static struct buffer_head * -create_buffers(struct page * page, unsigned long size, int retry) +struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, + int retry) { struct buffer_head *bh, *head; long offset; @@ -1110,12 +897,13 @@ try_again: bh->b_state = 0; atomic_set(&bh->b_count, 0); + bh->b_private = NULL; bh->b_size = size; /* Link the buffer to its page */ set_bh_page(bh, page, offset); - bh->b_end_io = NULL; + init_buffer(bh, NULL, NULL); } return head; /* @@ -1148,6 +936,7 @@ no_grow: free_more_memory(); goto try_again; } +EXPORT_SYMBOL_GPL(alloc_page_buffers); static inline void link_dev_buffers(struct page *page, struct buffer_head *head) @@ -1160,7 +949,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head) bh = bh->b_this_page; } while (bh); tail->b_this_page = head; - __set_page_buffers(page, head); + attach_page_buffers(page, head); } /* @@ -1172,18 +961,16 @@ init_page_buffers(struct page *page, struct block_device *bdev, { struct buffer_head *head = page_buffers(page); struct buffer_head *bh = head; - unsigned int b_state; - - b_state = 1 << BH_Mapped; - if (PageUptodate(page)) - b_state |= 1 << BH_Uptodate; + int uptodate = PageUptodate(page); do { - if (!(bh->b_state & (1 << BH_Mapped))) { + if (!buffer_mapped(bh)) { init_buffer(bh, NULL, NULL); bh->b_bdev = bdev; bh->b_blocknr = block; - bh->b_state = b_state; + if (uptodate) + set_buffer_uptodate(bh); + set_buffer_mapped(bh); } block++; bh = bh->b_this_page; @@ -1207,13 +994,14 @@ grow_dev_page(struct block_device *bdev, sector_t block, if (!page) return NULL; - if (!PageLocked(page)) - BUG(); + BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); - if (bh->b_size == size) + if (bh->b_size == size) { + init_page_buffers(page, bdev, block, size); return page; + } if (!try_to_free_buffers(page)) goto failed; } @@ -1221,7 +1009,7 @@ grow_dev_page(struct block_device *bdev, sector_t block, /* * Allocate some buffers for this page */ - bh = create_buffers(page, size, 0); + bh = alloc_page_buffers(page, size, 0); if (!bh) goto failed; @@ -1252,7 +1040,7 @@ failed: * some of those buffers may be aliases of filesystem data. * grow_dev_page() will go BUG() if this happens. */ -static inline int +static int grow_buffers(struct block_device *bdev, sector_t block, int size) { struct page *page; @@ -1265,8 +1053,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) } while ((size << sizebits) < PAGE_SIZE); index = block >> sizebits; - block = index << sizebits; + /* + * Check for a block which wants to lie outside our maximum possible + * pagecache index. (this comparison is done using sector_t types). + */ + if (unlikely(index != block >> sizebits)) { + char b[BDEVNAME_SIZE]; + + printk(KERN_ERR "%s: requested out-of-range block %llu for " + "device %s\n", + __FUNCTION__, (unsigned long long)block, + bdevname(bdev, b)); + return -EIO; + } + block = index << sizebits; /* Create a page with the proper size buffers.. 
*/ page = grow_dev_page(bdev, block, index, size); if (!page) @@ -1276,7 +1077,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) return 1; } -struct buffer_head * +static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, int size) { /* Size must be multiple of hard sectorsize */ @@ -1293,12 +1094,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) for (;;) { struct buffer_head * bh; + int ret; bh = __find_get_block(bdev, block, size); if (bh) return bh; - if (!grow_buffers(bdev, block, size)) + ret = grow_buffers(bdev, block, size); + if (ret < 0) + return NULL; + if (ret == 0) free_more_memory(); } } @@ -1328,6 +1133,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) /** * mark_buffer_dirty - mark a buffer_head as needing writeout + * @bh: the buffer_head to mark dirty * * mark_buffer_dirty() will set the dirty bit against the buffer, then set its * backing page dirty, then tag the page as dirty in its address_space's radix @@ -1372,6 +1178,7 @@ void __bforget(struct buffer_head *bh) spin_lock(&buffer_mapping->private_lock); list_del_init(&bh->b_assoc_buffers); + bh->b_assoc_map = NULL; spin_unlock(&buffer_mapping->private_lock); } __brelse(bh); @@ -1477,7 +1284,7 @@ static void bh_lru_install(struct buffer_head *bh) /* * Look up the bh in this cpu's LRU. If it's there, move it to the head. */ -static inline struct buffer_head * +static struct buffer_head * lookup_bh_lru(struct block_device *bdev, sector_t block, int size) { struct buffer_head *ret = NULL; @@ -1519,7 +1326,7 @@ __find_get_block(struct block_device *bdev, sector_t block, int size) struct buffer_head *bh = lookup_bh_lru(bdev, block, size); if (bh == NULL) { - bh = __find_get_block_slow(bdev, block, size); + bh = __find_get_block_slow(bdev, block); if (bh) bh_lru_install(bh); } @@ -1559,13 +1366,16 @@ EXPORT_SYMBOL(__getblk); void __breadahead(struct block_device *bdev, sector_t block, int size) { struct buffer_head *bh = __getblk(bdev, block, size); - ll_rw_block(READA, 1, &bh); - brelse(bh); + if (likely(bh)) { + ll_rw_block(READA, 1, &bh); + brelse(bh); + } } EXPORT_SYMBOL(__breadahead); /** * __bread() - reads a specified block and returns the bh + * @bdev: the block_device to read from * @block: number of block * @size: size (in bytes) to read * @@ -1577,7 +1387,7 @@ __bread(struct block_device *bdev, sector_t block, int size) { struct buffer_head *bh = __getblk(bdev, block, size); - if (!buffer_uptodate(bh)) + if (likely(bh) && !buffer_uptodate(bh)) bh = __bread_slow(bh); return bh; } @@ -1609,8 +1419,7 @@ void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { bh->b_page = page; - if (offset >= PAGE_SIZE) - BUG(); + BUG_ON(offset >= PAGE_SIZE); if (PageHighMem(page)) /* * This catches illegal uses and preserves the offset: @@ -1624,7 +1433,7 @@ EXPORT_SYMBOL(set_bh_page); /* * Called when truncating a buffer on a page completely. */ -static inline void discard_buffer(struct buffer_head * bh) +static void discard_buffer(struct buffer_head * bh) { lock_buffer(bh); clear_buffer_dirty(bh); @@ -1636,35 +1445,6 @@ static inline void discard_buffer(struct buffer_head * bh) unlock_buffer(bh); } -/** - * try_to_release_page() - release old fs-specific metadata on a page - * - * @page: the page which the kernel is trying to free - * @gfp_mask: memory allocation flags (and I/O mode) - * - * The address_space is to try to release any data against the page - * (presumably at page->private). 
If the release was successful, return `1'. - * Otherwise return zero. - * - * The @gfp_mask argument specifies whether I/O may be performed to release - * this page (__GFP_IO), and whether the call may block (__GFP_WAIT). - * - * NOTE: @gfp_mask may go away, and this function may become non-blocking. - */ -int try_to_release_page(struct page *page, int gfp_mask) -{ - struct address_space * const mapping = page->mapping; - - BUG_ON(!PageLocked(page)); - if (PageWriteback(page)) - return 0; - - if (mapping && mapping->a_ops->releasepage) - return mapping->a_ops->releasepage(page, gfp_mask); - return try_to_free_buffers(page); -} -EXPORT_SYMBOL(try_to_release_page); - /** * block_invalidatepage - invalidate part of all of a buffer-backed page * @@ -1680,11 +1460,10 @@ EXPORT_SYMBOL(try_to_release_page); * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -int block_invalidatepage(struct page *page, unsigned long offset) +void block_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; - int ret = 1; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) @@ -1711,9 +1490,9 @@ int block_invalidatepage(struct page *page, unsigned long offset) * so real IO is not possible anymore. */ if (offset == 0) - ret = try_to_release_page(page, 0); + try_to_release_page(page, 0); out: - return ret; + return; } EXPORT_SYMBOL(block_invalidatepage); @@ -1727,7 +1506,7 @@ void create_empty_buffers(struct page *page, { struct buffer_head *bh, *head, *tail; - head = create_buffers(page, blocksize, 1); + head = alloc_page_buffers(page, blocksize, 1); bh = head; do { bh->b_state |= b_state; @@ -1747,7 +1526,7 @@ void create_empty_buffers(struct page *page, bh = bh->b_this_page; } while (bh != head); } - __set_page_buffers(page, head); + attach_page_buffers(page, head); spin_unlock(&page->mapping->private_lock); } EXPORT_SYMBOL(create_empty_buffers); @@ -1774,7 +1553,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block) might_sleep(); - old_bh = __find_get_block_slow(bdev, block, 0); + old_bh = __find_get_block_slow(bdev, block); if (old_bh) { clear_buffer_dirty(old_bh); wait_on_buffer(old_bh); @@ -1816,6 +1595,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, sector_t block; sector_t last_block; struct buffer_head *bh, *head; + const unsigned blocksize = 1 << inode->i_blkbits; int nr_underway = 0; BUG_ON(!PageLocked(page)); @@ -1823,7 +1603,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; if (!page_has_buffers(page)) { - create_empty_buffers(page, 1 << inode->i_blkbits, + create_empty_buffers(page, blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } @@ -1837,7 +1617,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, * handle that here by just cleaning them. 
*/ - block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); head = page_buffers(page); bh = head; @@ -1858,6 +1638,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, clear_buffer_dirty(bh); set_buffer_uptodate(bh); } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) goto recover; @@ -1873,7 +1654,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page, } while (bh != head); do { - get_bh(bh); if (!buffer_mapped(bh)) continue; /* @@ -1902,7 +1682,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page, */ BUG_ON(PageWriteback(page)); set_page_writeback(page); - unlock_page(page); do { struct buffer_head *next = bh->b_this_page; @@ -1910,9 +1689,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page, submit_bh(WRITE, bh); nr_underway++; } - put_bh(bh); bh = next; } while (bh != head); + unlock_page(page); err = 0; done: @@ -1951,7 +1730,6 @@ recover: bh = head; /* Recovery: lock and submit the mapped buffers */ do { - get_bh(bh); if (buffer_mapped(bh) && buffer_dirty(bh)) { lock_buffer(bh); mark_buffer_async_write(bh); @@ -1974,7 +1752,6 @@ recover: submit_bh(WRITE, bh); nr_underway++; } - put_bh(bh); bh = next; } while (bh != head); goto done; @@ -2015,11 +1792,11 @@ static int __block_prepare_write(struct inode *inode, struct page *page, if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) - goto out; + break; if (buffer_new(bh)) { - clear_buffer_new(bh); unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); if (PageUptodate(page)) { @@ -2059,10 +1836,17 @@ static int __block_prepare_write(struct inode *inode, struct page *page, while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) - return -EIO; + err = -EIO; } - return 0; -out: + if (!err) { + bh = head; + do { + if (buffer_new(bh)) + clear_buffer_new(bh); + } while ((bh = bh->b_this_page) != head); + return 0; + } + /* Error case: */ /* * Zero out any newly allocated blocks to avoid exposing stale * data. 
If BH_New is set, we know that the block was newly @@ -2082,6 +1866,7 @@ out: clear_buffer_new(bh); kaddr = kmap_atomic(page, KM_USER0); memset(kaddr+block_start, 0, bh->b_size); + flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); set_buffer_uptodate(bh); mark_buffer_dirty(bh); @@ -2143,8 +1928,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) int nr, i; int fully_mapped = 1; - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); blocksize = 1 << inode->i_blkbits; if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); @@ -2161,9 +1945,13 @@ int block_read_full_page(struct page *page, get_block_t *get_block) continue; if (!buffer_mapped(bh)) { + int err = 0; + fully_mapped = 0; if (iblock < lblock) { - if (get_block(inode, iblock, bh, 0)) + WARN_ON(bh->b_size != blocksize); + err = get_block(inode, iblock, bh, 0); + if (err) SetPageError(page); } if (!buffer_mapped(bh)) { @@ -2171,7 +1959,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block) memset(kaddr + i * blocksize, 0, blocksize); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); - set_buffer_uptodate(bh); + if (!err) + set_buffer_uptodate(bh); continue; } /* @@ -2224,15 +2013,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block) * truncates. Uses prepare/commit_write to allow the filesystem to * deal with the hole. */ -int generic_cont_expand(struct inode *inode, loff_t size) +static int __generic_cont_expand(struct inode *inode, loff_t size, + pgoff_t index, unsigned int offset) { struct address_space *mapping = inode->i_mapping; struct page *page; - unsigned long index, offset, limit; + unsigned long limit; int err; err = -EFBIG; - limit = current->rlim[RLIMIT_FSIZE].rlim_cur; + limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; if (limit != RLIM_INFINITY && size > (loff_t)limit) { send_sig(SIGXFSZ, current, 0); goto out; @@ -2240,24 +2030,24 @@ int generic_cont_expand(struct inode *inode, loff_t size) if (size > inode->i_sb->s_maxbytes) goto out; - offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - - /* ugh. in prepare/commit_write, if from==to==start of block, we - ** skip the prepare. make sure we never send an offset for the start - ** of a block - */ - if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { - offset++; - } - index = size >> PAGE_CACHE_SHIFT; err = -ENOMEM; page = grab_cache_page(mapping, index); if (!page) goto out; err = mapping->a_ops->prepare_write(NULL, page, offset, offset); - if (!err) { - err = mapping->a_ops->commit_write(NULL, page, offset, offset); + if (err) { + /* + * ->prepare_write() may have instantiated a few blocks + * outside i_size. Trim these off again. + */ + unlock_page(page); + page_cache_release(page); + vmtruncate(inode, inode->i_size); + goto out; } + + err = mapping->a_ops->commit_write(NULL, page, offset, offset); + unlock_page(page); page_cache_release(page); if (err > 0) @@ -2266,6 +2056,36 @@ out: return err; } +int generic_cont_expand(struct inode *inode, loff_t size) +{ + pgoff_t index; + unsigned int offset; + + offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */ + + /* ugh. in prepare/commit_write, if from==to==start of block, we + ** skip the prepare. make sure we never send an offset for the start + ** of a block + */ + if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { + /* caller must handle this extra byte. 
*/ + offset++; + } + index = size >> PAGE_CACHE_SHIFT; + + return __generic_cont_expand(inode, size, index, offset); +} + +int generic_cont_expand_simple(struct inode *inode, loff_t size) +{ + loff_t pos = size - 1; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1; + + /* prepare/commit_write can handle even if from==to==start of block. */ + return __generic_cont_expand(inode, size, index, offset); +} + /* * For moronic filesystems that do not allow holes in file. * We may have to extend the file. @@ -2307,8 +2127,7 @@ int cont_prepare_write(struct page *page, unsigned offset, memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom); flush_dcache_page(new_page); kunmap_atomic(kaddr, KM_USER0); - __block_commit_write(inode, new_page, - zerofrom, PAGE_CACHE_SIZE); + generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); unlock_page(new_page); page_cache_release(new_page); } @@ -2378,7 +2197,7 @@ int generic_commit_write(struct file *file, struct page *page, __block_commit_write(inode,page,from,to); /* * No need to use i_size_read() here, the i_size - * cannot change under us because we hold i_sem. + * cannot change under us because we hold i_mutex. */ if (pos > inode->i_size) { i_size_write(inode, pos); @@ -2452,6 +2271,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to, create = 1; if (block_start >= to) create = 0; + map_bh.b_size = blocksize; ret = get_block(inode, block_in_file + block_in_page, &map_bh, create); if (ret) @@ -2553,6 +2373,7 @@ failed: */ kaddr = kmap_atomic(page, KM_USER0); memset(kaddr, 0, PAGE_CACHE_SIZE); + flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); SetPageUptodate(page); set_page_dirty(page); @@ -2575,6 +2396,61 @@ int nobh_commit_write(struct file *file, struct page *page, } EXPORT_SYMBOL(nobh_commit_write); +/* + * nobh_writepage() - based on block_full_write_page() except + * that it tries to operate without attaching bufferheads to + * the page. + */ +int nobh_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc) +{ + struct inode * const inode = page->mapping->host; + loff_t i_size = i_size_read(inode); + const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + unsigned offset; + void *kaddr; + int ret; + + /* Is the page fully inside i_size? */ + if (page->index < end_index) + goto out; + + /* Is the page fully outside i_size? (truncate in progress) */ + offset = i_size & (PAGE_CACHE_SIZE-1); + if (page->index >= end_index+1 || !offset) { + /* + * The page may have dirty, unmapped buffers. For example, + * they may have been added in ext3_writepage(). Make them + * freeable here, so the page does not leak. + */ +#if 0 + /* Not really sure about this - do we need this ? */ + if (page->mapping->a_ops->invalidatepage) + page->mapping->a_ops->invalidatepage(page, offset); +#endif + unlock_page(page); + return 0; /* don't care */ + } + + /* + * The page straddles i_size. It must be zeroed out on each and every + * writepage invocation because it may be mmapped. "A file is mapped + * in multiples of the page size. For a file that is not a multiple of + * the page size, the remaining memory is zeroed when mapped, and + * writes to that region are not written out to the file." 
+ */ + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); +out: + ret = mpage_writepage(page, get_block, wbc); + if (ret == -EAGAIN) + ret = __block_write_full_page(inode, page, get_block, wbc); + return ret; +} +EXPORT_SYMBOL(nobh_writepage); + /* * This function assumes that ->prepare_write() uses nobh_prepare_write(). */ @@ -2586,7 +2462,7 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned to; struct page *page; - struct address_space_operations *a_ops = mapping->a_ops; + const struct address_space_operations *a_ops = mapping->a_ops; char *kaddr; int ret = 0; @@ -2620,7 +2496,7 @@ int block_truncate_page(struct address_space *mapping, pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned blocksize; - pgoff_t iblock; + sector_t iblock; unsigned length, pos; struct inode *inode = mapping->host; struct page *page; @@ -2636,7 +2512,7 @@ int block_truncate_page(struct address_space *mapping, return 0; length = blocksize - length; - iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; @@ -2657,6 +2533,7 @@ int block_truncate_page(struct address_space *mapping, err = 0; if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) goto unlock; @@ -2717,7 +2594,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. */ - block_invalidatepage(page, 0); + do_invalidatepage(page, 0); unlock_page(page); return 0; /* don't care */ } @@ -2743,6 +2620,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, struct inode *inode = mapping->host; tmp.b_state = 0; tmp.b_blocknr = 0; + tmp.b_size = 1 << inode->i_blkbits; get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } @@ -2814,21 +2692,22 @@ int submit_bh(int rw, struct buffer_head * bh) /** * ll_rw_block: low-level access to block devices (DEPRECATED) - * @rw: whether to %READ or %WRITE or maybe %READA (readahead) + * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) * @nr: number of &struct buffer_heads in the array * @bhs: array of pointers to &struct buffer_head * - * ll_rw_block() takes an array of pointers to &struct buffer_heads, - * and requests an I/O operation on them, either a %READ or a %WRITE. - * The third %READA option is described in the documentation for - * generic_make_request() which ll_rw_block() calls. + * ll_rw_block() takes an array of pointers to &struct buffer_heads, and + * requests an I/O operation on them, either a %READ or a %WRITE. The third + * %SWRITE is like %WRITE only we make sure that the *current* data in buffers + * are sent to disk. The fourth %READA option is described in the documentation + * for generic_make_request() which ll_rw_block() calls. * * This function drops any buffer that it cannot get a lock on (with the - * BH_Lock state bit), any buffer that appears to be clean when doing a - * write request, and any buffer that appears to be up-to-date when doing - * read request. Further it marks as clean buffers that are processed for - * writing (the buffer cache won't assume that they are actually clean until - * the buffer gets unlocked). 
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be + * clean when doing a write request, and any buffer that appears to be + * up-to-date when doing read request. Further it marks as clean buffers that + * are processed for writing (the buffer cache won't assume that they are + * actually clean until the buffer gets unlocked). * * ll_rw_block sets b_end_io to simple completion handler that marks * the buffer up-to-date (if approriate), unlocks the buffer and wakes @@ -2844,25 +2723,27 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) for (i = 0; i < nr; i++) { struct buffer_head *bh = bhs[i]; - if (test_set_buffer_locked(bh)) + if (rw == SWRITE) + lock_buffer(bh); + else if (test_set_buffer_locked(bh)) continue; - get_bh(bh); - if (rw == WRITE) { - bh->b_end_io = end_buffer_write_sync; + if (rw == WRITE || rw == SWRITE) { if (test_clear_buffer_dirty(bh)) { + bh->b_end_io = end_buffer_write_sync; + get_bh(bh); submit_bh(WRITE, bh); continue; } } else { - bh->b_end_io = end_buffer_read_sync; if (!buffer_uptodate(bh)) { + bh->b_end_io = end_buffer_read_sync; + get_bh(bh); submit_bh(rw, bh); continue; } } unlock_buffer(bh); - put_bh(bh); } } @@ -2928,7 +2809,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) bh = head; do { - if (buffer_write_io_error(bh)) + if (buffer_write_io_error(bh) && page->mapping) set_bit(AS_EIO, &page->mapping->flags); if (buffer_busy(bh)) goto failed; @@ -2966,17 +2847,23 @@ int try_to_free_buffers(struct page *page) spin_lock(&mapping->private_lock); ret = drop_buffers(page, &buffers_to_free); - if (ret) { - /* - * If the filesystem writes its buffers by hand (eg ext3) - * then we can have clean buffers against a dirty page. We - * clean the page here; otherwise later reattachment of buffers - * could encounter a non-uptodate page, which is unresolvable. - * This only applies in the rare case where try_to_free_buffers - * succeeds but the page is not freed. - */ - clear_page_dirty(page); - } + + /* + * If the filesystem writes its buffers by hand (eg ext3) + * then we can have clean buffers against a dirty page. We + * clean the page here; otherwise the VM will never notice + * that the filesystem did any IO at all. + * + * Also, during truncate, discard_buffer will have marked all + * the page's buffers clean. We discover that here and clean + * the page also. + * + * private_lock must be held over this entire operation in order + * to synchronise against __set_page_dirty_buffers and prevent the + * dirty bit from being lost. 
+ */ + if (ret) + cancel_dirty_page(page, PAGE_CACHE_SIZE); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { @@ -2992,7 +2879,7 @@ out: } EXPORT_SYMBOL(try_to_free_buffers); -int block_sync_page(struct page *page) +void block_sync_page(struct page *page) { struct address_space *mapping; @@ -3000,7 +2887,6 @@ int block_sync_page(struct page *page) mapping = page_mapping(page); if (mapping) blk_run_backing_dev(mapping->backing_dev_info, page); - return 0; } /* @@ -3033,7 +2919,7 @@ asmlinkage long sys_bdflush(int func, long data) /* * Buffer-head allocation */ -static kmem_cache_t *bh_cachep; +static struct kmem_cache *bh_cachep; /* * Once the number of bh's in the machine exceeds this level, we start @@ -3058,19 +2944,18 @@ static void recalc_bh_state(void) if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) return; __get_cpu_var(bh_accounting).ratelimit = 0; - for_each_cpu(i) + for_each_online_cpu(i) tot += per_cpu(bh_accounting, i).nr; buffer_heads_over_limit = (tot > max_buffer_heads); } -struct buffer_head *alloc_buffer_head(int gfp_flags) +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) { struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); if (ret) { - preempt_disable(); - __get_cpu_var(bh_accounting).nr++; + get_cpu_var(bh_accounting).nr++; recalc_bh_state(); - preempt_enable(); + put_cpu_var(bh_accounting); } return ret; } @@ -3080,15 +2965,14 @@ void free_buffer_head(struct buffer_head *bh) { BUG_ON(!list_empty(&bh->b_assoc_buffers)); kmem_cache_free(bh_cachep, bh); - preempt_disable(); - __get_cpu_var(bh_accounting).nr--; + get_cpu_var(bh_accounting).nr--; recalc_bh_state(); - preempt_enable(); + put_cpu_var(bh_accounting); } EXPORT_SYMBOL(free_buffer_head); static void -init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) +init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) { if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { @@ -3099,7 +2983,6 @@ init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) } } -#ifdef CONFIG_HOTPLUG_CPU static void buffer_exit_cpu(int cpu) { int i; @@ -3109,6 +2992,9 @@ static void buffer_exit_cpu(int cpu) brelse(b->bhs[i]); b->bhs[i] = NULL; } + get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; + per_cpu(bh_accounting, cpu).nr = 0; + put_cpu_var(bh_accounting); } static int buffer_cpu_notify(struct notifier_block *self, @@ -3118,18 +3004,17 @@ static int buffer_cpu_notify(struct notifier_block *self, buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } -#endif /* CONFIG_HOTPLUG_CPU */ void __init buffer_init(void) { - int i; int nrpages; bh_cachep = kmem_cache_create("buffer_head", - sizeof(struct buffer_head), 0, - SLAB_PANIC, init_buffer_head, NULL); - for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++) - init_waitqueue_head(&bh_wait_queue_heads[i].wqh); + sizeof(struct buffer_head), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| + SLAB_MEM_SPREAD), + init_buffer_head, + NULL); /* * Limit the bh occupancy to 10% of ZONE_NORMAL @@ -3149,7 +3034,6 @@ EXPORT_SYMBOL(block_sync_page); EXPORT_SYMBOL(block_truncate_page); EXPORT_SYMBOL(block_write_full_page); EXPORT_SYMBOL(cont_prepare_write); -EXPORT_SYMBOL(end_buffer_async_write); EXPORT_SYMBOL(end_buffer_read_sync); EXPORT_SYMBOL(end_buffer_write_sync); EXPORT_SYMBOL(file_fsync); @@ -3157,6 +3041,7 @@ EXPORT_SYMBOL(fsync_bdev); EXPORT_SYMBOL(generic_block_bmap); EXPORT_SYMBOL(generic_commit_write); EXPORT_SYMBOL(generic_cont_expand); 
+EXPORT_SYMBOL(generic_cont_expand_simple); EXPORT_SYMBOL(init_buffer); EXPORT_SYMBOL(invalidate_bdev); EXPORT_SYMBOL(ll_rw_block);
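
For reference, the net effect of the hunks above on the buffer-lock path: the hashed bh_wait_queue_heads[] scheme is dropped in favour of the generic bit-waitqueue API, keyed on the BH_Lock bit of bh->b_state. The consolidated result below is copied from the new (+) side of the diff with explanatory comments added; it is illustrative only, not an extra hunk, and assumes the 2.6-era wait_on_bit()/wait_on_bit_lock()/wake_up_bit() prototypes used by the patch.

/*
 * "action" callback run by wait_on_bit()/wait_on_bit_lock() each time a
 * waiter is about to sleep: kick the queue of the backing block device so
 * the I/O that is holding the buffer locked can complete, then sleep.
 */
static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

/* Sleep in sync_buffer() until BH_Lock can be set atomically. */
void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}

/* Clear BH_Lock and wake anyone sleeping on that bit's waitqueue. */
void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/* Wait for the lock holder to release the buffer, without taking the lock. */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

The waitqueue hashing that the removed bh_waitq_head()/wake_up_buffer() pair did by hand is performed inside the generic bit-waitqueue implementation, which is why the per-buffer bh_wait_queue bookkeeping disappears from this file.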