Merge to Fedora kernel-2.6.17-1.2187_FC5 patched with stable patch-2.6.17.13-vs2...
diff --git a/fs/buffer.c b/fs/buffer.c
index 3c40d63..7f6d659 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -26,6 +26,7 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
+#include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
@@ -39,6 +40,8 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
+#include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
@@ -94,7 +97,7 @@ static void
 __clear_page_buffers(struct page *page)
 {
        ClearPagePrivate(page);
-       page->private = 0;
+       set_page_private(page, 0);
        page_cache_release(page);
 }
 
@@ -151,24 +154,13 @@ int sync_blockdev(struct block_device *bdev)
 {
        int ret = 0;
 
-       if (bdev) {
-               int err;
-
-               ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
-               err = filemap_fdatawait(bdev->bd_inode->i_mapping);
-               if (!ret)
-                       ret = err;
-       }
+       if (bdev)
+               ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
+static void __fsync_super(struct super_block *sb)
 {
        sync_inodes_sb(sb, 0);
        DQUOT_SYNC(sb);
@@ -180,7 +172,16 @@ int fsync_super(struct super_block *sb)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        sync_inodes_sb(sb, 1);
+}
 
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock.  Filesystem data as well as the underlying block
+ * device.  Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+       __fsync_super(sb);
        return sync_blockdev(sb->s_bdev);
 }
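/*
 * Illustrative sketch, not part of the patch: what filemap_write_and_wait()
 * is assumed to fold together.  It mirrors the open-coded sequence removed
 * from sync_blockdev() above (start writeback, wait for it, keep the first
 * error seen); it is not a quote of the mm/filemap.c implementation.
 */
static int example_write_and_wait(struct address_space *mapping)
{
	int ret = filemap_fdatawrite(mapping);	/* start writeback */
	int err = filemap_fdatawait(mapping);	/* wait for completion */

	if (!ret)				/* report the first error seen */
		ret = err;
	return ret;
}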
 
@@ -204,7 +205,7 @@ int fsync_bdev(struct block_device *bdev)
  * freeze_bdev  --  lock a filesystem and force it into a consistent state
  * @bdev:      blockdevice to lock
  *
- * This takes the block device bd_mount_sem to make sure no new mounts
+ * This takes the block device bd_mount_mutex to make sure no new mounts
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -213,28 +214,16 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 {
        struct super_block *sb;
 
-       down(&bdev->bd_mount_sem);
+       mutex_lock(&bdev->bd_mount_mutex);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
-               wmb();
-
-               sync_inodes_sb(sb, 0);
-               DQUOT_SYNC(sb);
-
-               lock_super(sb);
-               if (sb->s_dirt && sb->s_op->write_super)
-                       sb->s_op->write_super(sb);
-               unlock_super(sb);
+               smp_wmb();
 
-               if (sb->s_op->sync_fs)
-                       sb->s_op->sync_fs(sb, 1);
-
-               sync_blockdev(sb->s_bdev);
-               sync_inodes_sb(sb, 1);
+               __fsync_super(sb);
 
                sb->s_frozen = SB_FREEZE_TRANS;
-               wmb();
+               smp_wmb();
 
                sync_blockdev(sb->s_bdev);
 
@@ -262,12 +251,12 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
-               wmb();
+               smp_wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }
 
-       up(&bdev->bd_mount_sem);
+       mutex_unlock(&bdev->bd_mount_mutex);
 }
 EXPORT_SYMBOL(thaw_bdev);
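/*
 * Minimal usage sketch for the freeze/thaw protocol documented above; it is
 * not part of the patch, and the snapshot step is only a placeholder
 * comment, not a real kernel API.
 */
static void example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* sync the fs and block new writes */
	/* ... take the device-level snapshot here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
}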
 
@@ -277,7 +266,7 @@ EXPORT_SYMBOL(thaw_bdev);
  */
 static void do_sync(unsigned long wait)
 {
-       wakeup_bdflush(0);
+       wakeup_pdflush(0);
        sync_inodes(0);         /* All mappings, inodes and their blockdevs */
        DQUOT_SYNC(NULL);
        sync_supers();          /* Write the superblocks */
@@ -330,81 +319,59 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
        return ret;
 }
 
-asmlinkage long sys_fsync(unsigned int fd)
+long do_fsync(struct file *file, int datasync)
 {
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       mapping = file->f_mapping;
+       int ret;
+       int err;
+       struct address_space *mapping = file->f_mapping;
 
-       ret = -EINVAL;
        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
-               goto out_putf;
+               ret = -EINVAL;
+               goto out;
        }
 
        current->flags |= PF_SYNCWRITE;
        ret = filemap_fdatawrite(mapping);
 
        /*
-        * We need to protect against concurrent writers,
-        * which could cause livelocks in fsync_buffers_list
+        * We need to protect against concurrent writers, which could cause
+        * livelocks in fsync_buffers_list().
         */
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 0);
+       mutex_lock(&mapping->host->i_mutex);
+       err = file->f_op->fsync(file, file->f_dentry, datasync);
        if (!ret)
                ret = err;
-       up(&mapping->host->i_sem);
+       mutex_unlock(&mapping->host->i_mutex);
        err = filemap_fdatawait(mapping);
        if (!ret)
                ret = err;
        current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-       fput(file);
 out:
        return ret;
 }
 
-asmlinkage long sys_fdatasync(unsigned int fd)
+static long __do_fsync(unsigned int fd, int datasync)
 {
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
+       struct file *file;
+       int ret = -EBADF;
 
-       ret = -EBADF;
        file = fget(fd);
-       if (!file)
-               goto out;
-
-       ret = -EINVAL;
-       if (!file->f_op || !file->f_op->fsync)
-               goto out_putf;
-
-       mapping = file->f_mapping;
+       if (file) {
+               ret = do_fsync(file, datasync);
+               fput(file);
+       }
+       return ret;
+}
 
-       current->flags |= PF_SYNCWRITE;
-       ret = filemap_fdatawrite(mapping);
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 1);
-       if (!ret)
-               ret = err;
-       up(&mapping->host->i_sem);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-       current->flags &= ~PF_SYNCWRITE;
+asmlinkage long sys_fsync(unsigned int fd)
+{
+       return __do_fsync(fd, 0);
+}
 
-out_putf:
-       fput(file);
-out:
-       return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+       return __do_fsync(fd, 1);
 }
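/*
 * Hypothetical in-kernel caller of the new do_fsync() helper, shown only to
 * illustrate the datasync flag; it is not part of the patch.  Passing 1
 * gives the data-only behaviour of fdatasync(2), as sys_fdatasync() does.
 */
static long example_flush_file(struct file *file)
{
	return do_fsync(file, 1);	/* data-only sync of an already-held file */
}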
 
 /*
@@ -419,7 +386,7 @@ out:
  * private_lock is contended then so is mapping->tree_lock).
  */
 static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
 {
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
@@ -459,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
-                       (unsigned long long)block, (unsigned long long)bh->b_blocknr);
-               printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+                       (unsigned long long)block,
+                       (unsigned long long)bh->b_blocknr);
+               printk("b_state=0x%08lx, b_size=%zu\n",
+                       bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
 out_unlock:
@@ -504,13 +473,18 @@ out:
    pass does the actual I/O. */
 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
 {
+       struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+       if (mapping->nrpages == 0)
+               return;
+
        invalidate_bh_lrus();
        /*
         * FIXME: what about destroy_dirty_buffers?
         * We really want to use invalidate_inode_pages2() for
         * that, but not until that's cleaned up.
         */
-       invalidate_inode_pages(bdev->bd_inode->i_mapping);
+       invalidate_inode_pages(mapping);
 }
 
 /*
@@ -521,13 +495,13 @@ static void free_more_memory(void)
        struct zone **zones;
        pg_data_t *pgdat;
 
-       wakeup_bdflush(1024);
+       wakeup_pdflush(1024);
        yield();
 
-       for_each_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+       for_each_online_pgdat(pgdat) {
+               zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
-                       try_to_free_pages(zones, GFP_NOFS, 0);
+                       try_to_free_pages(zones, GFP_NOFS);
        }
 }
 
@@ -537,8 +511,8 @@ static void free_more_memory(void)
  */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;
@@ -560,7 +534,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
@@ -573,7 +549,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
 
        /*
         * If none of the buffers had errors and they are all
@@ -585,7 +562,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
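/*
 * The locking pattern introduced above, in miniature (illustration only,
 * not part of the patch).  The per-page completion lock is now a single
 * bit, BH_Uptodate_Lock, in the first buffer_head's b_state (see
 * <linux/bit_spinlock.h>), taken with interrupts disabled because the
 * end_io handlers run from interrupt context.
 */
static void example_walk_buffer_ring(struct page *page)
{
	struct buffer_head *first = page_buffers(page);
	unsigned long flags;

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	/* ... walk the bh->b_this_page ring safely here ... */
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
}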
 
@@ -596,8 +574,8 @@ still_busy:
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
 
@@ -618,7 +596,10 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                SetPageError(page);
        }
 
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
@@ -629,12 +610,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        }
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        end_page_writeback(page);
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
 
@@ -773,15 +756,14 @@ repeat:
 /**
  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
  *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
  * that I/O.
  *
- * Basically, this is a convenience function for fsync().  @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
@@ -821,8 +803,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
-               if (mapping->assoc_mapping != buffer_mapping)
-                       BUG();
+               BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
@@ -875,18 +856,18 @@ int __set_page_dirty_buffers(struct page *page)
        spin_unlock(&mapping->private_lock);
 
        if (!TestSetPageDirty(page)) {
-               spin_lock_irq(&mapping->tree_lock);
+               write_lock_irq(&mapping->tree_lock);
                if (page->mapping) {    /* Race with truncate? */
-                       if (!mapping->backing_dev_info->memory_backed)
+                       if (mapping_cap_account_dirty(mapping))
                                inc_page_state(nr_dirty);
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                }
-               spin_unlock_irq(&mapping->tree_lock);
+               write_unlock_irq(&mapping->tree_lock);
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+               return 1;
        }
-       
        return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
@@ -933,8 +914,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
-                               wait_on_buffer(bh);
-                               ll_rw_block(WRITE, 1, &bh);
+                               ll_rw_block(SWRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
@@ -1043,12 +1023,13 @@ try_again:
 
                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
+               bh->b_private = NULL;
                bh->b_size = size;
 
                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
 
-               bh->b_end_io = NULL;
+               init_buffer(bh, NULL, NULL);
        }
        return head;
 /*
@@ -1139,8 +1120,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        if (!page)
                return NULL;
 
-       if (!PageLocked(page))
-               BUG();
+       BUG_ON(!PageLocked(page));
 
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
@@ -1186,7 +1166,7 @@ failed:
  * some of those buffers may be aliases of filesystem data.
  * grow_dev_page() will go BUG() if this happens.
  */
-static inline int
+static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
        struct page *page;
@@ -1210,7 +1190,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
@@ -1262,6 +1242,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
  * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
  *
  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
  * backing page dirty, then tag the page as dirty in its address_space's radix
@@ -1411,7 +1392,7 @@ static void bh_lru_install(struct buffer_head *bh)
 /*
  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
  */
-static inline struct buffer_head *
+static struct buffer_head *
 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
 {
        struct buffer_head *ret = NULL;
@@ -1453,7 +1434,7 @@ __find_get_block(struct block_device *bdev, sector_t block, int size)
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
        if (bh == NULL) {
-               bh = __find_get_block_slow(bdev, block, size);
+               bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
@@ -1493,13 +1474,16 @@ EXPORT_SYMBOL(__getblk);
 void __breadahead(struct block_device *bdev, sector_t block, int size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
-       ll_rw_block(READA, 1, &bh);
-       brelse(bh);
+       if (likely(bh)) {
+               ll_rw_block(READA, 1, &bh);
+               brelse(bh);
+       }
 }
 EXPORT_SYMBOL(__breadahead);
 
 /**
  *  __bread() - reads a specified block and returns the bh
+ *  @bdev: the block_device to read from
  *  @block: number of block
  *  @size: size (in bytes) to read
  * 
@@ -1511,7 +1495,7 @@ __bread(struct block_device *bdev, sector_t block, int size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
 
-       if (!buffer_uptodate(bh))
+       if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
 }
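/*
 * Illustrative caller of __bread(), not part of the patch.  With the hunk
 * above, __getblk() may fail, so __bread() can return NULL and the check
 * matters; size must match the block device's current blocksize.
 */
static void example_read_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (bh) {
		/* ... inspect bh->b_data ... */
		brelse(bh);		/* drop the reference when done */
	}
}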
@@ -1543,8 +1527,7 @@ void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
 {
        bh->b_page = page;
-       if (offset >= PAGE_SIZE)
-               BUG();
+       BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
@@ -1558,7 +1541,7 @@ EXPORT_SYMBOL(set_bh_page);
 /*
  * Called when truncating a buffer on a page completely.
  */
-static inline void discard_buffer(struct buffer_head * bh)
+static void discard_buffer(struct buffer_head * bh)
 {
        lock_buffer(bh);
        clear_buffer_dirty(bh);
@@ -1585,7 +1568,7 @@ static inline void discard_buffer(struct buffer_head * bh)
  *
  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
-int try_to_release_page(struct page *page, int gfp_mask)
+int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {
        struct address_space * const mapping = page->mapping;
 
@@ -1614,11 +1597,10 @@ EXPORT_SYMBOL(try_to_release_page);
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
 {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
-       int ret = 1;
 
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
@@ -1645,12 +1627,20 @@ int block_invalidatepage(struct page *page, unsigned long offset)
         * so real IO is not possible anymore.
         */
        if (offset == 0)
-               ret = try_to_release_page(page, 0);
+               try_to_release_page(page, 0);
 out:
-       return ret;
+       return;
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+void do_invalidatepage(struct page *page, unsigned long offset)
+{
+       void (*invalidatepage)(struct page *, unsigned long);
+       invalidatepage = page->mapping->a_ops->invalidatepage ? :
+               block_invalidatepage;
+       (*invalidatepage)(page, offset);
+}
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
@@ -1708,7 +1698,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 
        might_sleep();
 
-       old_bh = __find_get_block_slow(bdev, block, 0);
+       old_bh = __find_get_block_slow(bdev, block);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
                wait_on_buffer(old_bh);
@@ -1750,6 +1740,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
+       const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
 
        BUG_ON(!PageLocked(page));
@@ -1757,7 +1748,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
        if (!page_has_buffers(page)) {
-               create_empty_buffers(page, 1 << inode->i_blkbits,
+               create_empty_buffers(page, blocksize,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
 
@@ -1771,7 +1762,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         * handle that here by just cleaning them.
         */
 
-       block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        head = page_buffers(page);
        bh = head;
 
@@ -1792,6 +1783,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
@@ -1807,7 +1799,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        } while (bh != head);
 
        do {
-               get_bh(bh);
                if (!buffer_mapped(bh))
                        continue;
                /*
@@ -1836,7 +1827,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
-       unlock_page(page);
 
        do {
                struct buffer_head *next = bh->b_this_page;
@@ -1844,9 +1834,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
+       unlock_page(page);
 
        err = 0;
 done:
@@ -1885,7 +1875,6 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               get_bh(bh);
                if (buffer_mapped(bh) && buffer_dirty(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write(bh);
@@ -1908,7 +1897,6 @@ recover:
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
        goto done;
@@ -1949,11 +1937,11 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
-                               goto out;
+                               break;
                        if (buffer_new(bh)) {
-                               clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
                                                        bh->b_blocknr);
                                if (PageUptodate(page)) {
@@ -1993,10 +1981,17 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
-                       return -EIO;
+                       err = -EIO;
        }
-       return 0;
-out:
+       if (!err) {
+               bh = head;
+               do {
+                       if (buffer_new(bh))
+                               clear_buffer_new(bh);
+               } while ((bh = bh->b_this_page) != head);
+               return 0;
+       }
+       /* Error case: */
        /*
         * Zero out any newly allocated blocks to avoid exposing stale
         * data.  If BH_New is set, we know that the block was newly
@@ -2077,8 +2072,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        int nr, i;
        int fully_mapped = 1;
 
-       if (!PageLocked(page))
-               PAGE_BUG(page);
+       BUG_ON(!PageLocked(page));
        blocksize = 1 << inode->i_blkbits;
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -2095,9 +2089,13 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                        continue;
 
                if (!buffer_mapped(bh)) {
+                       int err = 0;
+
                        fully_mapped = 0;
                        if (iblock < lblock) {
-                               if (get_block(inode, iblock, bh, 0))
+                               WARN_ON(bh->b_size != blocksize);
+                               err = get_block(inode, iblock, bh, 0);
+                               if (err)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
@@ -2105,7 +2103,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                memset(kaddr + i * blocksize, 0, blocksize);
                                flush_dcache_page(page);
                                kunmap_atomic(kaddr, KM_USER0);
-                               set_buffer_uptodate(bh);
+                               if (!err)
+                                       set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
@@ -2158,11 +2157,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
  * truncates.  Uses prepare/commit_write to allow the filesystem to
  * deal with the hole.  
  */
-int generic_cont_expand(struct inode *inode, loff_t size)
+static int __generic_cont_expand(struct inode *inode, loff_t size,
+                                pgoff_t index, unsigned int offset)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index, offset, limit;
+       unsigned long limit;
        int err;
 
        err = -EFBIG;
@@ -2174,24 +2174,24 @@ int generic_cont_expand(struct inode *inode, loff_t size)
        if (size > inode->i_sb->s_maxbytes)
                goto out;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we 
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
        err = -ENOMEM;
        page = grab_cache_page(mapping, index);
        if (!page)
                goto out;
        err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
-       if (!err) {
-               err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+       if (err) {
+               /*
+                * ->prepare_write() may have instantiated a few blocks
+                * outside i_size.  Trim these off again.
+                */
+               unlock_page(page);
+               page_cache_release(page);
+               vmtruncate(inode, inode->i_size);
+               goto out;
        }
+
+       err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+
        unlock_page(page);
        page_cache_release(page);
        if (err > 0)
@@ -2200,6 +2200,36 @@ out:
        return err;
 }
 
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+       pgoff_t index;
+       unsigned int offset;
+
+       offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
+
+       /* ugh.  in prepare/commit_write, if from==to==start of block, we
+       ** skip the prepare.  make sure we never send an offset for the start
+       ** of a block
+       */
+       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+               /* caller must handle this extra byte. */
+               offset++;
+       }
+       index = size >> PAGE_CACHE_SHIFT;
+
+       return __generic_cont_expand(inode, size, index, offset);
+}
+
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
+{
+       loff_t pos = size - 1;
+       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
+
+       /* prepare/commit_write can handle even if from==to==start of block. */
+       return __generic_cont_expand(inode, size, index, offset);
+}
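/*
 * Sketch of a typical caller of the new helper, not part of the patch: a
 * filesystem setattr path that must zero-extend the file when the new size
 * is larger than the current one.
 */
static int example_grow_file(struct inode *inode, loff_t new_size)
{
	if (new_size > i_size_read(inode))
		return generic_cont_expand_simple(inode, new_size);
	return 0;
}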
+
 /*
  * For moronic filesystems that do not allow holes in file.
  * We may have to extend the file.
@@ -2311,7 +2341,7 @@ int generic_commit_write(struct file *file, struct page *page,
        __block_commit_write(inode,page,from,to);
        /*
         * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_sem.
+        * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
@@ -2385,6 +2415,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
                create = 1;
                if (block_start >= to)
                        create = 0;
+               map_bh.b_size = blocksize;
                ret = get_block(inode, block_in_file + block_in_page,
                                        &map_bh, create);
                if (ret)
@@ -2508,6 +2539,61 @@ int nobh_commit_write(struct file *file, struct page *page,
 }
 EXPORT_SYMBOL(nobh_commit_write);
 
+/*
+ * nobh_writepage() - based on block_write_full_page() except
+ * that it tries to operate without attaching bufferheads to
+ * the page.
+ */
+int nobh_writepage(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       struct inode * const inode = page->mapping->host;
+       loff_t i_size = i_size_read(inode);
+       const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       unsigned offset;
+       void *kaddr;
+       int ret;
+
+       /* Is the page fully inside i_size? */
+       if (page->index < end_index)
+               goto out;
+
+       /* Is the page fully outside i_size? (truncate in progress) */
+       offset = i_size & (PAGE_CACHE_SIZE-1);
+       if (page->index >= end_index+1 || !offset) {
+               /*
+                * The page may have dirty, unmapped buffers.  For example,
+                * they may have been added in ext3_writepage().  Make them
+                * freeable here, so the page does not leak.
+                */
+#if 0
+               /* Not really sure about this  - do we need this ? */
+               if (page->mapping->a_ops->invalidatepage)
+                       page->mapping->a_ops->invalidatepage(page, offset);
+#endif
+               unlock_page(page);
+               return 0; /* don't care */
+       }
+
+       /*
+        * The page straddles i_size.  It must be zeroed out on each and every
+        * writepage invocation because it may be mmapped.  "A file is mapped
+        * in multiples of the page size.  For a file that is not a multiple of
+        * the  page size, the remaining memory is zeroed when mapped, and
+        * writes to that region are not written out to the file."
+        */
+       kaddr = kmap_atomic(page, KM_USER0);
+       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+       flush_dcache_page(page);
+       kunmap_atomic(kaddr, KM_USER0);
+out:
+       ret = mpage_writepage(page, get_block, wbc);
+       if (ret == -EAGAIN)
+               ret = __block_write_full_page(inode, page, get_block, wbc);
+       return ret;
+}
+EXPORT_SYMBOL(nobh_writepage);
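/*
 * Sketch of how a filesystem might wire up nobh_writepage(), not part of
 * the patch.  example_get_block is a stand-in for the filesystem's real
 * get_block_t; its trivial 1:1 mapping is purely for illustration.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* pretend block N lives at N */
	return 0;
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}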
+
 /*
  * This function assumes that ->prepare_write() uses nobh_prepare_write().
  */
@@ -2553,7 +2639,7 @@ int block_truncate_page(struct address_space *mapping,
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
-       pgoff_t iblock;
+       sector_t iblock;
        unsigned length, pos;
        struct inode *inode = mapping->host;
        struct page *page;
@@ -2569,7 +2655,7 @@ int block_truncate_page(struct address_space *mapping,
                return 0;
 
        length = blocksize - length;
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        
        page = grab_cache_page(mapping, index);
        err = -ENOMEM;
@@ -2590,6 +2676,7 @@ int block_truncate_page(struct address_space *mapping,
 
        err = 0;
        if (!buffer_mapped(bh)) {
+               WARN_ON(bh->b_size != blocksize);
                err = get_block(inode, iblock, bh, 0);
                if (err)
                        goto unlock;
@@ -2650,7 +2737,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
                 * they may have been added in ext3_writepage().  Make them
                 * freeable here, so the page does not leak.
                 */
-               block_invalidatepage(page, 0);
+               do_invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }
@@ -2676,6 +2763,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
        struct inode *inode = mapping->host;
        tmp.b_state = 0;
        tmp.b_blocknr = 0;
+       tmp.b_size = 1 << inode->i_blkbits;
        get_block(inode, block, &tmp, 0);
        return tmp.b_blocknr;
 }
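/*
 * Sketch of a filesystem ->bmap method built on generic_block_bmap(), not
 * part of the patch; example_get_block is the illustrative stand-in defined
 * in the nobh_writepage sketch above.  The hunk above initializes b_size
 * because get_block implementations may now consult it.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}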
@@ -2747,21 +2835,22 @@ int submit_bh(int rw, struct buffer_head * bh)
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE.  The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request.  Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing read request.  Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
@@ -2777,25 +2866,27 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (test_set_buffer_locked(bh))
+               if (rw == SWRITE)
+                       lock_buffer(bh);
+               else if (test_set_buffer_locked(bh))
                        continue;
 
-               get_bh(bh);
-               if (rw == WRITE) {
-                       bh->b_end_io = end_buffer_write_sync;
+               if (rw == WRITE || rw == SWRITE) {
                        if (test_clear_buffer_dirty(bh)) {
+                               bh->b_end_io = end_buffer_write_sync;
+                               get_bh(bh);
                                submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
-                       bh->b_end_io = end_buffer_read_sync;
                        if (!buffer_uptodate(bh)) {
+                               bh->b_end_io = end_buffer_read_sync;
+                               get_bh(bh);
                                submit_bh(rw, bh);
                                continue;
                        }
                }
                unlock_buffer(bh);
-               put_bh(bh);
        }
 }
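/*
 * Minimal sketch of the new SWRITE mode described above, not part of the
 * patch.  Unlike WRITE, SWRITE waits for the buffer lock, so the data that
 * reaches disk is the buffer's *current* contents; this is the pattern
 * fsync_buffers_list() switches to earlier in the diff.
 */
static void example_sync_one_buffer(struct buffer_head *bh)
{
	ll_rw_block(SWRITE, 1, &bh);	/* lock_buffer() + write if dirty */
	wait_on_buffer(bh);		/* wait for the write to complete */
}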
 
@@ -2861,7 +2952,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 
        bh = head;
        do {
-               if (buffer_write_io_error(bh))
+               if (buffer_write_io_error(bh) && page->mapping)
                        set_bit(AS_EIO, &page->mapping->flags);
                if (buffer_busy(bh))
                        goto failed;
@@ -2925,7 +3016,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
        struct address_space *mapping;
 
@@ -2933,7 +3024,6 @@ int block_sync_page(struct page *page)
        mapping = page_mapping(page);
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
-       return 0;
 }
 
 /*
@@ -2991,19 +3081,18 @@ static void recalc_bh_state(void)
        if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
                return;
        __get_cpu_var(bh_accounting).ratelimit = 0;
-       for_each_cpu(i)
+       for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
        
-struct buffer_head *alloc_buffer_head(int gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
-               preempt_disable();
-               __get_cpu_var(bh_accounting).nr++;
+               get_cpu_var(bh_accounting).nr++;
                recalc_bh_state();
-               preempt_enable();
+               put_cpu_var(bh_accounting);
        }
        return ret;
 }
@@ -3013,10 +3102,9 @@ void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
        kmem_cache_free(bh_cachep, bh);
-       preempt_disable();
-       __get_cpu_var(bh_accounting).nr--;
+       get_cpu_var(bh_accounting).nr--;
        recalc_bh_state();
-       preempt_enable();
+       put_cpu_var(bh_accounting);
 }
 EXPORT_SYMBOL(free_buffer_head);
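/*
 * The per-CPU accounting pattern used above, in miniature (illustration
 * only, with an invented counter).  get_cpu_var() is assumed to disable
 * preemption and return this CPU's instance; put_cpu_var() re-enables
 * preemption, replacing the explicit preempt_disable()/preempt_enable()
 * pairs removed above.
 */
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_count_event(void)
{
	get_cpu_var(example_counter)++;	/* safe: preemption is off here */
	put_cpu_var(example_counter);
}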
 
@@ -3042,6 +3130,9 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
+       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       per_cpu(bh_accounting, cpu).nr = 0;
+       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
@@ -3058,8 +3149,11 @@ void __init buffer_init(void)
        int nrpages;
 
        bh_cachep = kmem_cache_create("buffer_head",
-                       sizeof(struct buffer_head), 0,
-                       SLAB_PANIC, init_buffer_head, NULL);
+                                       sizeof(struct buffer_head), 0,
+                                       (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                                       SLAB_MEM_SPREAD),
+                                       init_buffer_head,
+                                       NULL);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
@@ -3087,6 +3181,7 @@ EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
 EXPORT_SYMBOL(ll_rw_block);