vserver 2.0 rc7: fs/buffer.c

diff --git a/fs/buffer.c b/fs/buffer.c
index 3c40d63..7e9e409 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -39,6 +39,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
+#include <linux/mpage.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
@@ -217,7 +218,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
-               wmb();
+               smp_wmb();
 
                sync_inodes_sb(sb, 0);
                DQUOT_SYNC(sb);
@@ -234,7 +235,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
                sync_inodes_sb(sb, 1);
 
                sb->s_frozen = SB_FREEZE_TRANS;
-               wmb();
+               smp_wmb();
 
                sync_blockdev(sb->s_bdev);
 
@@ -262,7 +263,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
-               wmb();
+               smp_wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }
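
The wmb()-to-smp_wmb() conversions above are a correctness-neutral relaxation: the barrier only has to order the s_frozen store against later stores as seen by other CPUs, and smp_wmb() compiles away entirely on uniprocessor builds. A minimal sketch of the store-publish pattern these call sites follow (illustrative names, not the actual s_frozen readers):

    int payload;                        /* data being published */
    int published;                      /* flag another CPU polls */

    void publisher(void)
    {
            payload = 42;               /* write the data first */
            smp_wmb();                  /* order stores for SMP observers only */
            published = 1;              /* then expose the flag */
    }

    int consumer(void)
    {
            if (published) {
                    smp_rmb();          /* pairs with the writer's smp_wmb() */
                    return payload;     /* sees the data the writer published */
            }
            return -1;
    }
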
@@ -773,15 +774,14 @@ repeat:
 /**
  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
  *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
  * that I/O.
  *
- * Basically, this is a convenience function for fsync().  @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
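
As the corrected kerneldoc says, sync_mapping_buffers() is a convenience helper for ->fsync(). A hedged sketch of a caller, loosely modeled on the ext2 fsync path of this era (the "myfs" names are hypothetical):

    static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
    {
            struct inode *inode = dentry->d_inode;
            int ret;

            /* start I/O on the blockdev buffers this inode dirtied, and wait */
            ret = sync_mapping_buffers(inode->i_mapping);
            if (!(inode->i_state & I_DIRTY))
                    return ret;
            if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                    return ret;
            /* ... then write the inode itself, filesystem-specifically ... */
            return ret;
    }
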
@@ -875,15 +875,15 @@ int __set_page_dirty_buffers(struct page *page)
        spin_unlock(&mapping->private_lock);
 
        if (!TestSetPageDirty(page)) {
-               spin_lock_irq(&mapping->tree_lock);
+               write_lock_irq(&mapping->tree_lock);
                if (page->mapping) {    /* Race with truncate? */
-                       if (!mapping->backing_dev_info->memory_backed)
+                       if (mapping_cap_account_dirty(mapping))
                                inc_page_state(nr_dirty);
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                }
-               spin_unlock_irq(&mapping->tree_lock);
+               write_unlock_irq(&mapping->tree_lock);
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
        
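
Two separate fixes land in __set_page_dirty_buffers(): the lock calls now match the rwlock_t that mapping->tree_lock has become, and the removed backing_dev_info->memory_backed field is replaced by a capability test. As far as I recall backing-dev.h of this vintage, that helper reduces to roughly the following (a sketch; the exact definitions may differ):

    /* dirty-page accounting is skipped for BDIs that opt out (e.g. ramdisks) */
    static inline int bdi_cap_account_dirty(struct backing_dev_info *bdi)
    {
            return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
    }

    #define mapping_cap_account_dirty(mapping) \
            bdi_cap_account_dirty((mapping)->backing_dev_info)
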
@@ -1210,7 +1210,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
@@ -1262,6 +1262,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
  * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
  *
  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
  * backing page dirty, then tag the page as dirty in its address_space's radix
@@ -1500,6 +1501,7 @@ EXPORT_SYMBOL(__breadahead);
 
 /**
  *  __bread() - reads a specified block and returns the bh
+ *  @bdev: the block_device to read from
  *  @block: number of block
  *  @size: size (in bytes) to read
  * 
@@ -1807,7 +1809,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        } while (bh != head);
 
        do {
-               get_bh(bh);
                if (!buffer_mapped(bh))
                        continue;
                /*
@@ -1836,7 +1837,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
-       unlock_page(page);
 
        do {
                struct buffer_head *next = bh->b_this_page;
@@ -1844,9 +1844,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
+       unlock_page(page);
 
        err = 0;
 done:
@@ -1885,7 +1885,6 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               get_bh(bh);
                if (buffer_mapped(bh) && buffer_dirty(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write(bh);
@@ -1908,7 +1907,6 @@ recover:
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
        goto done;
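
The three hunks above drop the per-buffer get_bh()/put_bh() pair and keep the page locked until every buffer has been submitted. The apparent reasoning: the buffers being written are locked and marked async_write, and try_to_free_buffers() cannot run against a locked page, so the buffer ring cannot be torn down mid-loop and the extra references were redundant. Condensed, the submit path now looks like this (abridged from the function above):

    bh = head = page_buffers(page);
    do {
            struct buffer_head *next = bh->b_this_page;

            if (buffer_async_write(bh)) {
                    submit_bh(WRITE, bh);   /* completion may run immediately */
                    nr_underway++;
            }
            bh = next;
    } while (bh != head);
    unlock_page(page);                      /* only after all buffers are in flight */
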
@@ -1951,7 +1949,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (!buffer_mapped(bh)) {
                        err = get_block(inode, block, bh, 1);
                        if (err)
-                               goto out;
+                               break;
                        if (buffer_new(bh)) {
                                clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
@@ -1993,10 +1991,12 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
-                       return -EIO;
+                       err = -EIO;
        }
-       return 0;
-out:
+       if (!err)
+               return err;
+
+       /* Error case: */
        /*
         * Zero out any newly allocated blocks to avoid exposing stale
         * data.  If BH_New is set, we know that the block was newly
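
__block_prepare_write() now funnels every failure through a single exit: a get_block() error breaks out of the mapping loop instead of jumping straight out, and a buffer that fails its read-back records -EIO rather than returning early, so both cases reach the cleanup that zeroes newly allocated blocks instead of leaking stale data. The generic shape of that pattern, as a standalone sketch (map_block(), block_uptodate() and zero_new_blocks() are hypothetical helpers, not kernel API):

    int map_block(int i);               /* hypothetical helpers, not kernel API */
    int block_uptodate(int i);
    void zero_new_blocks(void);

    int prepare(int nblocks)
    {
            int i, err = 0;

            for (i = 0; i < nblocks; i++) {
                    err = map_block(i);
                    if (err)
                            break;          /* fall through to cleanup */
            }
            while (--i >= 0) {
                    if (!block_uptodate(i))
                            err = -EIO;     /* remember it, keep checking the rest */
            }
            if (!err)
                    return 0;

            zero_new_blocks();              /* one cleanup path for every failure */
            return err;
    }
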
@@ -2077,8 +2077,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        int nr, i;
        int fully_mapped = 1;
 
-       if (!PageLocked(page))
-               PAGE_BUG(page);
+       BUG_ON(!PageLocked(page));
        blocksize = 1 << inode->i_blkbits;
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -2095,9 +2094,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                        continue;
 
                if (!buffer_mapped(bh)) {
+                       int err = 0;
+
                        fully_mapped = 0;
                        if (iblock < lblock) {
-                               if (get_block(inode, iblock, bh, 0))
+                               err = get_block(inode, iblock, bh, 0);
+                               if (err)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
@@ -2105,7 +2107,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                memset(kaddr + i * blocksize, 0, blocksize);
                                flush_dcache_page(page);
                                kunmap_atomic(kaddr, KM_USER0);
-                               set_buffer_uptodate(bh);
+                               if (!err)
+                                       set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
@@ -2508,6 +2511,61 @@ int nobh_commit_write(struct file *file, struct page *page,
 }
 EXPORT_SYMBOL(nobh_commit_write);
 
+/*
+ * nobh_writepage() - based on block_write_full_page() except
+ * that it tries to operate without attaching bufferheads to
+ * the page.
+ */
+int nobh_writepage(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       struct inode * const inode = page->mapping->host;
+       loff_t i_size = i_size_read(inode);
+       const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+       unsigned offset;
+       void *kaddr;
+       int ret;
+
+       /* Is the page fully inside i_size? */
+       if (page->index < end_index)
+               goto out;
+
+       /* Is the page fully outside i_size? (truncate in progress) */
+       offset = i_size & (PAGE_CACHE_SIZE-1);
+       if (page->index >= end_index+1 || !offset) {
+               /*
+                * The page may have dirty, unmapped buffers.  For example,
+                * they may have been added in ext3_writepage().  Make them
+                * freeable here, so the page does not leak.
+                */
+#if 0
+               /* Not really sure about this - do we need it? */
+               if (page->mapping->a_ops->invalidatepage)
+                       page->mapping->a_ops->invalidatepage(page, offset);
+#endif
+               unlock_page(page);
+               return 0; /* don't care */
+       }
+
+       /*
+        * The page straddles i_size.  It must be zeroed out on each and every
+        * writepage invocation because it may be mmapped.  "A file is mapped
+        * in multiples of the page size.  For a file that is not a multiple of
+        * the  page size, the remaining memory is zeroed when mapped, and
+        * writes to that region are not written out to the file."
+        */
+       kaddr = kmap_atomic(page, KM_USER0);
+       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+       flush_dcache_page(page);
+       kunmap_atomic(kaddr, KM_USER0);
+out:
+       ret = mpage_writepage(page, get_block, wbc);
+       if (ret == -EAGAIN)
+               ret = __block_write_full_page(inode, page, get_block, wbc);
+       return ret;
+}
+EXPORT_SYMBOL(nobh_writepage);
+
 /*
  * This function assumes that ->prepare_write() uses nobh_prepare_write().
  */
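
The new nobh_writepage() export lets a filesystem write pages without attaching buffer_heads, falling back to __block_write_full_page() only when mpage_writepage() returns -EAGAIN. A hedged sketch of the wiring, loosely modeled on the nobh mount mode ext2 grew around this time (the "myfs" names are hypothetical):

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            /* myfs_get_block is the filesystem's own get_block_t */
            return nobh_writepage(page, myfs_get_block, wbc);
    }

    static struct address_space_operations myfs_nobh_aops = {
            .readpage       = block_read_full_page,
            .writepage      = myfs_writepage,
            .sync_page      = block_sync_page,
            .prepare_write  = nobh_prepare_write,
            .commit_write   = nobh_commit_write,
    };
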
@@ -2782,14 +2840,14 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 
                get_bh(bh);
                if (rw == WRITE) {
-                       bh->b_end_io = end_buffer_write_sync;
                        if (test_clear_buffer_dirty(bh)) {
+                               bh->b_end_io = end_buffer_write_sync;
                                submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
-                       bh->b_end_io = end_buffer_read_sync;
                        if (!buffer_uptodate(bh)) {
+                               bh->b_end_io = end_buffer_read_sync;
                                submit_bh(rw, bh);
                                continue;
                        }
@@ -2861,7 +2919,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 
        bh = head;
        do {
-               if (buffer_write_io_error(bh))
+               if (buffer_write_io_error(bh) && page->mapping)
                        set_bit(AS_EIO, &page->mapping->flags);
                if (buffer_busy(bh))
                        goto failed;
@@ -2996,7 +3054,7 @@ static void recalc_bh_state(void)
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
        
-struct buffer_head *alloc_buffer_head(int gfp_flags)
+struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
@@ -3059,7 +3117,7 @@ void __init buffer_init(void)
 
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
-                       SLAB_PANIC, init_buffer_head, NULL);
+                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL