vserver 1.9.3

diff --git a/fs/buffer.c b/fs/buffer.c
index 0337733..81f3129 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
 #include <linux/cpu.h>
 #include <asm/bitops.h>
 
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
+struct bh_wait_queue {
+       struct buffer_head *bh;
+       wait_queue_t wait;
+};
+
+#define __DEFINE_BH_WAIT(name, b, f)                                   \
+       struct bh_wait_queue name = {                                   \
+               .bh     = b,                                            \
+               .wait   = {                                             \
+                               .task   = current,                      \
+                               .flags  = f,                            \
+                               .func   = bh_wake_function,             \
+                               .task_list =                            \
+                                       LIST_HEAD_INIT(name.wait.task_list),\
+                       },                                              \
+       }
+#define DEFINE_BH_WAIT(name, bh)       __DEFINE_BH_WAIT(name, bh, 0)
+#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
+               __DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
+
 /*
  * Hashed waitqueue_heads for wait_on_buffer()
  */
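
For reference, the hashed table behind bh_waitq_head() as it looks in mainline
of this vintage (a sketch; the table order is an assumption, only the names
bh_wait_queue_heads/.wqh are confirmed by the buffer_init() hunk below):

	#define BH_WAIT_TABLE_ORDER	7
	static struct bh_wait_queue_head {
		wait_queue_head_t wqh;
	} ____cacheline_aligned_in_smp bh_wait_queue_heads[1 << BH_WAIT_TABLE_ORDER];

	/* one of the shared queues, picked by hashing the bh pointer */
	wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
	{
		return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
	}
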
@@ -74,10 +95,50 @@ void wake_up_buffer(struct buffer_head *bh)
 
        smp_mb();
        if (waitqueue_active(wq))
-               wake_up_all(wq);
+               __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
 }
 EXPORT_SYMBOL(wake_up_buffer);
 
+static int bh_wake_function(wait_queue_t *wait, unsigned mode,
+                               int sync, void *key)
+{
+       struct buffer_head *bh = key;
+       struct bh_wait_queue *wq;
+
+       wq = container_of(wait, struct bh_wait_queue, wait);
+       if (wq->bh != bh || buffer_locked(bh))
+               return 0;
+       else
+               return autoremove_wake_function(wait, mode, sync, key);
+}
+
+static void sync_buffer(struct buffer_head *bh)
+{
+       struct block_device *bd;
+
+       smp_mb();
+       bd = bh->b_bdev;
+       if (bd)
+               blk_run_address_space(bd->bd_inode->i_mapping);
+}
+
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+       wait_queue_head_t *wqh = bh_waitq_head(bh);
+       DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+
+       do {
+               prepare_to_wait_exclusive(wqh, &wait.wait,
+                                       TASK_UNINTERRUPTIBLE);
+               if (buffer_locked(bh)) {
+                       sync_buffer(bh);
+                       io_schedule();
+               }
+       } while (test_set_buffer_locked(bh));
+       finish_wait(wqh, &wait.wait);
+}
+EXPORT_SYMBOL(__lock_buffer);
+
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
        clear_buffer_locked(bh);
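
Note that __lock_buffer() is only the sleeping slow path; the fast path is the
lock_buffer() wrapper in buffer_head.h, which in this era reads roughly as
below (a sketch assumed from context, not part of this diff):

	static inline void lock_buffer(struct buffer_head *bh)
	{
		might_sleep();
		if (test_set_buffer_locked(bh))
			__lock_buffer(bh);	/* contended: sleep until we own it */
	}
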
@@ -93,20 +154,16 @@ void fastcall unlock_buffer(struct buffer_head *bh)
 void __wait_on_buffer(struct buffer_head * bh)
 {
        wait_queue_head_t *wqh = bh_waitq_head(bh);
-       DEFINE_WAIT(wait);
+       DEFINE_BH_WAIT(wait, bh);
 
        do {
-               prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (buffer_locked(bh)) {
-                       struct block_device *bd;
-                       smp_mb();
-                       bd = bh->b_bdev;
-                       if (bd)
-                               blk_run_address_space(bd->bd_inode->i_mapping);
+                       sync_buffer(bh);
                        io_schedule();
                }
        } while (buffer_locked(bh));
-       finish_wait(wqh, &wait);
+       finish_wait(wqh, &wait.wait);
 }
 
 static void
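
For context on the keyed wakeup: the bh passed to __wake_up() above is handed
to every waiter's wake function by __wake_up_common() in kernel/sched.c, which
in kernels of this era reads roughly as follows (a paraphrase for reference,
not part of the patch):

	static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
				     int nr_exclusive, int sync, void *key)
	{
		struct list_head *tmp, *next;

		list_for_each_safe(tmp, next, &q->task_list) {
			wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
			unsigned flags = curr->flags;

			/* bh_wake_function() returns 0 for waiters hashed to
			 * the same queue but waiting on a different buffer, so
			 * those stay asleep and don't count against
			 * nr_exclusive */
			if (curr->func(curr, mode, sync, key) &&
			    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
				break;
		}
	}
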
@@ -157,7 +214,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (printk_ratelimit()) {
+               if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -669,12 +726,11 @@ still_busy:
  * PageLocked prevents anyone from starting writeback of a page which is
  * under read I/O (PageWriteback is only ever set against a locked page).
  */
-void mark_buffer_async_read(struct buffer_head *bh)
+static void mark_buffer_async_read(struct buffer_head *bh)
 {
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
 }
-EXPORT_SYMBOL(mark_buffer_async_read);
 
 void mark_buffer_async_write(struct buffer_head *bh)
 {
@@ -733,14 +789,6 @@ EXPORT_SYMBOL(mark_buffer_async_write);
  * b_inode back.
  */
 
-void buffer_insert_list(spinlock_t *lock,
-               struct buffer_head *bh, struct list_head *list)
-{
-       spin_lock(lock);
-       list_move_tail(&bh->b_assoc_buffers, list);
-       spin_unlock(lock);
-}
-
 /*
  * The buffer's backing address_space's private_lock must be held
  */
@@ -843,9 +891,12 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
                if (mapping->assoc_mapping != buffer_mapping)
                        BUG();
        }
-       if (list_empty(&bh->b_assoc_buffers))
-               buffer_insert_list(&buffer_mapping->private_lock,
-                               bh, &mapping->private_list);
+       if (list_empty(&bh->b_assoc_buffers)) {
+               spin_lock(&buffer_mapping->private_lock);
+               list_move_tail(&bh->b_assoc_buffers,
+                               &mapping->private_list);
+               spin_unlock(&buffer_mapping->private_lock);
+       }
 }
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
@@ -895,7 +946,8 @@ int __set_page_dirty_buffers(struct page *page)
                if (page->mapping) {    /* Race with truncate? */
                        if (!mapping->backing_dev_info->memory_backed)
                                inc_page_state(nr_dirty);
-                       radix_tree_tag_set(&mapping->page_tree, page->index,
+                       radix_tree_tag_set(&mapping->page_tree,
+                                               page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                }
                spin_unlock_irq(&mapping->tree_lock);
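
page_index() differs from the raw page->index only for swap cache pages, where
the index lives elsewhere; roughly, from mm.h of this era (a sketch, assumed
rather than shown in this diff):

	static inline pgoff_t page_index(struct page *page)
	{
		if (unlikely(PageSwapCache(page)))
			return page->private;	/* swap offset stored here */
		return page->index;
	}
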
@@ -925,7 +977,7 @@ EXPORT_SYMBOL(__set_page_dirty_buffers);
  * the osync code to catch these locked, dirty buffers without requeuing
  * any newly dirty buffers for write.
  */
-int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
@@ -1207,12 +1259,6 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        pgoff_t index;
        int sizebits;
 
-       /* Size must be multiple of hard sectorsize */
-       if (size & (bdev_hardsect_size(bdev)-1))
-               BUG();
-       if (size < 512 || size > PAGE_SIZE)
-               BUG();
-
        sizebits = -1;
        do {
                sizebits++;
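
The loop computes log2(PAGE_SIZE / size), i.e. how many bits to shift a block
number right to get a page index. Worked through for size = 1024 on a 4K page:

	/* 1024 << 0 = 1024 < 4096
	 * 1024 << 1 = 2048 < 4096
	 * 1024 << 2 = 4096, loop ends	-> sizebits = 2
	 * index = block >> 2: four 1K buffers per page */
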
@@ -1233,6 +1279,18 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       /* Size must be multiple of hard sectorsize */
+       if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+                       (size < 512 || size > PAGE_SIZE))) {
+               printk(KERN_ERR "getblk(): invalid block size %d requested\n",
+                                       size);
+               printk(KERN_ERR "hardsect size: %d\n",
+                                       bdev_hardsect_size(bdev));
+
+               dump_stack();
+               return NULL;
+       }
+
        for (;;) {
                struct buffer_head * bh;
 
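
The mask test above rejects any size that is not a multiple of the hardware
sector size; it relies on hardsect sizes being powers of two. Worked example:

	/* hardsect = 512 = 0x200, so hardsect - 1 = 0x1ff:
	 *   size = 4096 = 0x1000: 0x1000 & 0x1ff == 0   -> multiple, accepted
	 *   size =  520 = 0x208:  0x208  & 0x1ff == 0x8 -> rejected, NULL
	 */
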
@@ -1357,7 +1415,7 @@ struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
 };
 
-static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
+static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
 
 #ifdef CONFIG_SMP
 #define bh_lru_lock()  local_irq_disable()
@@ -1488,6 +1546,7 @@ __getblk(struct block_device *bdev, sector_t block, int size)
 {
        struct buffer_head *bh = __find_get_block(bdev, block, size);
 
+       might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
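
might_sleep() is a debugging annotation: with CONFIG_DEBUG_SPINLOCK_SLEEP it
warns (with a stack trace) when reached in atomic context, and is nearly free
otherwise. The kind of bug it now catches in __getblk(), as a hypothetical
sketch:

	/* hypothetical buggy caller that the annotation would flag */
	static struct buffer_head *bad_getblk(struct block_device *bdev,
					      sector_t block, spinlock_t *lock)
	{
		struct buffer_head *bh;

		spin_lock(lock);		/* atomic context ... */
		bh = __getblk(bdev, block, 512);/* ... but this may sleep */
		spin_unlock(lock);
		return bh;
	}
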
@@ -1525,10 +1584,9 @@ __bread(struct block_device *bdev, sector_t block, int size)
 EXPORT_SYMBOL(__bread);
 
 /*
- * invalidate_bh_lrus() is called rarely - at unmount.  Because it is only for
- * unmount it only needs to ensure that all buffers from the target device are
- * invalidated on return and it doesn't need to worry about new buffers from
- * that device being added - the unmount code has to prevent that.
+ * invalidate_bh_lrus() is called rarely - but no longer only at unmount.
+ * This doesn't race because it runs on each CPU either from irq context
+ * or with preemption disabled.
  */
 static void invalidate_bh_lru(void *arg)
 {
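
The caller the comment describes looks roughly like this in mainline of this
vintage (a sketch, not part of the hunk): on_each_cpu() runs the function on
every CPU and waits, so each invocation sees its own LRU with preemption off:

	static void invalidate_bh_lrus(void)
	{
		on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
	}
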
@@ -1714,6 +1772,8 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 {
        struct buffer_head *old_bh;
 
+       might_sleep();
+
        old_bh = __find_get_block_slow(bdev, block, 0);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
@@ -1744,10 +1804,10 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * state inside lock_buffer().
  *
  * If block_write_full_page() is called for regular writeback
- * (called_for_sync() is false) then it will redirty a page which has a locked
- * buffer.   This only can happen if someone has written the buffer directly,
- * with submit_bh().  At the address_space level PageWriteback prevents this
- * contention from occurring.
+ * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
+ * locked buffer.  This can only happen if someone has written the buffer
+ * directly, with submit_bh().  At the address_space level PageWriteback
+ * prevents this contention from occurring.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc)
@@ -1836,14 +1896,14 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                }
        } while ((bh = bh->b_this_page) != head);
 
+       /*
+        * The page and its buffers are protected by PageWriteback(), so we can
+        * drop the bh refcounts early.
+        */
        BUG_ON(PageWriteback(page));
-       set_page_writeback(page);       /* Keeps try_to_free_buffers() away */
+       set_page_writeback(page);
        unlock_page(page);
 
-       /*
-        * The page may come unlocked any time after the *first* submit_bh()
-        * call.  Be careful with its buffers.
-        */
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
@@ -1873,6 +1933,10 @@ done:
                if (uptodate)
                        SetPageUptodate(page);
                end_page_writeback(page);
+               /*
+                * The page and buffer_heads can be released at any time from
+                * here on.
+                */
                wbc->pages_skipped++;   /* We didn't write this page */
        }
        return err;
@@ -2424,7 +2488,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
                        }
                        bh->b_state = map_bh.b_state;
                        atomic_set(&bh->b_count, 0);
-                       bh->b_this_page = 0;
+                       bh->b_this_page = NULL;
                        bh->b_page = page;
                        bh->b_blocknr = map_bh.b_blocknr;
                        bh->b_size = blocksize;
@@ -2660,7 +2724,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
        /*
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
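
A worked example of the straddling case, assuming PAGE_CACHE_SIZE = 4096:

	/* i_size = 5000: page index 1 spans file bytes 4096..8191.
	 * offset = i_size & (PAGE_CACHE_SIZE - 1) = 5000 & 4095 = 904,
	 * so bytes 904..4095 of that page lie past EOF and must be
	 * zeroed on every writepage, since an mmap may have scribbled
	 * on them. */
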
@@ -2690,21 +2754,33 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
        if (bio->bi_size)
                return 1;
 
+       if (err == -EOPNOTSUPP) {
+               set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+               set_bit(BH_Eopnotsupp, &bh->b_state);
+       }
+
        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
        bio_put(bio);
        return 0;
 }
 
-void submit_bh(int rw, struct buffer_head * bh)
+int submit_bh(int rw, struct buffer_head * bh)
 {
        struct bio *bio;
+       int ret = 0;
 
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
        BUG_ON(!bh->b_end_io);
 
-       /* Only clear out a write error when rewriting */
-       if (test_set_buffer_req(bh) && rw == WRITE)
+       if (buffer_ordered(bh) && (rw == WRITE))
+               rw = WRITE_BARRIER;
+
+       /*
+        * Only clear out a write error when rewriting.  Should this
+        * include WRITE_SYNC as well?
+        */
+       if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
                clear_buffer_write_io_error(bh);
 
        /*
@@ -2726,7 +2802,14 @@ void submit_bh(int rw, struct buffer_head * bh)
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
 
+       bio_get(bio);
        submit_bio(rw, bio);
+
+       if (bio_flagged(bio, BIO_EOPNOTSUPP))
+               ret = -EOPNOTSUPP;
+
+       bio_put(bio);
+       return ret;
 }
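
The bio_get()/bio_put() pair deserves a note: the I/O can complete before
submit_bio() returns, at which point end_bio_bh_io_sync() drops the submission
reference. The extra reference taken here keeps the bio alive so the
BIO_EOPNOTSUPP test does not touch freed memory; the pattern in isolation:

	bio_get(bio);		/* pin: completion may run before we return */
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;	/* barrier rejected synchronously */
	bio_put(bio);		/* drop our pin; this may free the bio */
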
 
 /**
@@ -2785,20 +2868,30 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
- * and then start new I/O and then wait upon it.
+ * and then start new I/O and then wait upon it.  The caller must have a ref on
+ * the buffer_head.
  */
-void sync_dirty_buffer(struct buffer_head *bh)
+int sync_dirty_buffer(struct buffer_head *bh)
 {
+       int ret = 0;
+
        WARN_ON(atomic_read(&bh->b_count) < 1);
        lock_buffer(bh);
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               submit_bh(WRITE, bh);
+               ret = submit_bh(WRITE, bh);
                wait_on_buffer(bh);
+               if (buffer_eopnotsupp(bh)) {
+                       clear_buffer_eopnotsupp(bh);
+                       ret = -EOPNOTSUPP;
+               }
+               if (!ret && !buffer_uptodate(bh))
+                       ret = -EIO;
        } else {
                unlock_buffer(bh);
        }
+       return ret;
 }
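
A hedged sketch of how a filesystem might consume the new return value; the
set_buffer_ordered()/clear_buffer_ordered() accessors are assumed from the
buffer_ordered() test in submit_bh() above, and the fallback is an
illustration, not code from this patch:

	static int write_block_with_barrier(struct buffer_head *bh)
	{
		int err;

		set_buffer_ordered(bh);		/* ask for a barrier write */
		err = sync_dirty_buffer(bh);
		if (err == -EOPNOTSUPP) {	/* device can't do barriers */
			clear_buffer_ordered(bh);
			mark_buffer_dirty(bh);	/* redirty, retry plainly */
			err = sync_dirty_buffer(bh);
		}
		return err;
	}
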
 
 /*
@@ -2832,7 +2925,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 {
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh;
-       int was_uptodate = 1;
 
        bh = head;
        do {
@@ -2840,8 +2932,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
                        set_bit(AS_EIO, &page->mapping->flags);
                if (buffer_busy(bh))
                        goto failed;
-               if (!buffer_uptodate(bh) && !buffer_req(bh))
-                       was_uptodate = 0;
                bh = bh->b_this_page;
        } while (bh != head);
 
@@ -2905,9 +2995,11 @@ EXPORT_SYMBOL(try_to_free_buffers);
 int block_sync_page(struct page *page)
 {
        struct address_space *mapping;
+
        smp_mb();
-       mapping = page->mapping;
-       blk_run_address_space(mapping);
+       mapping = page_mapping(page);
+       if (mapping)
+               blk_run_backing_dev(mapping->backing_dev_info, page);
        return 0;
 }
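
page_mapping() differs from a raw page->mapping load: it returns NULL for
anonymous pages (whose mapping field is overloaded by rmap) and the global
swap address_space for swap cache pages, hence the explicit NULL check. The
semantics assumed here, as a sketch:

	/* page_mapping(page), this era:
	 *   swap cache page  -> &swapper_space
	 *   anonymous page   -> NULL
	 *   page cache page  -> page->mapping
	 */
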
 
@@ -3035,7 +3127,7 @@ void __init buffer_init(void)
 
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
-                       0, init_buffer_head, NULL);
+                       SLAB_PANIC, init_buffer_head, NULL);
        for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
                init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
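
SLAB_PANIC folds the failure check into the allocator: kmem_cache_create()
panics instead of returning NULL, so the boot code needs no error path. The
equivalent open-coded check it replaces would be:

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0, 0,
			init_buffer_head, NULL);
	if (!bh_cachep)
		panic("Failed to create buffer_head slab cache");
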
 
@@ -3056,14 +3148,12 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(buffer_insert_list);
 EXPORT_SYMBOL(cont_prepare_write);
 EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(fsync_bdev);
-EXPORT_SYMBOL(fsync_buffers_list);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand);