fedora core 6 1.2949 + vserver 2.2.0 [linux-2.6.git]

diff --git a/fs/buffer.c b/fs/buffer.c
index 9f27576..670a36e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -35,6 +35,7 @@
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -77,6 +78,7 @@ EXPORT_SYMBOL(__lock_buffer);
 
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
+       smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
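
The one-line addition completes a lock-release sequence: clear_buffer_locked() is the unlock, and on architectures where clear_bit() implies no memory barrier, stores made while the buffer was locked could otherwise be reordered past it. A minimal sketch of the full pattern (the function name and the example store are illustrative, not from the patch):

```c
#include <linux/buffer_head.h>

/* Release-side ordering for a bit lock, as unlock_buffer() now has it. */
static void unlock_buffer_sketch(struct buffer_head *bh)
{
	bh->b_end_io = NULL;        /* example store made under the lock    */
	smp_mb__before_clear_bit(); /* keep such stores before the unlock   */
	clear_buffer_locked(bh);    /* the unlock: clear_bit on BH_Lock     */
	smp_mb__after_clear_bit();  /* make the unlock visible before any
	                             * waiter re-checks the bit             */
	wake_up_bit(&bh->b_state, BH_Lock);
}
```
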
@@ -159,31 +161,6 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-static void __fsync_super(struct super_block *sb)
-{
-       sync_inodes_sb(sb, 0);
-       DQUOT_SYNC(sb);
-       lock_super(sb);
-       if (sb->s_dirt && sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-       if (sb->s_op->sync_fs)
-               sb->s_op->sync_fs(sb, 1);
-       sync_blockdev(sb->s_bdev);
-       sync_inodes_sb(sb, 1);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-       __fsync_super(sb);
-       return sync_blockdev(sb->s_bdev);
-}
-
 EXPORT_SYMBOL(fsync_super);
 
 /*
@@ -206,7 +183,7 @@ int fsync_bdev(struct block_device *bdev)
  * freeze_bdev  --  lock a filesystem and force it into a consistent state
  * @bdev:      blockdevice to lock
  *
- * This takes the block device bd_mount_mutex to make sure no new mounts
+ * This takes the block device bd_mount_sem to make sure no new mounts
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -215,7 +192,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 {
        struct super_block *sb;
 
-       mutex_lock(&bdev->bd_mount_mutex);
+       down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
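
The switch from bd_mount_mutex back to a semaphore (here and in the thaw_bdev() hunk below) is about lock ownership: a mutex, especially with debug checking enabled, is expected to be released by the task that took it, while a filesystem freeze is routinely taken by one process and released by another. A semaphore has no owner, so the cross-task pairing is legal. A hypothetical caller, assuming the usual snapshot sequence:

```c
#include <linux/buffer_head.h>

/* Illustrative only: the freeze is taken here, but the thaw may run
 * in a different task (e.g. whoever issues the unfreeze request). */
static void snapshot_blockdev(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev); /* down(&bd_mount_sem) */

	/* ... take the device snapshot ... */

	thaw_bdev(bdev, sb);	/* up(&bd_mount_sem): fine for a semaphore,
				 * an ownership violation for a mutex */
}
```
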
@@ -257,122 +234,10 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
                drop_super(sb);
        }
 
-       mutex_unlock(&bdev->bd_mount_mutex);
+       up(&bdev->bd_mount_sem);
 }
 EXPORT_SYMBOL(thaw_bdev);
 
-/*
- * sync everything.  Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-       wakeup_pdflush(0);
-       sync_inodes(0);         /* All mappings, inodes and their blockdevs */
-       DQUOT_SYNC(NULL);
-       sync_supers();          /* Write the superblocks */
-       sync_filesystems(0);    /* Start syncing the filesystems */
-       sync_filesystems(wait); /* Waitingly sync the filesystems */
-       sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
-       if (!wait)
-               printk("Emergency Sync complete\n");
-       if (unlikely(laptop_mode))
-               laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-       do_sync(1);
-       return 0;
-}
-
-void emergency_sync(void)
-{
-       pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-       struct inode * inode = dentry->d_inode;
-       struct super_block * sb;
-       int ret, err;
-
-       /* sync the inode to buffers */
-       ret = write_inode_now(inode, 0);
-
-       /* sync the superblock to buffers */
-       sb = inode->i_sb;
-       lock_super(sb);
-       if (sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-
-       /* .. finally sync the buffers to disk */
-       err = sync_blockdev(sb->s_bdev);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
-long do_fsync(struct file *file, int datasync)
-{
-       int ret;
-       int err;
-       struct address_space *mapping = file->f_mapping;
-
-       if (!file->f_op || !file->f_op->fsync) {
-               /* Why?  We can still call filemap_fdatawrite */
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = filemap_fdatawrite(mapping);
-
-       /*
-        * We need to protect against concurrent writers, which could cause
-        * livelocks in fsync_buffers_list().
-        */
-       mutex_lock(&mapping->host->i_mutex);
-       err = file->f_op->fsync(file, file->f_dentry, datasync);
-       if (!ret)
-               ret = err;
-       mutex_unlock(&mapping->host->i_mutex);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-out:
-       return ret;
-}
-
-static long __do_fsync(unsigned int fd, int datasync)
-{
-       struct file *file;
-       int ret = -EBADF;
-
-       file = fget(fd);
-       if (file) {
-               ret = do_fsync(file, datasync);
-               fput(file);
-       }
-       return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-       return __do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-       return __do_fsync(fd, 1);
-}
-
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers.  To get around this,
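
This hunk, together with the earlier removal of __fsync_super()/fsync_super(), strips every sync entry point out of buffer.c: do_sync(), sys_sync(), emergency_sync(), file_fsync(), do_fsync() and the sys_fsync()/sys_fdatasync() wrappers. The surviving EXPORT_SYMBOL(fsync_super) above indicates the definitions were relocated rather than deleted (mainline trees of this vintage put them in fs/super.c and fs/sync.c, though the diff itself does not show the destination). The datasync flag threaded through the removed code is the only difference between the two system calls, as seen from userspace (illustrative program, not kernel code):

```c
/* fsync(2) is the datasync == 0 path, fdatasync(2) the datasync == 1
 * path: the latter may skip metadata not needed to read the data
 * back, such as timestamps. */
#include <unistd.h>

int flush_file(int fd, int metadata_too)
{
	return metadata_too ? fsync(fd) : fdatasync(fd);
}
```
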
@@ -591,6 +456,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
+               set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
@@ -710,6 +576,10 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 static inline void __remove_assoc_queue(struct buffer_head *bh)
 {
        list_del_init(&bh->b_assoc_buffers);
+       WARN_ON(!bh->b_assoc_map);
+       if (buffer_write_io_error(bh))
+               set_bit(AS_EIO, &bh->b_assoc_map->flags);
+       bh->b_assoc_map = NULL;
 }
 
 int inode_has_buffers(struct inode *inode)
@@ -808,6 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
+               bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
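
Taken together, the last three hunks build an error-reporting path for inode-associated buffers: a failed async write now marks the buffer itself (set_buffer_write_io_error), mark_buffer_dirty_inode() records which address_space the buffer belongs to in the new b_assoc_map field, and __remove_assoc_queue() propagates the buffer's error into that mapping's AS_EIO flag when the buffer leaves the private list. Whoever later syncs the mapping can then report the failure; a sketch of that final step (the function name is invented, but the AS_EIO test mirrors how filemap-level waiters of this era report errors):

```c
#include <linux/pagemap.h>

/* After the private list is drained, the mapping remembers any write
 * error via AS_EIO; an fsync-style path surfaces it like this. */
static int report_assoc_io_error(struct address_space *mapping)
{
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		return -EIO;
	return 0;
}
```
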
@@ -857,20 +728,21 @@ int __set_page_dirty_buffers(struct page *page)
        }
        spin_unlock(&mapping->private_lock);
 
-       if (!TestSetPageDirty(page)) {
-               write_lock_irq(&mapping->tree_lock);
-               if (page->mapping) {    /* Race with truncate? */
-                       if (mapping_cap_account_dirty(mapping))
-                               __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       radix_tree_tag_set(&mapping->page_tree,
-                                               page_index(page),
-                                               PAGECACHE_TAG_DIRTY);
+       if (TestSetPageDirty(page))
+               return 0;
+
+       write_lock_irq(&mapping->tree_lock);
+       if (page->mapping) {    /* Race with truncate? */
+               if (mapping_cap_account_dirty(mapping)) {
+                       __inc_zone_page_state(page, NR_FILE_DIRTY);
+                       task_io_account_write(PAGE_CACHE_SIZE);
                }
-               write_unlock_irq(&mapping->tree_lock);
-               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-               return 1;
+               radix_tree_tag_set(&mapping->page_tree,
+                               page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       return 0;
+       write_unlock_irq(&mapping->tree_lock);
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       return 1;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
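
Two things happen in this rewrite of __set_page_dirty_buffers(). Structurally, the already-dirty case returns 0 up front, removing a level of nesting without changing behaviour. Functionally, a page newly dirtied on an accounting-capable mapping now also charges PAGE_CACHE_SIZE bytes to the current task via task_io_account_write(), the consumer of the task_io_accounting_ops.h include added in the first hunk. The hook itself is not shown in this diff; a plausible shape, with the field name as an assumption:

```c
/* Sketch of the per-task accounting hook (assumed definition; the
 * real one lives in the new <linux/task_io_accounting_ops.h>). */
static inline void task_io_account_write(size_t bytes)
{
	current->ioac.write_bytes += bytes;	/* bytes this task has
						 * dirtied for writeback */
}
```
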
@@ -904,7 +776,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
-               list_del_init(&bh->b_assoc_buffers);
+               __remove_assoc_queue(bh);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
@@ -925,7 +797,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               __remove_assoc_queue(bh);
+               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
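
The two fsync_buffers_list() hunks swap helpers in opposite directions, and the asymmetry is deliberate. Pass one takes a buffer off its mapping's private_list for good, so it must go through the new __remove_assoc_queue() to clear b_assoc_map and latch any write error into AS_EIO. Buffers parked on the local tmp list have already had b_assoc_map cleared, so pass two must use the bare list_del_init(): calling __remove_assoc_queue() there would trip WARN_ON(!bh->b_assoc_map) and, for a buffer carrying a write error, dereference the NULL pointer. The invariant, with a hypothetical check usable only before pass one:

```c
#include <linux/buffer_head.h>

/* Invariant the swap relies on (derived from the hunks above):
 *   on mapping->private_list  =>  bh->b_assoc_map == mapping
 *   on the local tmp list     =>  bh->b_assoc_map == NULL
 */
static void assert_still_associated(struct buffer_head *bh)
{
	WARN_ON(!bh->b_assoc_map);
}
```
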
@@ -1181,8 +1053,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        } while ((size << sizebits) < PAGE_SIZE);
 
        index = block >> sizebits;
-       block = index << sizebits;
 
+       /*
+        * Check for a block which wants to lie outside our maximum possible
+        * pagecache index.  (this comparison is done using sector_t types).
+        */
+       if (unlikely(index != block >> sizebits)) {
+               char b[BDEVNAME_SIZE];
+
+               printk(KERN_ERR "%s: requested out-of-range block %llu for "
+                       "device %s\n",
+                       __FUNCTION__, (unsigned long long)block,
+                       bdevname(bdev, b));
+               return -EIO;
+       }
+       block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
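
The new check guards against sector_t-to-pgoff_t truncation: block is a sector_t (64-bit even on 32-bit machines when large block devices are enabled), while the pagecache index is a pgoff_t (unsigned long, 32 bits on i386). Storing block >> sizebits into index can silently drop high bits; shifting back and comparing detects exactly that, and the next hunk teaches __getblk_slow() to turn the new negative return into a NULL buffer instead of looping forever through free_more_memory(). The idiom in isolation (hypothetical helper name):

```c
#include <linux/types.h>

/* Returns -EIO if the block's page index will not fit in pgoff_t. */
static int block_index_in_range(sector_t block, int sizebits)
{
	pgoff_t index = block >> sizebits;	/* may silently truncate */

	if ((sector_t)index != (block >> sizebits))
		return -EIO;
	return 0;
}
```
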
@@ -1209,12 +1094,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
        for (;;) {
                struct buffer_head * bh;
+               int ret;
 
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
 
-               if (!grow_buffers(bdev, block, size))
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
                        free_more_memory();
        }
 }
@@ -1289,6 +1178,7 @@ void __bforget(struct buffer_head *bh)
 
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
+               bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
@@ -1555,35 +1445,6 @@ static void discard_buffer(struct buffer_head * bh)
        unlock_buffer(bh);
 }
 
-/**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private).  If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
-       struct address_space * const mapping = page->mapping;
-
-       BUG_ON(!PageLocked(page));
-       if (PageWriteback(page))
-               return 0;
-       
-       if (mapping && mapping->a_ops->releasepage)
-               return mapping->a_ops->releasepage(page, gfp_mask);
-       return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
 /**
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
@@ -1635,14 +1496,6 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-void do_invalidatepage(struct page *page, unsigned long offset)
-{
-       void (*invalidatepage)(struct page *, unsigned long);
-       invalidatepage = page->mapping->a_ops->invalidatepage ? :
-               block_invalidatepage;
-       (*invalidatepage)(page, offset);
-}
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
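
Two generic pagecache dispatchers leave buffer.c in a row: try_to_release_page() and do_invalidatepage(). Neither is buffer-layer code, so the natural reading is that they moved into mm/ (mm/filemap.c and mm/truncate.c in mainline trees of this period; the diff does not show the destination). Both are fallback dispatchers: the removed do_invalidatepage() even uses the GNU `a ?: b` shorthand to default to block_invalidatepage() when a filesystem supplies no hook. How a buffer-backed filesystem typically leans on those defaults (illustrative aops, not from the patch):

```c
#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations example_aops = {
	/* .releasepage left unset: try_to_release_page() then falls
	 * back to try_to_free_buffers() for buffer-backed pages. */
	.invalidatepage	= block_invalidatepage,	/* explicit here, though
						 * the ?: fallback would
						 * pick it anyway */
};
```
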
@@ -2013,6 +1866,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                        clear_buffer_new(bh);
                        kaddr = kmap_atomic(page, KM_USER0);
                        memset(kaddr+block_start, 0, bh->b_size);
+                       flush_dcache_page(page);
                        kunmap_atomic(kaddr, KM_USER0);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
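
The flush_dcache_page() calls added here and in the next hunk close a cache-aliasing hole: on architectures with virtually indexed data caches (PA-RISC, some ARM and SPARC variants), a memset() through the temporary kmap_atomic() mapping can leave the zeroes visible only through the kernel alias, while a user mapping of the same page still sees stale data. The recurring pattern, pulled out as a helper (hypothetical name; mainline later grew similar helpers):

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Zero part of a (possibly highmem) page and keep user-space
 * aliases of it coherent. */
static void zero_page_range(struct page *page, unsigned int from,
			    unsigned int len)
{
	char *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr + from, 0, len);
	flush_dcache_page(page);	/* push zeroes past the kernel alias */
	kunmap_atomic(kaddr, KM_USER0);
}
```
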
@@ -2519,6 +2373,7 @@ failed:
         */
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
+       flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        SetPageUptodate(page);
        set_page_dirty(page);
@@ -2992,18 +2847,24 @@ int try_to_free_buffers(struct page *page)
 
        spin_lock(&mapping->private_lock);
        ret = drop_buffers(page, &buffers_to_free);
+
+       /*
+        * If the filesystem writes its buffers by hand (eg ext3)
+        * then we can have clean buffers against a dirty page.  We
+        * clean the page here; otherwise the VM will never notice
+        * that the filesystem did any IO at all.
+        *
+        * Also, during truncate, discard_buffer will have marked all
+        * the page's buffers clean.  We discover that here and clean
+        * the page also.
+        *
+        * private_lock must be held over this entire operation in order
+        * to synchronise against __set_page_dirty_buffers and prevent the
+        * dirty bit from being lost.
+        */
+       if (ret)
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
        spin_unlock(&mapping->private_lock);
-       if (ret) {
-               /*
-                * If the filesystem writes its buffers by hand (eg ext3)
-                * then we can have clean buffers against a dirty page.  We
-                * clean the page here; otherwise later reattachment of buffers
-                * could encounter a non-uptodate page, which is unresolvable.
-                * This only applies in the rare case where try_to_free_buffers
-                * succeeds but the page is not freed.
-                */
-               clear_page_dirty(page);
-       }
 out:
        if (buffers_to_free) {
                struct buffer_head *bh = buffers_to_free;
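
Two fixes ride together in the try_to_free_buffers() hunk. The cleanup now runs under mapping->private_lock, closing the window in which __set_page_dirty_buffers() could re-dirty the page between drop_buffers() and the old clear_page_dirty() call, losing the dirty bit (the rewritten comment spells this out). And clear_page_dirty() becomes cancel_dirty_page(page, PAGE_CACHE_SIZE): the byte argument strongly suggests the helper also unwinds dirty accounting, not just the flag. The helper is defined elsewhere in this tree; an assumed shape:

```c
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>

/* Assumed definition of cancel_dirty_page() (not shown in this diff). */
static void cancel_dirty_page_sketch(struct page *page,
				     unsigned int account_size)
{
	if (TestClearPageDirty(page) && page->mapping &&
	    mapping_cap_account_dirty(page->mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		task_io_account_cancelled_write(account_size);
	}
}
```
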
@@ -3058,7 +2919,7 @@ asmlinkage long sys_bdflush(int func, long data)
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -3111,7 +2972,7 @@ void free_buffer_head(struct buffer_head *bh)
 EXPORT_SYMBOL(free_buffer_head);
 
 static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                            SLAB_CTOR_CONSTRUCTOR) {
@@ -3122,7 +2983,6 @@ init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
        int i;
@@ -3144,7 +3004,6 @@ static int buffer_cpu_notify(struct notifier_block *self,
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
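
Finally, the CONFIG_HOTPLUG_CPU guards around buffer_exit_cpu() and buffer_cpu_notify() are dropped. That is safe when the registration macro degenerates to a no-op without hotplug support, as the cpu.h of this era arranges; buffer_init(), just past the end of this diff, is presumably where hotcpu_notifier(buffer_cpu_notify, 0) gets called. A rough sketch of the arrangement (the exact macro bodies are an assumption):

```c
/* Simplified shape of <linux/cpu.h> that makes the #ifdefs redundant. */
#ifdef CONFIG_HOTPLUG_CPU
#define hotcpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}
#else
/* evaluates fn, so the callback stays referenced and warning-free */
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#endif
```
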