#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
+#include <linux/mpage.h>
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-struct bh_wait_queue {
- struct buffer_head *bh;
- wait_queue_t wait;
-};
-
-#define __DEFINE_BH_WAIT(name, b, f) \
- struct bh_wait_queue name = { \
- .bh = b, \
- .wait = { \
- .task = current, \
- .flags = f, \
- .func = bh_wake_function, \
- .task_list = \
- LIST_HEAD_INIT(name.wait.task_list),\
- }, \
- }
-#define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0)
-#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
- __DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
-
-/*
- * Hashed waitqueue_head's for wait_on_buffer()
- */
-#define BH_WAIT_TABLE_ORDER 7
-static struct bh_wait_queue_head {
- wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
-
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
-/*
- * Return the address of the waitqueue_head to be used for this
- * buffer_head
- */
-wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
-{
- return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
-}
-EXPORT_SYMBOL(bh_waitq_head);
-
-void wake_up_buffer(struct buffer_head *bh)
-{
- wait_queue_head_t *wq = bh_waitq_head(bh);
-
- smp_mb();
- if (waitqueue_active(wq))
- __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
-}
-EXPORT_SYMBOL(wake_up_buffer);
-
-static int bh_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
-{
- struct buffer_head *bh = key;
- struct bh_wait_queue *wq;
-
- wq = container_of(wait, struct bh_wait_queue, wait);
- if (wq->bh != bh || buffer_locked(bh))
- return 0;
- else
- return autoremove_wake_function(wait, mode, sync, key);
-}
-
-static void sync_buffer(struct buffer_head *bh)
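+/*
+ * The action function handed to wait_on_bit()/wait_on_bit_lock() below.
+ * It receives a pointer to the word containing the bit being waited on
+ * (here bh->b_state), kicks the backing device's queue and then sleeps
+ * in io_schedule() until wake_up_bit() is called on that bit.
+ */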
+static int sync_buffer(void *word)
{
struct block_device *bd;
+ struct buffer_head *bh
+ = container_of(word, struct buffer_head, b_state);
smp_mb();
bd = bh->b_bdev;
if (bd)
blk_run_address_space(bd->bd_inode->i_mapping);
+ io_schedule();
+ return 0;
}
void fastcall __lock_buffer(struct buffer_head *bh)
{
- wait_queue_head_t *wqh = bh_waitq_head(bh);
- DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
-
- do {
- prepare_to_wait_exclusive(wqh, &wait.wait,
- TASK_UNINTERRUPTIBLE);
- if (buffer_locked(bh)) {
- sync_buffer(bh);
- io_schedule();
- }
- } while (test_set_buffer_locked(bh));
- finish_wait(wqh, &wait.wait);
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
void fastcall unlock_buffer(struct buffer_head *bh)
{
clear_buffer_locked(bh);
smp_mb__after_clear_bit();
- wake_up_buffer(bh);
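+	/*
+	 * smp_mb__after_clear_bit() above orders the clearing of BH_Lock
+	 * against the waitqueue check inside wake_up_bit(), so a sleeper
+	 * that has just queued itself on the bit waitqueue cannot miss
+	 * this wakeup.
+	 */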
+ wake_up_bit(&bh->b_state, BH_Lock);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_queue_head_t *wqh = bh_waitq_head(bh);
- DEFINE_BH_WAIT(wait, bh);
-
- do {
- prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (buffer_locked(bh)) {
- sync_buffer(bh);
- io_schedule();
- }
- } while (buffer_locked(bh));
- finish_wait(wqh, &wait.wait);
-}
-
-static void
-__set_page_buffers(struct page *page, struct buffer_head *head)
-{
- page_cache_get(page);
- SetPagePrivate(page);
- page->private = (unsigned long)head;
+ wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
static void
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (printk_ratelimit()) {
+ if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
- wmb();
+ smp_wmb();
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
sync_inodes_sb(sb, 1);
sb->s_frozen = SB_FREEZE_TRANS;
- wmb();
+ smp_wmb();
sync_blockdev(sb->s_bdev);
if (sb->s_op->unlockfs)
sb->s_op->unlockfs(sb);
sb->s_frozen = SB_UNFROZEN;
- wmb();
+ smp_wmb();
wake_up(&sb->s_wait_unfrozen);
drop_super(sb);
}
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
struct inode * inode = dentry->d_inode;
struct super_block * sb;
- int ret;
+ int ret, err;
/* sync the inode to buffers */
- write_inode_now(inode, 0);
+ ret = write_inode_now(inode, 0);
/* sync the superblock to buffers */
sb = inode->i_sb;
unlock_super(sb);
/* .. finally sync the buffers to disk */
- ret = sync_blockdev(sb->s_bdev);
+ err = sync_blockdev(sb->s_bdev);
+ if (!ret)
+ ret = err;
return ret;
}
goto out_putf;
}
- /* We need to protect against concurrent writers.. */
- down(&mapping->host->i_sem);
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
+
+ /*
+ * We need to protect against concurrent writers,
+ * which could cause livelocks in fsync_buffers_list
+ */
+ down(&mapping->host->i_sem);
err = file->f_op->fsync(file, file->f_dentry, 0);
if (!ret)
ret = err;
+ up(&mapping->host->i_sem);
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
current->flags &= ~PF_SYNCWRITE;
- up(&mapping->host->i_sem);
out_putf:
fput(file);
mapping = file->f_mapping;
- down(&mapping->host->i_sem);
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
+ down(&mapping->host->i_sem);
err = file->f_op->fsync(file, file->f_dentry, 1);
if (!ret)
ret = err;
+ up(&mapping->host->i_sem);
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
current->flags &= ~PF_SYNCWRITE;
- up(&mapping->host->i_sem);
out_putf:
fput(file);
struct buffer_head *bh;
struct buffer_head *head;
struct page *page;
+ int all_mapped = 1;
index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
page = find_get_page(bd_mapping, index);
get_bh(bh);
goto out_unlock;
}
+ if (!buffer_mapped(bh))
+ all_mapped = 0;
bh = bh->b_this_page;
} while (bh != head);
- printk("__find_get_block_slow() failed. "
- "block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
- printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
+	/* We might be here because some of the buffers on this page are
+	 * not mapped.  This is due to various races between
+	 * file I/O on the block device and getblk().  It gets dealt with
+	 * elsewhere; don't complain if we had some unmapped buffers.
+	 */
+ if (all_mapped) {
+ printk("__find_get_block_slow() failed. "
+ "block=%llu, b_blocknr=%llu\n",
+ (unsigned long long)block, (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
+ }
out_unlock:
spin_unlock(&bd_mapping->private_lock);
page_cache_release(page);
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
- static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
set_buffer_uptodate(bh);
} else {
clear_buffer_uptodate(bh);
- buffer_io_error(bh);
+ if (printk_ratelimit())
+ buffer_io_error(bh);
SetPageError(page);
}
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
- static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
* PageLocked prevents anyone from starting writeback of a page which is
* under read I/O (PageWriteback is only ever set against a locked page).
*/
-void mark_buffer_async_read(struct buffer_head *bh)
+static void mark_buffer_async_read(struct buffer_head *bh)
{
bh->b_end_io = end_buffer_async_read;
set_buffer_async_read(bh);
}
-EXPORT_SYMBOL(mark_buffer_async_read);
void mark_buffer_async_write(struct buffer_head *bh)
{
* b_inode back.
*/
-void buffer_insert_list(spinlock_t *lock,
- struct buffer_head *bh, struct list_head *list)
-{
- spin_lock(lock);
- list_move_tail(&bh->b_assoc_buffers, list);
- spin_unlock(lock);
-}
-
/*
* The buffer's backing address_space's private_lock must be held
*/
/**
* sync_mapping_buffers - write out and wait upon a mapping's "associated"
* buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
*
* Starts I/O against the buffers at mapping->private_list, and waits upon
* that I/O.
*
- * Basically, this is a convenience function for fsync(). @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
*/
int sync_mapping_buffers(struct address_space *mapping)
{
if (mapping->assoc_mapping != buffer_mapping)
BUG();
}
- if (list_empty(&bh->b_assoc_buffers))
- buffer_insert_list(&buffer_mapping->private_lock,
- bh, &mapping->private_list);
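+	/*
+	 * b_assoc_buffers is protected by the backing blockdev mapping's
+	 * private_lock, so with buffer_insert_list() gone the list move
+	 * is done directly under that lock.
+	 */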
+ if (list_empty(&bh->b_assoc_buffers)) {
+ spin_lock(&buffer_mapping->private_lock);
+ list_move_tail(&bh->b_assoc_buffers,
+ &mapping->private_list);
+ spin_unlock(&buffer_mapping->private_lock);
+ }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
spin_unlock(&mapping->private_lock);
if (!TestSetPageDirty(page)) {
- spin_lock_irq(&mapping->tree_lock);
+ write_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
- if (!mapping->backing_dev_info->memory_backed)
+ if (mapping_cap_account_dirty(mapping))
inc_page_state(nr_dirty);
radix_tree_tag_set(&mapping->page_tree,
page_index(page),
PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ write_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
* the osync code to catch these locked, dirty buffers without requeuing
* any newly dirty buffers for write.
*/
-int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
+static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
struct list_head tmp;
* The retry flag is used to differentiate async IO (paging, swapping)
* which may not fail from ordinary buffer allocations.
*/
-static struct buffer_head *
-create_buffers(struct page * page, unsigned long size, int retry)
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
+ int retry)
{
struct buffer_head *bh, *head;
long offset;
free_more_memory();
goto try_again;
}
+EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
- __set_page_buffers(page, head);
+ attach_page_buffers(page, head);
}
/*
{
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
- unsigned int b_state;
-
- b_state = 1 << BH_Mapped;
- if (PageUptodate(page))
- b_state |= 1 << BH_Uptodate;
+ int uptodate = PageUptodate(page);
do {
- if (!(bh->b_state & (1 << BH_Mapped))) {
+ if (!buffer_mapped(bh)) {
init_buffer(bh, NULL, NULL);
bh->b_bdev = bdev;
bh->b_blocknr = block;
- bh->b_state = b_state;
+ if (uptodate)
+ set_buffer_uptodate(bh);
+ set_buffer_mapped(bh);
}
block++;
bh = bh->b_this_page;
if (page_has_buffers(page)) {
bh = page_buffers(page);
- if (bh->b_size == size)
+ if (bh->b_size == size) {
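+			/*
+			 * Buffers of the right size are already attached;
+			 * just (re)initialise their mapping state.
+			 */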
+ init_page_buffers(page, bdev, block, size);
return page;
+ }
if (!try_to_free_buffers(page))
goto failed;
}
/*
* Allocate some buffers for this page
*/
- bh = create_buffers(page, size, 0);
+ bh = alloc_page_buffers(page, size, 0);
if (!bh)
goto failed;
return 1;
}
-struct buffer_head *
+static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
/* Size must be multiple of hard sectorsize */
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
*
* mark_buffer_dirty() will set the dirty bit against the buffer, then set its
* backing page dirty, then tag the page as dirty in its address_space's radix
struct buffer_head *bhs[BH_LRU_SIZE];
};
-static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{0}};
+static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
#ifdef CONFIG_SMP
#define bh_lru_lock() local_irq_disable()
{
struct buffer_head *bh = __find_get_block(bdev, block, size);
+ might_sleep();
if (bh == NULL)
bh = __getblk_slow(bdev, block, size);
return bh;
/**
* __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
* @block: number of block
* @size: size (in bytes) to read
*
EXPORT_SYMBOL(__bread);
/*
- * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
- * unmount it only needs to ensure that all buffers from the target device are
- * invalidated on return and it doesn't need to worry about new buffers from
- * that device being added - the unmount code has to prevent that.
+ * invalidate_bh_lrus() is called rarely - but not only at unmount.
+ * This doesn't race because it runs on each CPU either in interrupt
+ * context or with preemption disabled.
*/
static void invalidate_bh_lru(void *arg)
{
{
struct buffer_head *bh, *head, *tail;
- head = create_buffers(page, blocksize, 1);
+ head = alloc_page_buffers(page, blocksize, 1);
bh = head;
do {
bh->b_state |= b_state;
bh = bh->b_this_page;
} while (bh != head);
}
- __set_page_buffers(page, head);
+ attach_page_buffers(page, head);
spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
{
struct buffer_head *old_bh;
+ might_sleep();
+
old_bh = __find_get_block_slow(bdev, block, 0);
if (old_bh) {
clear_buffer_dirty(old_bh);
* state inside lock_buffer().
*
* If block_write_full_page() is called for regular writeback
- * (called_for_sync() is false) then it will redirty a page which has a locked
- * buffer. This only can happen if someone has written the buffer directly,
- * with submit_bh(). At the address_space level PageWriteback prevents this
- * contention from occurring.
+ * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
+ * locked buffer. This only can happen if someone has written the buffer
+ * directly, with submit_bh(). At the address_space level PageWriteback
+ * prevents this contention from occurring.
*/
static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc)
} while (bh != head);
do {
- get_bh(bh);
if (!buffer_mapped(bh))
continue;
/*
}
} while ((bh = bh->b_this_page) != head);
- BUG_ON(PageWriteback(page));
- set_page_writeback(page); /* Keeps try_to_free_buffers() away */
- unlock_page(page);
-
/*
- * The page may come unlocked any time after the *first* submit_bh()
- * call. Be careful with its buffers.
+ * The page and its buffers are protected by PageWriteback(), so we can
+ * drop the bh refcounts early.
*/
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
+ unlock_page(page);
err = 0;
done:
if (uptodate)
SetPageUptodate(page);
end_page_writeback(page);
+ /*
+ * The page and buffer_heads can be released at any time from
+ * here on.
+ */
wbc->pages_skipped++; /* We didn't write this page */
}
return err;
bh = head;
/* Recovery: lock and submit the mapped buffers */
do {
- get_bh(bh);
if (buffer_mapped(bh) && buffer_dirty(bh)) {
lock_buffer(bh);
mark_buffer_async_write(bh);
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
goto done;
if (!buffer_mapped(bh)) {
err = get_block(inode, block, bh, 1);
if (err)
- goto out;
+ break;
if (buffer_new(bh)) {
clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
while(wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
- return -EIO;
+ err = -EIO;
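+			/* keep waiting on the remaining buffers so none
+			 * are left under I/O when the error is returned */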
}
- return 0;
-out:
+ if (!err)
+ return err;
+
+ /* Error case: */
/*
* Zero out any newly allocated blocks to avoid exposing stale
* data. If BH_New is set, we know that the block was newly
int nr, i;
int fully_mapped = 1;
- if (!PageLocked(page))
- PAGE_BUG(page);
+ BUG_ON(!PageLocked(page));
blocksize = 1 << inode->i_blkbits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
continue;
if (!buffer_mapped(bh)) {
+ int err = 0;
+
fully_mapped = 0;
if (iblock < lblock) {
- if (get_block(inode, iblock, bh, 0))
+ err = get_block(inode, iblock, bh, 0);
+ if (err)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
memset(kaddr + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- set_buffer_uptodate(bh);
+ if (!err)
+ set_buffer_uptodate(bh);
continue;
}
/*
int err;
err = -EFBIG;
- limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
if (limit != RLIM_INFINITY && size > (loff_t)limit) {
send_sig(SIGXFSZ, current, 0);
goto out;
memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
flush_dcache_page(new_page);
kunmap_atomic(kaddr, KM_USER0);
- __block_commit_write(inode, new_page,
- zerofrom, PAGE_CACHE_SIZE);
+ generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
unlock_page(new_page);
page_cache_release(new_page);
}
}
bh->b_state = map_bh.b_state;
atomic_set(&bh->b_count, 0);
- bh->b_this_page = 0;
+ bh->b_this_page = NULL;
bh->b_page = page;
bh->b_blocknr = map_bh.b_blocknr;
bh->b_size = blocksize;
}
EXPORT_SYMBOL(nobh_commit_write);
+/*
+ * nobh_writepage() - based on block_write_full_page() except
+ * that it tries to operate without attaching bufferheads to
+ * the page.
+ */
+int nobh_writepage(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc)
+{
+ struct inode * const inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ unsigned offset;
+ void *kaddr;
+ int ret;
+
+ /* Is the page fully inside i_size? */
+ if (page->index < end_index)
+ goto out;
+
+ /* Is the page fully outside i_size? (truncate in progress) */
+ offset = i_size & (PAGE_CACHE_SIZE-1);
+ if (page->index >= end_index+1 || !offset) {
+ /*
+ * The page may have dirty, unmapped buffers. For example,
+ * they may have been added in ext3_writepage(). Make them
+ * freeable here, so the page does not leak.
+ */
+#if 0
+ /* Not really sure about this - do we need this ? */
+ if (page->mapping->a_ops->invalidatepage)
+ page->mapping->a_ops->invalidatepage(page, offset);
+#endif
+ unlock_page(page);
+ return 0; /* don't care */
+ }
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each and every
+ * writepage invocation because it may be mmapped. "A file is mapped
+ * in multiples of the page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when mapped, and
+ * writes to that region are not written out to the file."
+ */
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+out:
+ ret = mpage_writepage(page, get_block, wbc);
+ if (ret == -EAGAIN)
+ ret = __block_write_full_page(inode, page, get_block, wbc);
+ return ret;
+}
+EXPORT_SYMBOL(nobh_writepage);
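+/*
+ * Sketch (not part of this patch): a filesystem whose get_block never
+ * needs buffer_head state could route writeback through nobh_writepage()
+ * from its address_space_operations.  "example_get_block" here is a
+ * hypothetical helper, not something defined in this file:
+ *
+ *	static int example_writepage(struct page *page,
+ *					struct writeback_control *wbc)
+ *	{
+ *		return nobh_writepage(page, example_get_block, wbc);
+ *	}
+ */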
+
/*
* This function assumes that ->prepare_write() uses nobh_prepare_write().
*/
/*
* The page straddles i_size. It must be zeroed out on each and every
 * writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
if (bio->bi_size)
return 1;
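+	/*
+	 * A barrier write that the device cannot honour fails with
+	 * -EOPNOTSUPP; record that on both the bio and the buffer_head
+	 * so that callers such as sync_dirty_buffer() can detect it.
+	 */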
+ if (err == -EOPNOTSUPP) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ set_bit(BH_Eopnotsupp, &bh->b_state);
+ }
+
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
return 0;
}
-void submit_bh(int rw, struct buffer_head * bh)
+int submit_bh(int rw, struct buffer_head * bh)
{
struct bio *bio;
+ int ret = 0;
BUG_ON(!buffer_locked(bh));
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
- /* Only clear out a write error when rewriting */
- if (test_set_buffer_req(bh) && rw == WRITE)
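+	/*
+	 * Ordered buffers are submitted as barrier writes, which the
+	 * block layer orders against the writes queued around them.
+	 */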
+ if (buffer_ordered(bh) && (rw == WRITE))
+ rw = WRITE_BARRIER;
+
+	/*
+	 * Only clear out a write error when rewriting.  Should this
+	 * include WRITE_SYNC as well?
+	 */
+ if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
clear_buffer_write_io_error(bh);
/*
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
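+	/*
+	 * Hold an extra reference so the bio is still valid for the
+	 * BIO_EOPNOTSUPP test below: submit_bio() may complete the I/O
+	 * and drop the last reference before returning.
+	 */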
+ bio_get(bio);
submit_bio(rw, bio);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+
+ bio_put(bio);
+ return ret;
}
/**
get_bh(bh);
if (rw == WRITE) {
- bh->b_end_io = end_buffer_write_sync;
if (test_clear_buffer_dirty(bh)) {
+ bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE, bh);
continue;
}
} else {
- bh->b_end_io = end_buffer_read_sync;
if (!buffer_uptodate(bh)) {
+ bh->b_end_io = end_buffer_read_sync;
submit_bh(rw, bh);
continue;
}
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
- * and then start new I/O and then wait upon it.
+ * and then start new I/O and then wait upon it. The caller must have a ref on
+ * the buffer_head.
*/
-void sync_dirty_buffer(struct buffer_head *bh)
+int sync_dirty_buffer(struct buffer_head *bh)
{
+ int ret = 0;
+
WARN_ON(atomic_read(&bh->b_count) < 1);
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE, bh);
+ ret = submit_bh(WRITE, bh);
wait_on_buffer(bh);
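+		/*
+		 * The device rejected the barrier; clear the sticky
+		 * BH_Eopnotsupp state so a plain rewrite can succeed,
+		 * and report the failure to the caller.
+		 */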
+ if (buffer_eopnotsupp(bh)) {
+ clear_buffer_eopnotsupp(bh);
+ ret = -EOPNOTSUPP;
+ }
+ if (!ret && !buffer_uptodate(bh))
+ ret = -EIO;
} else {
unlock_buffer(bh);
}
+ return ret;
}
/*
{
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh;
- int was_uptodate = 1;
bh = head;
do {
- if (buffer_write_io_error(bh))
+ if (buffer_write_io_error(bh) && page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
if (buffer_busy(bh))
goto failed;
- if (!buffer_uptodate(bh) && !buffer_req(bh))
- was_uptodate = 0;
bh = bh->b_this_page;
} while (bh != head);
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(int gfp_flags)
+struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
void __init buffer_init(void)
{
- int i;
int nrpages;
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
- SLAB_PANIC, init_buffer_head, NULL);
- for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
- init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(buffer_insert_list);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
-EXPORT_SYMBOL(fsync_buffers_list);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);