#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
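+/*
+ * Buffer_heads sleep on a small set of hashed waitqueues, so each wait
+ * entry records which buffer it is waiting for; bh_wake_function() uses
+ * this to skip entries queued for some other buffer.
+ */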
+struct bh_wait_queue {
+	struct buffer_head *bh;
+	wait_queue_t wait;
+};
+
+#define __DEFINE_BH_WAIT(name, b, f) \
+	struct bh_wait_queue name = { \
+		.bh = b, \
+		.wait = { \
+			.task = current, \
+			.flags = f, \
+			.func = bh_wake_function, \
+			.task_list = \
+				LIST_HEAD_INIT(name.wait.task_list), \
+		}, \
+	}
+#define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0)
+#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
+	__DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
+
/*
* Hashed waitqueue_head's for wait_on_buffer()
*/
	smp_mb();
	if (waitqueue_active(wq))
-		wake_up_all(wq);
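+		/* bh is the wake key: only this buffer's waiters are woken,
+		 * and at most one exclusive (lock) waiter at a time */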
+		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
}
EXPORT_SYMBOL(wake_up_buffer);
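+
+/*
+ * Wake function for the hashed buffer waitqueues: skip entries queued
+ * for a different buffer, and don't bother waking anyone while the
+ * buffer is (still or again) locked.
+ */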
+static int bh_wake_function(wait_queue_t *wait, unsigned mode,
+				int sync, void *key)
+{
+	struct buffer_head *bh = key;
+	struct bh_wait_queue *wq;
+
+	wq = container_of(wait, struct bh_wait_queue, wait);
+	if (wq->bh != bh || buffer_locked(bh))
+		return 0;
+	else
+		return autoremove_wake_function(wait, mode, sync, key);
+}
+
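+/*
+ * Kick the block device behind a locked buffer so any pending I/O is
+ * actually dispatched before the caller goes to sleep on it.
+ */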
+static void sync_buffer(struct buffer_head *bh)
+{
+	struct block_device *bd;
+
+	smp_mb();
+	bd = bh->b_bdev;
+	if (bd)
+		blk_run_address_space(bd->bd_inode->i_mapping);
+}
+
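+/*
+ * Sleep on the hashed waitqueue until the buffer lock can be taken.
+ * The exclusive wait entry means a wakeup on the shared queue rouses
+ * at most one lock waiter at a time.
+ */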
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+	wait_queue_head_t *wqh = bh_waitq_head(bh);
+	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+
+	do {
+		prepare_to_wait_exclusive(wqh, &wait.wait,
+						TASK_UNINTERRUPTIBLE);
+		if (buffer_locked(bh)) {
+			sync_buffer(bh);
+			io_schedule();
+		}
+	} while (test_set_buffer_locked(bh));
+	finish_wait(wqh, &wait.wait);
+}
+EXPORT_SYMBOL(__lock_buffer);
+
void fastcall unlock_buffer(struct buffer_head *bh)
{
	clear_buffer_locked(bh);
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_WAIT(wait);
+	DEFINE_BH_WAIT(wait, bh);
	do {
-		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (buffer_locked(bh)) {
-			struct block_device *bd;
-			smp_mb();
-			bd = bh->b_bdev;
-			if (bd)
-				blk_run_address_space(bd->bd_inode->i_mapping);
+			sync_buffer(bh);
			io_schedule();
		}
	} while (buffer_locked(bh));
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, &wait.wait);
}
static void
	if (page->mapping) {	/* Race with truncate? */
		if (!mapping->backing_dev_info->memory_backed)
			inc_page_state(nr_dirty);
-		radix_tree_tag_set(&mapping->page_tree, page->index,
+		radix_tree_tag_set(&mapping->page_tree,
+				page_index(page),
				PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	pgoff_t index;
	int sizebits;
-	/* Size must be multiple of hard sectorsize */
-	if (size & (bdev_hardsect_size(bdev)-1))
-		BUG();
-	if (size < 512 || size > PAGE_SIZE)
-		BUG();
-
	sizebits = -1;
	do {
		sizebits++;
struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
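+	/*
+	 * A bad size is reported and refused here rather than triggering
+	 * BUG() further down the allocation path.
+	 */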
+	/* Size must be multiple of hard sectorsize */
+	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+			(size < 512 || size > PAGE_SIZE))) {
+		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
+					size);
+		printk(KERN_ERR "hardsect size: %d\n",
+					bdev_hardsect_size(bdev));
+
+		dump_stack();
+		return NULL;
+	}
+
	for (;;) {
		struct buffer_head * bh;
int block_sync_page(struct page *page)
{
	struct address_space *mapping;
+
	smp_mb();
-	mapping = page->mapping;
-	blk_run_address_space(mapping);
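+	/*
+	 * page_mapping() also resolves swapcache pages, and may return
+	 * NULL (e.g. the page was truncated), hence the check below.
+	 */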
+	mapping = page_mapping(page);
+	if (mapping)
+		blk_run_backing_dev(mapping->backing_dev_info, page);
	return 0;
}
	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
-			0, init_buffer_head, NULL);
+			SLAB_PANIC, init_buffer_head, NULL);
	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);