+/*
+ * Wake-queue callback for buffer_head waiters: only wake the sleeper
+ * when the event is for the buffer it is actually waiting on and that
+ * buffer has been unlocked.  Returns non-zero iff a task was woken.
+ */
+static int bh_wake_function(wait_queue_t *wait, unsigned mode,
+			int sync, void *key)
+{
+	struct bh_wait_queue *wq = container_of(wait, struct bh_wait_queue, wait);
+	struct buffer_head *bh = key;
+
+	/* Right buffer and no longer locked: hand off to the generic waker. */
+	if (wq->bh == bh && !buffer_locked(bh))
+		return autoremove_wake_function(wait, mode, sync, key);
+	/* Not our buffer, or still locked -- keep sleeping. */
+	return 0;
+}
+
+/*
+ * Kick the block device backing @bh so that any queued I/O gets
+ * dispatched; used by waiters before going to sleep on a locked buffer.
+ */
+static void sync_buffer(struct buffer_head *bh)
+{
+	struct block_device *bd;
+
+	/*
+	 * NOTE(review): presumably this barrier orders our read of
+	 * b_bdev against the prior buffer-state test in the caller --
+	 * confirm which barrier it pairs with (likely in the unlock path).
+	 */
+	smp_mb();
+	bd = bh->b_bdev;
+	if (bd)
+		blk_run_address_space(bd->bd_inode->i_mapping);
+}
+
+/*
+ * __lock_buffer - sleep until the buffer lock on @bh is acquired.
+ *
+ * Registers as an exclusive waiter on the buffer's hashed wait queue,
+ * then loops: if the buffer is still locked, prod the underlying device
+ * (sync_buffer) and sleep in io_schedule(); retry until
+ * test_set_buffer_locked() wins the lock for us.
+ */
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+	wait_queue_head_t *wqh = bh_waitq_head(bh);
+	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+
+	do {
+		/*
+		 * Queue ourselves before re-testing the lock bit so a
+		 * concurrent unlock cannot slip between the test and the
+		 * sleep (classic prepare-to-wait ordering).
+		 */
+		prepare_to_wait_exclusive(wqh, &wait.wait,
+						TASK_UNINTERRUPTIBLE);
+		if (buffer_locked(bh)) {
+			sync_buffer(bh);
+			io_schedule();
+		}
+	} while (test_set_buffer_locked(bh));
+	finish_wait(wqh, &wait.wait);
+}
+EXPORT_SYMBOL(__lock_buffer);
+