/*
 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <asm/bitops.h>

static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
struct bh_wait_queue {
	struct buffer_head *bh;
	wait_queue_t wait;
};

#define __DEFINE_BH_WAIT(name, b, f)					\
	struct bh_wait_queue name = {					\
		.bh	= b,						\
		.wait	= {						\
			.task	= current,				\
			.flags	= f,					\
			.func	= bh_wake_function,			\
			.task_list =					\
				LIST_HEAD_INIT(name.wait.task_list),	\
		},							\
	}
#define DEFINE_BH_WAIT(name, bh)	__DEFINE_BH_WAIT(name, bh, 0)
#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
		__DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)

/*
 * Hashed waitqueue_head's for wait_on_buffer()
 */
#define BH_WAIT_TABLE_ORDER	7
static struct bh_wait_queue_head {
	wait_queue_head_t wqh;
} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

/*
 * Return the address of the waitqueue_head to be used for this
 * buffer_head
 */
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
{
	return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
}
EXPORT_SYMBOL(bh_waitq_head);
void wake_up_buffer(struct buffer_head *bh)
{
	wait_queue_head_t *wq = bh_waitq_head(bh);

	smp_mb();
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
}
EXPORT_SYMBOL(wake_up_buffer);
static int bh_wake_function(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct buffer_head *bh = key;
	struct bh_wait_queue *wq;

	wq = container_of(wait, struct bh_wait_queue, wait);
	if (wq->bh != bh || buffer_locked(bh))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static void sync_buffer(struct buffer_head *bh)
{
	struct block_device *bd;

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_queue_head_t *wqh = bh_waitq_head(bh);
	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);

	do {
		prepare_to_wait_exclusive(wqh, &wait.wait,
					TASK_UNINTERRUPTIBLE);
		if (buffer_locked(bh)) {
			sync_buffer(bh);
			io_schedule();
		}
	} while (test_set_buffer_locked(bh));
	finish_wait(wqh, &wait.wait);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_buffer(bh);
}
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_queue_head_t *wqh = bh_waitq_head(bh);
	DEFINE_BH_WAIT(wait, bh);

	do {
		prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (buffer_locked(bh)) {
			sync_buffer(bh);
			io_schedule();
		}
	} while (buffer_locked(bh));
	finish_wait(wqh, &wait.wait);
}
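/*
 * Editor's sketch (not part of the original file): how the primitives
 * above compose into a synchronous one-buffer write.  The function name
 * example_write_buffer_sync() is hypothetical; the pattern mirrors the
 * usual lock / submit / wait sequence used elsewhere in the kernel.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
	int ret = 0;

	lock_buffer(bh);			/* may sleep in __lock_buffer() */
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);			/* hold bh across the I/O */
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, bh);
		wait_on_buffer(bh);		/* sleeps in __wait_on_buffer() */
		if (!buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);		/* nothing to write */
	}
	return ret;
}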
static inline void
__set_page_buffers(struct page *page, struct buffer_head *head)
{
	page_cache_get(page);
	SetPagePrivate(page);
	page->private = (unsigned long)head;
}

static inline void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	page->private = 0;
	page_cache_release(page);
}
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev) {
		int err;

		ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
		err = filemap_fdatawait(bdev->bd_inode->i_mapping);
		if (!ret)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);

	return sync_blockdev(sb->s_bdev);
}
/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;

		sync_inodes_sb(sb, 0);
		if (sb->s_dirt && sb->s_op->write_super)
			sb->s_op->write_super(sb);
		if (sb->s_op->sync_fs)
			sb->s_op->sync_fs(sb, 1);

		sync_blockdev(sb->s_bdev);
		sync_inodes_sb(sb, 1);

		sb->s_frozen = SB_FREEZE_TRANS;

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	BUG_ON(sb->s_bdev != bdev);

	if (sb->s_op->unlockfs)
		sb->s_op->unlockfs(sb);
	sb->s_frozen = SB_UNFROZEN;

	wake_up(&sb->s_wait_unfrozen);

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
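/*
 * Editor's sketch (not in the original source): how a snapshot utility
 * might bracket its work with freeze_bdev()/thaw_bdev().  take_snapshot()
 * is a hypothetical placeholder for the device-level copy.
 */
extern int take_snapshot(struct block_device *bdev);	/* hypothetical */

static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* block writes, force consistency */
	err = take_snapshot(bdev);	/* safe: the fs is frozen */
	thaw_bdev(bdev, sb);		/* unfreeze, resume writes */
	return err;
}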
/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
	wakeup_bdflush(0);
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Waitingly sync the filesystems */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
	if (!wait)
		printk("Emergency Sync complete\n");
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
	do_sync(1);
	return 0;
}

void emergency_sync(void)
{
	pdflush_operation(do_sync, 0);
}
/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;
	int ret;

	/* sync the inode to buffers */
	write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);

	/* .. finally sync the buffers to disk */
	ret = sync_blockdev(sb->s_bdev);
	return ret;
}
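/*
 * Editor's sketch (not in the original source): a simple filesystem can
 * point its file_operations at file_fsync() directly.  The "myfs" name
 * is hypothetical.
 */
static struct file_operations myfs_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.fsync	= file_fsync,	/* the generic helper above */
};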
asmlinkage long sys_fsync(unsigned int fd)
{
	struct file *file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	mapping = file->f_mapping;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		goto out_putf;
	}

	/* We need to protect against concurrent writers.. */
	down(&mapping->host->i_sem);
	current->flags |= PF_SYNCWRITE;
	ret = filemap_fdatawrite(mapping);
	err = file->f_op->fsync(file, file->f_dentry, 0);
	if (!ret)
		ret = err;
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;
	up(&mapping->host->i_sem);

out_putf:
	fput(file);
out:
	return ret;
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	struct file *file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync)
		goto out_putf;

	mapping = file->f_mapping;

	down(&mapping->host->i_sem);
	current->flags |= PF_SYNCWRITE;
	ret = filemap_fdatawrite(mapping);
	err = file->f_op->fsync(file, file->f_dentry, 1);
	if (!ret)
		ret = err;
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;
	up(&mapping->host->i_sem);

out_putf:
	fput(file);
out:
	return ret;
}
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	struct page *page;
	struct buffer_head *bh;
	struct buffer_head *head;
	pgoff_t index;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	printk("__find_get_block_slow() failed. "
		"block=%llu, b_blocknr=%llu\n",
		(unsigned long long)block, (unsigned long long)bh->b_blocknr);
	printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
	printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted, thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the new disk inserted with the data belonging to
   the old, now-corrupted disk).  Also for the ramdisk, the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
	invalidate_bh_lrus();
	/*
	 * FIXME: what about destroy_dirty_buffers?
	 * We really want to use invalidate_inode_pages2() for
	 * that, but not until that's cleaned up.
	 */
	invalidate_inode_pages(bdev->bd_inode->i_mapping);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_bdflush(1024);

	for_each_pgdat(pgdat) {
		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
		if (*zones)
			try_to_free_pages(zones, GFP_NOFS, 0);
	}
}
/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on.  Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&page_uptodate_lock, flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	end_page_writeback(page);
	return;

still_busy:
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	return;
}
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}
EXPORT_SYMBOL(mark_buffer_async_read);

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
void buffer_insert_list(spinlock_t *lock,
		struct buffer_head *bh, struct list_head *list)
{
	spin_lock(lock);
	list_move_tail(&bh->b_assoc_buffers, list);
	spin_unlock(lock);
}

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
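/*
 * Editor's sketch (not in the original source): the O_SYNC pattern which
 * the comment above describes.  The buffer is assumed to already live on
 * 'list', the per-inode list protected by 'lock'.
 */
static int example_osync_write(spinlock_t *lock, struct list_head *list,
				struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);		/* queue the write now... */
	return osync_buffers_list(lock, list);	/* ...then wait for it */
}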
/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @buffer_mapping - the mapping which backs the buffers' data
 * @mapping - the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().  @buffer_mapping is
 * the blockdev which "owns" the buffers and @mapping is a file or directory
 * which needs those buffers to be written for a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
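/*
 * Editor's sketch (not in the original source): how a filesystem ties the
 * private_list machinery together.  A hypothetical myfs marks a freshly
 * dirtied indirect block against the owning inode; its fsync method then
 * writes and waits upon exactly those dependent buffers.
 */
static int myfs_fsync_inode(struct inode *inode, struct buffer_head *ind_bh)
{
	mark_buffer_dirty_inode(ind_bh, inode);	/* queue on ->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}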
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		if (mapping->assoc_mapping != buffer_mapping)
			BUG();
	}
	if (list_empty(&bh->b_assoc_buffers))
		buffer_insert_list(&buffer_mapping->private_lock,
				bh, &mapping->private_list);
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	if (!TestSetPageDirty(page)) {
		spin_lock_irq(&mapping->tree_lock);
		if (page->mapping) {	/* Race with truncate? */
			if (!mapping->backing_dev_info->memory_backed)
				inc_page_state(nr_dirty);
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				wait_on_buffer(bh);
				ll_rw_block(WRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		__remove_assoc_queue(bh);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
static struct buffer_head *
create_buffers(struct page * page, unsigned long size, int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		bh->b_end_io = NULL;
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	__set_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	unsigned int b_state;

	b_state = 1 << BH_Mapped;
	if (PageUptodate(page))
		b_state |= 1 << BH_Uptodate;

	do {
		if (!(bh->b_state & (1 << BH_Mapped))) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			bh->b_state = b_state;
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page)
		return NULL;

	if (!PageLocked(page))
		BUG();

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size)
			return page;
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = create_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;
	block = index << sizebits;

	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		if (!grow_buffers(bdev, block, size))
			free_more_memory();
	}
}
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */
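/*
 * Editor's sketch (not in the original source) of the bread() behaviour
 * described above: the buffer becomes uptodate, its page need not.
 */
static void example_bread_state(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 0, 512);	/* read block 0 */

	if (bh) {
		BUG_ON(!buffer_uptodate(bh));	/* always true after __bread */
		/*
		 * bh->b_page may still be !PageUptodate() here; a later
		 * block_read_full_page() would find the uptodate buffer
		 * and set the page uptodate without issuing any I/O.
		 */
		brelse(bh);
	}
}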
/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty_nobuffers(bh->b_page);
}
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}
/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static inline struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block, size);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
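/*
 * Editor's sketch (not in the original source): the usual metadata-write
 * pattern built on __getblk().  The block is instantiated without reading
 * it, filled with new contents, dirtied, and released.
 */
static void example_write_block(struct block_device *bdev, sector_t blocknr)
{
	struct buffer_head *bh = __getblk(bdev, blocknr, 512);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* fill with new contents */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* writeback will pick it up */
	brelse(bh);
}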
/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	ll_rw_block(READA, 1, &bh);
	brelse(bh);
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (!buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
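/*
 * Editor's sketch (not in the original source): reading one 1k block and
 * inspecting its contents.  __bread() returns NULL on I/O failure.
 */
static int example_read_block(struct block_device *bdev, sector_t blocknr)
{
	struct buffer_head *bh = __bread(bdev, blocknr, 1024);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	/* bh->b_data now points at the 1024 bytes of block data */
	brelse(bh);			/* drop the reference __bread took */
	return 0;
}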
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

static void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	if (offset >= PAGE_SIZE)
		BUG();
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static inline void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	unlock_buffer(bh);
}
/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, int gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
int block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	int ret = 1;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		ret = try_to_release_page(page, 0);
out:
	return ret;
}
EXPORT_SYMBOL(block_invalidatepage);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = create_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	__set_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	old_bh = __find_get_block_slow(bdev, block, 0);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, 1 << inode->i_blkbits,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		get_bh(bh);
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		put_bh(bh);
		bh = next;
	} while (bh != head);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (uptodate) {
			SetPageUptodate(page);
			end_page_writeback(page);
			/*
			 * The page and buffer_heads can be released at any time from
			 * here on.
			 */
			wbc->pages_skipped++;	/* We didn't write this page */
		}
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		put_bh(bh);
		bh = next;
	} while (bh != head);
	goto done;
}
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			err = get_block(inode, block, bh, 1);
			if (err)
				goto out;
			if (buffer_new(bh)) {
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					set_buffer_uptodate(bh);
					continue;
				}
				if (block_end > to || block_start < from) {
					void *kaddr;

					kaddr = kmap_atomic(page, KM_USER0);
					if (block_end > to)
						memset(kaddr+to, 0,
							block_end-to);
					if (block_start < from)
						memset(kaddr+block_start,
							0, from-block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
				}
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			return -EIO;
	}
	return 0;
out:
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale
	 * data.  If BH_New is set, we know that the block was newly
	 * allocated in the above loop.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start+blocksize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;
		if (buffer_new(bh)) {
			void *kaddr;

			clear_buffer_new(bh);
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr+block_start, 0, bh->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
	return err;
}

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read().  Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality.  This covers most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	if (!PageLocked(page))
		PAGE_BUG(page);
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			fully_mapped = 0;
			if (iblock < lblock) {
				if (get_block(inode, iblock, bh, 0))
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				void *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + i * blocksize, 0, blocksize);
				flush_dcache_page(page);
				kunmap_atomic(kaddr, KM_USER0);
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well.  But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
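/*
 * Editor's sketch (not in the original source): a filesystem's readpage
 * method is typically just this thin wrapper.  myfs_get_block() is a
 * hypothetical mapping function with the get_block_t signature.
 */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}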
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses prepare/commit_write to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index, offset, limit;
	int err;

	err = -EFBIG;
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
	/* ugh.  in prepare/commit_write, if from==to==start of block, we
	** skip the prepare.  make sure we never send an offset for the start
	** of a block
	*/
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
		offset++;
	}
	index = size >> PAGE_CACHE_SHIFT;
	err = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;
	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
	if (!err) {
		err = mapping->a_ops->commit_write(NULL, page, offset, offset);
	}
	unlock_page(page);
	page_cache_release(page);
	if (err > 0)
		err = 0;
out:
	return err;
}
/*
 * For moronic filesystems that do not allow holes in a file.
 * We may have to extend the file.
 */
int cont_prepare_write(struct page *page, unsigned offset,
		unsigned to, get_block_t *get_block, loff_t *bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct page *new_page;
	pgoff_t pgpos;
	long status;
	unsigned zerofrom;
	unsigned blocksize = 1 << inode->i_blkbits;
	void *kaddr;

	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
		status = -ENOMEM;
		new_page = grab_cache_page(mapping, pgpos);
		if (!new_page)
			goto out;
		/* we might sleep */
		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
			unlock_page(new_page);
			page_cache_release(new_page);
			continue;
		}
		zerofrom = *bytes & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		status = __block_prepare_write(inode, new_page, zerofrom,
						PAGE_CACHE_SIZE, get_block);
		if (status)
			goto out_unmap;
		kaddr = kmap_atomic(new_page, KM_USER0);
		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
		flush_dcache_page(new_page);
		kunmap_atomic(kaddr, KM_USER0);
		__block_commit_write(inode, new_page,
				zerofrom, PAGE_CACHE_SIZE);
		unlock_page(new_page);
		page_cache_release(new_page);
	}

	if (page->index < pgpos) {
		/* completely inside the area */
		zerofrom = offset;
	} else {
		/* page covers the boundary, find the boundary offset */
		zerofrom = *bytes & ~PAGE_CACHE_MASK;

		/* if we will expand the thing last block will be filled */
		if (to > zerofrom && (zerofrom & (blocksize-1))) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}

		/* starting below the boundary? Nothing to zero out */
		if (offset <= zerofrom)
			zerofrom = offset;
	}
	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
	if (status)
		goto out1;
	if (zerofrom < offset) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr+zerofrom, 0, offset-zerofrom);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		__block_commit_write(inode, page, zerofrom, offset);
	}
	return 0;
out1:
	ClearPageUptodate(page);
	return status;

out_unmap:
	ClearPageUptodate(new_page);
	unlock_page(new_page);
	page_cache_release(new_page);
out:
	return status;
}

int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode,page,from,to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_sem.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
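
/*
 * Example (illustrative sketch, hypothetical examplefs_* names): a minimal
 * address_space_operations wiring for a filesystem built on the generic
 * helpers above; only get_block is filesystem-specific.
 */
#if 0
static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}

static int examplefs_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,
	.prepare_write	= examplefs_prepare_write,
	.commit_write	= generic_commit_write,
};
#endif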

/*
 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 *
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * contents.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to).
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head map_bh;
	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
	unsigned block_in_page;
	unsigned block_start;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int i;
	int ret = 0;
	int is_mapped_to_disk = 1;
	int dirtied_it = 0;

	if (PageMappedToDisk(page))
		return 0;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	map_bh.b_page = page;

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize) {
		unsigned block_end = block_start + blocksize;
		int create;

		map_bh.b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					&map_bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(&map_bh))
			is_mapped_to_disk = 0;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
							map_bh.b_blocknr);
		if (PageUptodate(page))
			continue;
		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from) {
				memset(kaddr+block_start, 0, from-block_start);
				dirtied_it = 1;
			}
			if (block_end > to) {
				memset(kaddr + to, 0, block_end - to);
				dirtied_it = 1;
			}
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(&map_bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

			if (!bh) {
				ret = -ENOMEM;
				goto failed;
			}
			bh->b_state = map_bh.b_state;
			atomic_set(&bh->b_count, 0);
			bh->b_this_page = NULL;
			bh->b_page = page;
			bh->b_blocknr = map_bh.b_blocknr;
			bh->b_size = blocksize;
			bh->b_data = (char *)(long)block_start;
			bh->b_bdev = map_bh.b_bdev;
			bh->b_private = NULL;
			read_bh[nr_reads++] = bh;
		}
	}

	if (nr_reads) {
		struct buffer_head *bh;

		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
		}
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
			free_buffer_head(bh);
			read_bh[i] = NULL;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);
	SetPageUptodate(page);

	/*
	 * Setting the page dirty here isn't necessary for the prepare_write
	 * function - commit_write will do that.  But if/when this function is
	 * used within the pagefault handler to ensure that all mmapped pages
	 * have backing space in the filesystem, we will need to dirty the page
	 * if its contents were altered.
	 */
	if (dirtied_it)
		set_page_dirty(page);

	return 0;

failed:
	for (i = 0; i < nr_reads; i++) {
		if (read_bh[i])
			free_buffer_head(read_bh[i]);
	}

	/*
	 * Error recovery is pretty slack.  Clear the page and mark it dirty
	 * so we'll later zero out any blocks which _were_ allocated.
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
	SetPageUptodate(page);
	set_page_dirty(page);
	return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);

int nobh_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_dirty(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);
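
/*
 * Example (illustrative sketch, hypothetical examplefs_* names): opting
 * into the nobh path only requires pointing the prepare/commit methods at
 * the helpers above, much as ext2's "nobh" mount option does.
 */
#if 0
static int examplefs_nobh_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_nobh_aops = {
	.prepare_write	= examplefs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif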

/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
 */
int nobh_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned to;
	struct page *page;
	struct address_space_operations *a_ops = mapping->a_ops;
	char *kaddr;
	int ret = 0;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
EXPORT_SYMBOL(nobh_truncate_page);

int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
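
/*
 * Example (illustrative sketch, hypothetical examplefs_* names): truncate
 * paths normally zero the tail of the last partial block with
 * block_truncate_page() before freeing block mappings, so that a later
 * extension cannot expose stale data.
 */
#if 0
static void examplefs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			    examplefs_get_block);
	/* ... then free the no-longer-referenced blocks ... */
}
#endif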

/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	void *kaddr;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}
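
/*
 * Example (illustrative sketch, hypothetical names): a typical ->writepage
 * is a thin wrapper that passes the filesystem's get_block to
 * block_write_full_page().
 */
#if 0
static int examplefs_writepage(struct page *page,
		struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}
#endif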

sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
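
/*
 * Example (illustrative sketch, hypothetical names): ->bmap, used by the
 * FIBMAP ioctl and by swapfile setup, is likewise a one-line wrapper.
 */
#if 0
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}
#endif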

static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (bio->bi_size)
		return 1;

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
	return 0;
}

void submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	/* Only clear out a write error when rewriting */
	if (test_set_buffer_req(bh) && rw == WRITE)
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	submit_bio(rw, bio);
}
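
/*
 * Example (illustrative sketch): a synchronous read of a single mapped
 * buffer via submit_bh(), reusing the end_buffer_read_sync completion
 * handler defined earlier in this file.  "example_read_bh_sync" is
 * hypothetical; the caller is assumed to hold a reference on a mapped bh.
 */
#if 0
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif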

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
 * and requests an I/O operation on them, either a %READ or a %WRITE.
 * The third %READA option is described in the documentation for
 * generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a
 * write request, and any buffer that appears to be up-to-date when doing
 * a read request.  Further it marks as clean buffers that are processed
 * for writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (test_set_buffer_locked(bh))
			continue;

		get_bh(bh);
		if (rw == WRITE) {
			bh->b_end_io = end_buffer_write_sync;
			if (test_clear_buffer_dirty(bh)) {
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			bh->b_end_io = end_buffer_read_sync;
			if (!buffer_uptodate(bh)) {
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
		put_bh(bh);
	}
}
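
/*
 * Example (illustrative sketch): the classic caller pattern - batch-start
 * reads with ll_rw_block(), then wait on each buffer and check the result.
 * "example_read_buffers" is hypothetical.
 */
#if 0
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}
#endif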

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.
 */
void sync_dirty_buffer(struct buffer_head *bh)
{
	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		put_bh(bh);
	} else {
		unlock_buffer(bh);
	}
}
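
/*
 * Example (illustrative sketch): a data-integrity update of a metadata
 * block - modify it, mark it dirty, force it to disk, then check the
 * result.  "example_update_block" is hypothetical.
 */
#if 0
static int example_update_block(struct buffer_head *bh)
{
	/* ... modify bh->b_data under suitable locking ... */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;
	return 0;
}
#endif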

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh))
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);
	if (ret) {
		/*
		 * If the filesystem writes its buffers by hand (eg ext3)
		 * then we can have clean buffers against a dirty page.  We
		 * clean the page here; otherwise later reattachment of buffers
		 * could encounter a non-uptodate page, which is unresolvable.
		 * This only applies in the rare case where try_to_free_buffers
		 * succeeds but the page is not freed.
		 */
		clear_page_dirty(page);
	}
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
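
/*
 * Example (illustrative sketch, hypothetical name): filesystems with no
 * private buffer state can point ->releasepage straight at this helper;
 * try_to_release_page() also falls back to it when no method is defined.
 */
#if 0
static int examplefs_releasepage(struct page *page, int gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif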

int block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
	return 0;
}

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static kmem_cache_t *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(int gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		preempt_disable();
		__get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

static void
init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
			    SLAB_CTOR_CONSTRUCTOR) {
		struct buffer_head * bh = (struct buffer_head *)data;

		memset(bh, 0, sizeof(*bh));
		INIT_LIST_HEAD(&bh->b_assoc_buffers);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init buffer_init(void)
{
	int i;
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			SLAB_PANIC, init_buffer_head, NULL);
	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(buffer_insert_list);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(fsync_buffers_list);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);