4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/bio.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <asm/bitops.h>
42 static void invalidate_bh_lrus(void);
44 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
46 struct bh_wait_queue {
47 struct buffer_head *bh;
51 #define __DEFINE_BH_WAIT(name, b, f) \
52 struct bh_wait_queue name = { \
57 .func = bh_wake_function, \
59 LIST_HEAD_INIT(name.wait.task_list),\
62 #define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0)
63 #define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
64 __DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
67 * Hashed waitqueue_heads for wait_on_buffer()
69 #define BH_WAIT_TABLE_ORDER 7
70 static struct bh_wait_queue_head {
71 wait_queue_head_t wqh;
72 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
75 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
77 bh->b_end_io = handler;
78 bh->b_private = private;
82 * Return the address of the waitqueue_head to be used for this
85 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
87 return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
89 EXPORT_SYMBOL(bh_waitq_head);
91 void wake_up_buffer(struct buffer_head *bh)
93 wait_queue_head_t *wq = bh_waitq_head(bh);
96 if (waitqueue_active(wq))
97 __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
99 EXPORT_SYMBOL(wake_up_buffer);
101 static int bh_wake_function(wait_queue_t *wait, unsigned mode,
104 struct buffer_head *bh = key;
105 struct bh_wait_queue *wq;
107 wq = container_of(wait, struct bh_wait_queue, wait);
108 if (wq->bh != bh || buffer_locked(bh))
111 return autoremove_wake_function(wait, mode, sync, key);
114 static void sync_buffer(struct buffer_head *bh)
116 struct block_device *bd;
121 blk_run_address_space(bd->bd_inode->i_mapping);
124 void fastcall __lock_buffer(struct buffer_head *bh)
126 wait_queue_head_t *wqh = bh_waitq_head(bh);
127 DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
130 prepare_to_wait_exclusive(wqh, &wait.wait,
131 TASK_UNINTERRUPTIBLE);
132 if (buffer_locked(bh)) {
136 } while (test_set_buffer_locked(bh));
137 finish_wait(wqh, &wait.wait);
139 EXPORT_SYMBOL(__lock_buffer);
141 void fastcall unlock_buffer(struct buffer_head *bh)
143 clear_buffer_locked(bh);
144 smp_mb__after_clear_bit();
149 * Block until a buffer comes unlocked. This doesn't stop it
150 * from becoming locked again - you have to lock it yourself
151 * if you want to preserve its state.
153 void __wait_on_buffer(struct buffer_head * bh)
155 wait_queue_head_t *wqh = bh_waitq_head(bh);
156 DEFINE_BH_WAIT(wait, bh);
159 prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
160 if (buffer_locked(bh)) {
164 } while (buffer_locked(bh));
165 finish_wait(wqh, &wait.wait);
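/*
 * A minimal usage sketch (not part of this file): wait_on_buffer() only waits
 * for the I/O that is currently in flight, so a caller that needs the buffer
 * contents to stay stable must take the lock itself, as the comment above
 * notes.  example_inspect_buffer() is a hypothetical name.
 */
static void example_inspect_buffer(struct buffer_head *bh)
{
	wait_on_buffer(bh);		/* I/O done, but bh may be re-locked */

	lock_buffer(bh);		/* nobody else can start I/O on it now */
	if (buffer_uptodate(bh)) {
		/* bh->b_data cannot change under us here */
	}
	unlock_buffer(bh);
}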
169 __set_page_buffers(struct page *page, struct buffer_head *head)
171 page_cache_get(page);
172 SetPagePrivate(page);
173 page->private = (unsigned long)head;
177 __clear_page_buffers(struct page *page)
179 ClearPagePrivate(page);
181 page_cache_release(page);
184 static void buffer_io_error(struct buffer_head *bh)
186 char b[BDEVNAME_SIZE];
188 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
189 bdevname(bh->b_bdev, b),
190 (unsigned long long)bh->b_blocknr);
194 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
195 * unlock the buffer. This is what ll_rw_block uses too.
197 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
200 set_buffer_uptodate(bh);
202 /* This happens, due to failed READA attempts. */
203 clear_buffer_uptodate(bh);
209 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
211 char b[BDEVNAME_SIZE];
214 set_buffer_uptodate(bh);
216 if (printk_ratelimit()) {
218 printk(KERN_WARNING "lost page write due to "
220 bdevname(bh->b_bdev, b));
222 set_buffer_write_io_error(bh);
223 clear_buffer_uptodate(bh);
230 * Write out and wait upon all the dirty data associated with a block
231 * device via its mapping. Does not take the superblock lock.
233 int sync_blockdev(struct block_device *bdev)
240 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
241 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
247 EXPORT_SYMBOL(sync_blockdev);
250 * Write out and wait upon all dirty data associated with this
251 * superblock. Filesystem data as well as the underlying block
252 * device. Takes the superblock lock.
254 int fsync_super(struct super_block *sb)
256 sync_inodes_sb(sb, 0);
259 if (sb->s_dirt && sb->s_op->write_super)
260 sb->s_op->write_super(sb);
262 if (sb->s_op->sync_fs)
263 sb->s_op->sync_fs(sb, 1);
264 sync_blockdev(sb->s_bdev);
265 sync_inodes_sb(sb, 1);
267 return sync_blockdev(sb->s_bdev);
271 * Write out and wait upon all dirty data associated with this
272 * device. Filesystem data as well as the underlying block
273 * device. Takes the superblock lock.
275 int fsync_bdev(struct block_device *bdev)
277 struct super_block *sb = get_super(bdev);
279 int res = fsync_super(sb);
283 return sync_blockdev(bdev);
287 * freeze_bdev -- lock a filesystem and force it into a consistent state
288 * @bdev: blockdevice to lock
290 * This takes the block device bd_mount_sem to make sure no new mounts
291 * happen on bdev until thaw_bdev() is called.
292 * If a superblock is found on this device, we take the s_umount semaphore
293 * on it to make sure nobody unmounts until the snapshot creation is done.
295 struct super_block *freeze_bdev(struct block_device *bdev)
297 struct super_block *sb;
299 down(&bdev->bd_mount_sem);
300 sb = get_super(bdev);
301 if (sb && !(sb->s_flags & MS_RDONLY)) {
302 sb->s_frozen = SB_FREEZE_WRITE;
305 sync_inodes_sb(sb, 0);
309 if (sb->s_dirt && sb->s_op->write_super)
310 sb->s_op->write_super(sb);
313 if (sb->s_op->sync_fs)
314 sb->s_op->sync_fs(sb, 1);
316 sync_blockdev(sb->s_bdev);
317 sync_inodes_sb(sb, 1);
319 sb->s_frozen = SB_FREEZE_TRANS;
322 sync_blockdev(sb->s_bdev);
324 if (sb->s_op->write_super_lockfs)
325 sb->s_op->write_super_lockfs(sb);
329 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
331 EXPORT_SYMBOL(freeze_bdev);
334 * thaw_bdev -- unlock filesystem
335 * @bdev: blockdevice to unlock
336 * @sb: associated superblock
338 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
340 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
343 BUG_ON(sb->s_bdev != bdev);
345 if (sb->s_op->unlockfs)
346 sb->s_op->unlockfs(sb);
347 sb->s_frozen = SB_UNFROZEN;
349 wake_up(&sb->s_wait_unfrozen);
353 up(&bdev->bd_mount_sem);
355 EXPORT_SYMBOL(thaw_bdev);
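/*
 * Hedged usage sketch (not part of this file): how a snapshot or backup
 * driver might bracket its work with freeze_bdev()/thaw_bdev().  Only the
 * freeze/thaw calls are real; example_snapshot_device() and the copy step
 * are hypothetical.
 */
static int example_snapshot_device(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* fs is now quiesced and consistent */
	/* ... copy the device contents here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
	return 0;
}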
358 * sync everything. Start out by waking pdflush, because that writes back
359 * all queues in parallel.
361 static void do_sync(unsigned long wait)
364 sync_inodes(0); /* All mappings, inodes and their blockdevs */
366 sync_supers(); /* Write the superblocks */
367 sync_filesystems(0); /* Start syncing the filesystems */
368 sync_filesystems(wait); /* Waitingly sync the filesystems */
369 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
371 printk("Emergency Sync complete\n");
372 if (unlikely(laptop_mode))
373 laptop_sync_completion();
376 asmlinkage long sys_sync(void)
382 void emergency_sync(void)
384 pdflush_operation(do_sync, 0);
388 * Generic function to fsync a file.
390 * filp may be NULL if called via the msync of a vma.
393 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
395 struct inode * inode = dentry->d_inode;
396 struct super_block * sb;
399 /* sync the inode to buffers */
400 write_inode_now(inode, 0);
402 /* sync the superblock to buffers */
405 if (sb->s_op->write_super)
406 sb->s_op->write_super(sb);
409 /* .. finally sync the buffers to disk */
410 ret = sync_blockdev(sb->s_bdev);
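/*
 * Illustrative sketch (not part of this file): a simple filesystem that keeps
 * all of its metadata in the buffer cache can use file_fsync() directly as
 * its ->fsync method.  "examplefs" and the generic_* choices below are only
 * an assumed, typical wiring.
 */
static struct file_operations examplefs_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,	/* write inode, superblock, then the blockdev */
};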
414 asmlinkage long sys_fsync(unsigned int fd)
417 struct address_space *mapping;
425 mapping = file->f_mapping;
428 if (!file->f_op || !file->f_op->fsync) {
429 /* Why? We can still call filemap_fdatawrite */
433 /* We need to protect against concurrent writers.. */
434 down(&mapping->host->i_sem);
435 current->flags |= PF_SYNCWRITE;
436 ret = filemap_fdatawrite(mapping);
437 err = file->f_op->fsync(file, file->f_dentry, 0);
440 err = filemap_fdatawait(mapping);
443 current->flags &= ~PF_SYNCWRITE;
444 up(&mapping->host->i_sem);
452 asmlinkage long sys_fdatasync(unsigned int fd)
455 struct address_space *mapping;
464 if (!file->f_op || !file->f_op->fsync)
467 mapping = file->f_mapping;
469 down(&mapping->host->i_sem);
470 current->flags |= PF_SYNCWRITE;
471 ret = filemap_fdatawrite(mapping);
472 err = file->f_op->fsync(file, file->f_dentry, 1);
475 err = filemap_fdatawait(mapping);
478 current->flags &= ~PF_SYNCWRITE;
479 up(&mapping->host->i_sem);
488 * Various filesystems appear to want __find_get_block to be non-blocking.
489 * But it's the page lock which protects the buffers. To get around this,
490 * we get exclusion from try_to_free_buffers with the blockdev mapping's
493 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
494 * may be quite high. This code could TryLock the page, and if that
495 * succeeds, there is no need to take private_lock. (But if
496 * private_lock is contended then so is mapping->tree_lock).
498 static struct buffer_head *
499 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
501 struct inode *bd_inode = bdev->bd_inode;
502 struct address_space *bd_mapping = bd_inode->i_mapping;
503 struct buffer_head *ret = NULL;
505 struct buffer_head *bh;
506 struct buffer_head *head;
509 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
510 page = find_get_page(bd_mapping, index);
514 spin_lock(&bd_mapping->private_lock);
515 if (!page_has_buffers(page))
517 head = page_buffers(page);
520 if (bh->b_blocknr == block) {
525 bh = bh->b_this_page;
526 } while (bh != head);
528 printk("__find_get_block_slow() failed. "
529 "block=%llu, b_blocknr=%llu\n",
530 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
531 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
532 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
534 spin_unlock(&bd_mapping->private_lock);
535 page_cache_release(page);
540 /* If invalidate_buffers() will trash dirty buffers, it means some kind
541 of fs corruption is going on. Trashing dirty data always implies losing
542 information that was supposed to be just stored on the physical layer
545 Thus invalidate_buffers in general usage is not allowed to trash
546 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
547 be preserved. These buffers are simply skipped.
549 We also skip buffers which are still in use. For example this can
550 happen if a userspace program is reading the block device.
552 NOTE: In the case where the user removed a removable-media disk even if
553 there's still dirty data not synced to disk (due to a bug in the device
554 driver or to an error of the user), by not destroying the dirty buffers we
555 could generate corruption also on the next media inserted; thus a parameter
556 is necessary to handle this case in the safest way possible (trying not to
557 corrupt the newly inserted disk with data belonging to the old, now
558 corrupted, disk). Also, for the ramdisk the natural thing to do in order to
559 release the ramdisk memory is to destroy its dirty buffers.
561 These are two special cases. Normal usage implies that the device driver
562 issues a sync on the device (without waiting for I/O completion) and
563 then an invalidate_buffers call that doesn't trash dirty buffers.
565 For handling cache coherency with the blkdev pagecache the 'update' case
566 has been introduced. It is needed to re-read from disk any pinned
567 buffer. NOTE: re-reading from disk is destructive so we can do it only
568 when we assume nobody is changing the buffercache under our I/O and when
569 we think the disk contains more recent information than the buffercache.
570 The update == 1 pass marks the buffers we need to update, the update == 2
571 pass does the actual I/O. */
572 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
574 invalidate_bh_lrus();
576 * FIXME: what about destroy_dirty_buffers?
577 * We really want to use invalidate_inode_pages2() for
578 * that, but not until that's cleaned up.
580 invalidate_inode_pages(bdev->bd_inode->i_mapping);
584 * Kick pdflush then try to free up some ZONE_NORMAL memory.
586 static void free_more_memory(void)
591 wakeup_bdflush(1024);
594 for_each_pgdat(pgdat) {
595 zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
597 try_to_free_pages(zones, GFP_NOFS, 0);
602 * I/O completion handler for block_read_full_page() - pages
603 * which come unlocked at the end of I/O.
605 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
607 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
609 struct buffer_head *tmp;
611 int page_uptodate = 1;
613 BUG_ON(!buffer_async_read(bh));
617 set_buffer_uptodate(bh);
619 clear_buffer_uptodate(bh);
625 * Be _very_ careful from here on. Bad things can happen if
626 * two buffer heads end IO at almost the same time and both
627 * decide that the page is now completely done.
629 spin_lock_irqsave(&page_uptodate_lock, flags);
630 clear_buffer_async_read(bh);
634 if (!buffer_uptodate(tmp))
636 if (buffer_async_read(tmp)) {
637 BUG_ON(!buffer_locked(tmp));
640 tmp = tmp->b_this_page;
642 spin_unlock_irqrestore(&page_uptodate_lock, flags);
645 * If none of the buffers had errors and they are all
646 * uptodate then we can set the page uptodate.
648 if (page_uptodate && !PageError(page))
649 SetPageUptodate(page);
654 spin_unlock_irqrestore(&page_uptodate_lock, flags);
659 * Completion handler for block_write_full_page() - pages which are unlocked
660 * during I/O, and which have PageWriteback cleared upon I/O completion.
662 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
664 char b[BDEVNAME_SIZE];
665 static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
667 struct buffer_head *tmp;
670 BUG_ON(!buffer_async_write(bh));
674 set_buffer_uptodate(bh);
676 if (printk_ratelimit()) {
678 printk(KERN_WARNING "lost page write due to "
680 bdevname(bh->b_bdev, b));
682 set_bit(AS_EIO, &page->mapping->flags);
683 clear_buffer_uptodate(bh);
687 spin_lock_irqsave(&page_uptodate_lock, flags);
688 clear_buffer_async_write(bh);
690 tmp = bh->b_this_page;
692 if (buffer_async_write(tmp)) {
693 BUG_ON(!buffer_locked(tmp));
696 tmp = tmp->b_this_page;
698 spin_unlock_irqrestore(&page_uptodate_lock, flags);
699 end_page_writeback(page);
703 spin_unlock_irqrestore(&page_uptodate_lock, flags);
708 * If a page's buffers are under async read-in (end_buffer_async_read
709 * completion) then there is a possibility that another thread of
710 * control could lock one of the buffers after it has completed
711 * but while some of the other buffers have not completed. This
712 * locked buffer would confuse end_buffer_async_read() into not unlocking
713 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
714 * that this buffer is not under async I/O.
716 * The page comes unlocked when it has no locked buffer_async buffers
719 * PageLocked prevents anyone starting new async I/O reads any of
722 * PageWriteback is used to prevent simultaneous writeout of the same
725 * PageLocked prevents anyone from starting writeback of a page which is
726 * under read I/O (PageWriteback is only ever set against a locked page).
728 void mark_buffer_async_read(struct buffer_head *bh)
730 bh->b_end_io = end_buffer_async_read;
731 set_buffer_async_read(bh);
733 EXPORT_SYMBOL(mark_buffer_async_read);
735 void mark_buffer_async_write(struct buffer_head *bh)
737 bh->b_end_io = end_buffer_async_write;
738 set_buffer_async_write(bh);
740 EXPORT_SYMBOL(mark_buffer_async_write);
744 * fs/buffer.c contains helper functions for buffer-backed address space's
745 * fsync functions. A common requirement for buffer-based filesystems is
746 * that certain data from the backing blockdev needs to be written out for
747 * a successful fsync(). For example, ext2 indirect blocks need to be
748 * written back and waited upon before fsync() returns.
750 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
751 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
752 * management of a list of dependent buffers at ->i_mapping->private_list.
754 * Locking is a little subtle: try_to_free_buffers() will remove buffers
755 * from their controlling inode's queue when they are being freed. But
756 * try_to_free_buffers() will be operating against the *blockdev* mapping
757 * at the time, not against the S_ISREG file which depends on those buffers.
758 * So the locking for private_list is via the private_lock in the address_space
759 * which backs the buffers. Which is different from the address_space
760 * against which the buffers are listed. So for a particular address_space,
761 * mapping->private_lock does *not* protect mapping->private_list! In fact,
762 * mapping->private_list will always be protected by the backing blockdev's
765 * Which introduces a requirement: all buffers on an address_space's
766 * ->private_list must be from the same address_space: the blockdev's.
768 * address_spaces which do not place buffers at ->private_list via these
769 * utility functions are free to use private_lock and private_list for
770 * whatever they want. The only requirement is that list_empty(private_list)
771 * be true at clear_inode() time.
773 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
774 * filesystems should do that. invalidate_inode_buffers() should just go
775 * BUG_ON(!list_empty).
777 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
778 * take an address_space, not an inode. And it should be called
779 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
782 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
783 * list if it is already on a list. Because if the buffer is on a list,
784 * it *must* already be on the right one. If not, the filesystem is being
785 * silly. This will save a ton of locking. But first we have to ensure
786 * that buffers are taken *off* the old inode's list when they are freed
787 * (presumably in truncate). That requires careful auditing of all
788 * filesystems (do it inside bforget()). It could also be done by bringing
792 void buffer_insert_list(spinlock_t *lock,
793 struct buffer_head *bh, struct list_head *list)
796 list_move_tail(&bh->b_assoc_buffers, list);
801 * The buffer's backing address_space's private_lock must be held
803 static inline void __remove_assoc_queue(struct buffer_head *bh)
805 list_del_init(&bh->b_assoc_buffers);
808 int inode_has_buffers(struct inode *inode)
810 return !list_empty(&inode->i_data.private_list);
814 * osync is designed to support O_SYNC io. It waits synchronously for
815 * all already-submitted IO to complete, but does not queue any new
816 * writes to the disk.
818 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
819 * you dirty the buffers, and then use osync_inode_buffers to wait for
820 * completion. Any other dirty buffers which are not yet queued for
821 * write will not be flushed to disk by the osync.
823 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
825 struct buffer_head *bh;
831 list_for_each_prev(p, list) {
833 if (buffer_locked(bh)) {
837 if (!buffer_uptodate(bh))
849 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
851 * @buffer_mapping - the mapping which backs the buffers' data
852 * @mapping - the mapping which wants those buffers written
854 * Starts I/O against the buffers at mapping->private_list, and waits upon
857 * Basically, this is a convenience function for fsync(). @buffer_mapping is
858 * the blockdev which "owns" the buffers and @mapping is a file or directory
859 * which needs those buffers to be written for a successful fsync().
861 int sync_mapping_buffers(struct address_space *mapping)
863 struct address_space *buffer_mapping = mapping->assoc_mapping;
865 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
868 return fsync_buffers_list(&buffer_mapping->private_lock,
869 &mapping->private_list);
871 EXPORT_SYMBOL(sync_mapping_buffers);
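/*
 * Hedged sketch (not part of this file): the fsync() pattern described in the
 * comments above.  While writing, the filesystem attaches its indirect or
 * other dependent metadata buffers to the inode's mapping with
 * mark_buffer_dirty_inode(); its fsync method then writes and waits on that
 * private_list via sync_mapping_buffers().  All examplefs_* names are
 * hypothetical.
 */
static int examplefs_sync_file(struct file *file, struct dentry *dentry,
				int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* earlier, on the write path:
	 *	mark_buffer_dirty_inode(indirect_bh, inode);
	 */
	return sync_mapping_buffers(inode->i_mapping);
}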
874 * Called when we've recently written block `bblock', and it is known that
875 * `bblock' was for a buffer_boundary() buffer. This means that the block at
876 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
877 * dirty, schedule it for IO. So that indirects merge nicely with their data.
879 void write_boundary_block(struct block_device *bdev,
880 sector_t bblock, unsigned blocksize)
882 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
884 if (buffer_dirty(bh))
885 ll_rw_block(WRITE, 1, &bh);
890 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
892 struct address_space *mapping = inode->i_mapping;
893 struct address_space *buffer_mapping = bh->b_page->mapping;
895 mark_buffer_dirty(bh);
896 if (!mapping->assoc_mapping) {
897 mapping->assoc_mapping = buffer_mapping;
899 if (mapping->assoc_mapping != buffer_mapping)
902 if (list_empty(&bh->b_assoc_buffers))
903 buffer_insert_list(&buffer_mapping->private_lock,
904 bh, &mapping->private_list);
906 EXPORT_SYMBOL(mark_buffer_dirty_inode);
909 * Add a page to the dirty page list.
911 * It is a sad fact of life that this function is called from several places
912 * deeply under spinlocking. It may not sleep.
914 * If the page has buffers, the uptodate buffers are set dirty, to preserve
915 * dirty-state coherency between the page and the buffers. If the page does
916 * not have buffers then when they are later attached they will all be set
919 * The buffers are dirtied before the page is dirtied. There's a small race
920 * window in which a writepage caller may see the page cleanness but not the
921 * buffer dirtiness. That's fine. If this code were to set the page dirty
922 * before the buffers, a concurrent writepage caller could clear the page dirty
923 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
924 * page on the dirty page list.
926 * We use private_lock to lock against try_to_free_buffers while using the
927 * page's buffer list. Also use this to protect against clean buffers being
928 * added to the page after it was set dirty.
930 * FIXME: may need to call ->reservepage here as well. That's rather up to the
931 * address_space though.
933 int __set_page_dirty_buffers(struct page *page)
935 struct address_space * const mapping = page->mapping;
937 spin_lock(&mapping->private_lock);
938 if (page_has_buffers(page)) {
939 struct buffer_head *head = page_buffers(page);
940 struct buffer_head *bh = head;
943 set_buffer_dirty(bh);
944 bh = bh->b_this_page;
945 } while (bh != head);
947 spin_unlock(&mapping->private_lock);
949 if (!TestSetPageDirty(page)) {
950 spin_lock_irq(&mapping->tree_lock);
951 if (page->mapping) { /* Race with truncate? */
952 if (!mapping->backing_dev_info->memory_backed)
953 inc_page_state(nr_dirty);
954 radix_tree_tag_set(&mapping->page_tree,
956 PAGECACHE_TAG_DIRTY);
958 spin_unlock_irq(&mapping->tree_lock);
959 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
964 EXPORT_SYMBOL(__set_page_dirty_buffers);
967 * Write out and wait upon a list of buffers.
969 * We have conflicting pressures: we want to make sure that all
970 * initially dirty buffers get waited on, but that any subsequently
971 * dirtied buffers don't. After all, we don't want fsync to last
972 * forever if somebody is actively writing to the file.
974 * Do this in two main stages: first we copy dirty buffers to a
975 * temporary inode list, queueing the writes as we go. Then we clean
976 * up, waiting for those writes to complete.
978 * During this second stage, any subsequent updates to the file may end
979 * up refiling the buffer on the original inode's dirty list again, so
980 * there is a chance we will end up with a buffer queued for write but
981 * not yet completed on that list. So, as a final cleanup we go through
982 * the osync code to catch these locked, dirty buffers without requeuing
983 * any newly dirty buffers for write.
985 int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
987 struct buffer_head *bh;
988 struct list_head tmp;
991 INIT_LIST_HEAD(&tmp);
994 while (!list_empty(list)) {
995 bh = BH_ENTRY(list->next);
996 list_del_init(&bh->b_assoc_buffers);
997 if (buffer_dirty(bh) || buffer_locked(bh)) {
998 list_add(&bh->b_assoc_buffers, &tmp);
999 if (buffer_dirty(bh)) {
1003 * Ensure any pending I/O completes so that
1004 * ll_rw_block() actually writes the current
1005 * contents - it is a noop if I/O is still in
1006 * flight on potentially older contents.
1009 ll_rw_block(WRITE, 1, &bh);
1016 while (!list_empty(&tmp)) {
1017 bh = BH_ENTRY(tmp.prev);
1018 __remove_assoc_queue(bh);
1022 if (!buffer_uptodate(bh))
1029 err2 = osync_buffers_list(lock, list);
1037 * Invalidate any and all dirty buffers on a given inode. We are
1038 * probably unmounting the fs, but that doesn't mean we have already
1039 * done a sync(). Just drop the buffers from the inode list.
1041 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
1042 * assumes that all the buffers are against the blockdev. Not true
1045 void invalidate_inode_buffers(struct inode *inode)
1047 if (inode_has_buffers(inode)) {
1048 struct address_space *mapping = &inode->i_data;
1049 struct list_head *list = &mapping->private_list;
1050 struct address_space *buffer_mapping = mapping->assoc_mapping;
1052 spin_lock(&buffer_mapping->private_lock);
1053 while (!list_empty(list))
1054 __remove_assoc_queue(BH_ENTRY(list->next));
1055 spin_unlock(&buffer_mapping->private_lock);
1060 * Remove any clean buffers from the inode's buffer list. This is called
1061 * when we're trying to free the inode itself. Those buffers can pin it.
1063 * Returns true if all buffers were removed.
1065 int remove_inode_buffers(struct inode *inode)
1069 if (inode_has_buffers(inode)) {
1070 struct address_space *mapping = &inode->i_data;
1071 struct list_head *list = &mapping->private_list;
1072 struct address_space *buffer_mapping = mapping->assoc_mapping;
1074 spin_lock(&buffer_mapping->private_lock);
1075 while (!list_empty(list)) {
1076 struct buffer_head *bh = BH_ENTRY(list->next);
1077 if (buffer_dirty(bh)) {
1081 __remove_assoc_queue(bh);
1083 spin_unlock(&buffer_mapping->private_lock);
1089 * Create the appropriate buffers when given a page for data area and
1090 * the size of each buffer.. Use the bh->b_this_page linked list to
1091 * follow the buffers created. Return NULL if unable to create more
1094 * The retry flag is used to differentiate async IO (paging, swapping)
1095 * which may not fail from ordinary buffer allocations.
1097 static struct buffer_head *
1098 create_buffers(struct page * page, unsigned long size, int retry)
1100 struct buffer_head *bh, *head;
1106 while ((offset -= size) >= 0) {
1107 bh = alloc_buffer_head(GFP_NOFS);
1112 bh->b_this_page = head;
1117 atomic_set(&bh->b_count, 0);
1120 /* Link the buffer to its page */
1121 set_bh_page(bh, page, offset);
1123 bh->b_end_io = NULL;
1127 * In case anything failed, we just free everything we got.
1133 head = head->b_this_page;
1134 free_buffer_head(bh);
1139 * Return failure for non-async IO requests. Async IO requests
1140 * are not allowed to fail, so we have to wait until buffer heads
1141 * become available. But we don't want tasks sleeping with
1142 * partially complete buffers, so all were released above.
1147 /* We're _really_ low on memory. Now we just
1148 * wait for old buffer heads to become free due to
1149 * finishing IO. Since this is an async request and
1150 * the reserve list is empty, we're sure there are
1151 * async buffer heads in use.
1158 link_dev_buffers(struct page *page, struct buffer_head *head)
1160 struct buffer_head *bh, *tail;
1165 bh = bh->b_this_page;
1167 tail->b_this_page = head;
1168 __set_page_buffers(page, head);
1172 * Initialise the state of a blockdev page's buffers.
1175 init_page_buffers(struct page *page, struct block_device *bdev,
1176 sector_t block, int size)
1178 struct buffer_head *head = page_buffers(page);
1179 struct buffer_head *bh = head;
1180 unsigned int b_state;
1182 b_state = 1 << BH_Mapped;
1183 if (PageUptodate(page))
1184 b_state |= 1 << BH_Uptodate;
1187 if (!(bh->b_state & (1 << BH_Mapped))) {
1188 init_buffer(bh, NULL, NULL);
1190 bh->b_blocknr = block;
1191 bh->b_state = b_state;
1194 bh = bh->b_this_page;
1195 } while (bh != head);
1199 * Create the page-cache page that contains the requested block.
1201 * This is used purely for blockdev mappings.
1203 static struct page *
1204 grow_dev_page(struct block_device *bdev, sector_t block,
1205 pgoff_t index, int size)
1207 struct inode *inode = bdev->bd_inode;
1209 struct buffer_head *bh;
1211 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1215 if (!PageLocked(page))
1218 if (page_has_buffers(page)) {
1219 bh = page_buffers(page);
1220 if (bh->b_size == size)
1222 if (!try_to_free_buffers(page))
1227 * Allocate some buffers for this page
1229 bh = create_buffers(page, size, 0);
1234 * Link the page to the buffers and initialise them. Take the
1235 * lock to be atomic wrt __find_get_block(), which does not
1236 * run under the page lock.
1238 spin_lock(&inode->i_mapping->private_lock);
1239 link_dev_buffers(page, bh);
1240 init_page_buffers(page, bdev, block, size);
1241 spin_unlock(&inode->i_mapping->private_lock);
1247 page_cache_release(page);
1252 * Create buffers for the specified block device block's page. If
1253 * that page was dirty, the buffers are set dirty also.
1255 * Except that's a bug. Attaching dirty buffers to a dirty
1256 * blockdev's page can result in filesystem corruption, because
1257 * some of those buffers may be aliases of filesystem data.
1258 * grow_dev_page() will go BUG() if this happens.
1261 grow_buffers(struct block_device *bdev, sector_t block, int size)
1270 } while ((size << sizebits) < PAGE_SIZE);
1272 index = block >> sizebits;
1273 block = index << sizebits;
1275 /* Create a page with the proper size buffers.. */
1276 page = grow_dev_page(bdev, block, index, size);
1280 page_cache_release(page);
1284 struct buffer_head *
1285 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1287 /* Size must be multiple of hard sectorsize */
1288 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1289 (size < 512 || size > PAGE_SIZE))) {
1290 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1292 printk(KERN_ERR "hardsect size: %d\n",
1293 bdev_hardsect_size(bdev));
1300 struct buffer_head * bh;
1302 bh = __find_get_block(bdev, block, size);
1306 if (!grow_buffers(bdev, block, size))
1312 * The relationship between dirty buffers and dirty pages:
1314 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1315 * the page is tagged dirty in its radix tree.
1317 * At all times, the dirtiness of the buffers represents the dirtiness of
1318 * subsections of the page. If the page has buffers, the page dirty bit is
1319 * merely a hint about the true dirty state.
1321 * When a page is set dirty in its entirety, all its buffers are marked dirty
1322 * (if the page has buffers).
1324 * When a buffer is marked dirty, its page is dirtied, but the page's other
1327 * Also. When blockdev buffers are explicitly read with bread(), they
1328 * individually become uptodate. But their backing page remains not
1329 * uptodate - even if all of its buffers are uptodate. A subsequent
1330 * block_read_full_page() against that page will discover all the uptodate
1331 * buffers, will set the page uptodate and will perform no I/O.
1335 * mark_buffer_dirty - mark a buffer_head as needing writeout
1337 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1338 * backing page dirty, then tag the page as dirty in its address_space's radix
1339 * tree and then attach the address_space's inode to its superblock's dirty
1342 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1343 * mapping->tree_lock and the global inode_lock.
1345 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1347 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1348 __set_page_dirty_nobuffers(bh->b_page);
1352 * Decrement a buffer_head's reference count. If all buffers against a page
1353 * have zero reference count, are clean and unlocked, and if the page is clean
1354 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1355 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1356 * a page but it ends up not being freed, and buffers may later be reattached).
1358 void __brelse(struct buffer_head * buf)
1360 if (atomic_read(&buf->b_count)) {
1364 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1369 * bforget() is like brelse(), except it discards any
1370 * potentially dirty data.
1372 void __bforget(struct buffer_head *bh)
1374 clear_buffer_dirty(bh);
1375 if (!list_empty(&bh->b_assoc_buffers)) {
1376 struct address_space *buffer_mapping = bh->b_page->mapping;
1378 spin_lock(&buffer_mapping->private_lock);
1379 list_del_init(&bh->b_assoc_buffers);
1380 spin_unlock(&buffer_mapping->private_lock);
1385 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1388 if (buffer_uptodate(bh)) {
1393 bh->b_end_io = end_buffer_read_sync;
1394 submit_bh(READ, bh);
1396 if (buffer_uptodate(bh))
1404 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1405 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1406 * refcount elevated by one when they're in an LRU. A buffer can only appear
1407 * once in a particular CPU's LRU. A single buffer can be present in multiple
1408 * CPUs' LRUs at the same time.
1410 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1411 * sb_find_get_block().
1413 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1414 * a local interrupt disable for that.
1417 #define BH_LRU_SIZE 8
1420 struct buffer_head *bhs[BH_LRU_SIZE];
1423 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1426 #define bh_lru_lock() local_irq_disable()
1427 #define bh_lru_unlock() local_irq_enable()
1429 #define bh_lru_lock() preempt_disable()
1430 #define bh_lru_unlock() preempt_enable()
1433 static inline void check_irqs_on(void)
1435 #ifdef irqs_disabled
1436 BUG_ON(irqs_disabled());
1441 * The LRU management algorithm is dopey-but-simple. Sorry.
1443 static void bh_lru_install(struct buffer_head *bh)
1445 struct buffer_head *evictee = NULL;
1450 lru = &__get_cpu_var(bh_lrus);
1451 if (lru->bhs[0] != bh) {
1452 struct buffer_head *bhs[BH_LRU_SIZE];
1458 for (in = 0; in < BH_LRU_SIZE; in++) {
1459 struct buffer_head *bh2 = lru->bhs[in];
1464 if (out >= BH_LRU_SIZE) {
1465 BUG_ON(evictee != NULL);
1472 while (out < BH_LRU_SIZE)
1474 memcpy(lru->bhs, bhs, sizeof(bhs));
1483 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1485 static inline struct buffer_head *
1486 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1488 struct buffer_head *ret = NULL;
1494 lru = &__get_cpu_var(bh_lrus);
1495 for (i = 0; i < BH_LRU_SIZE; i++) {
1496 struct buffer_head *bh = lru->bhs[i];
1498 if (bh && bh->b_bdev == bdev &&
1499 bh->b_blocknr == block && bh->b_size == size) {
1502 lru->bhs[i] = lru->bhs[i - 1];
1517 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1518 * it in the LRU and mark it as accessed. If it is not present then return
1521 struct buffer_head *
1522 __find_get_block(struct block_device *bdev, sector_t block, int size)
1524 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1527 bh = __find_get_block_slow(bdev, block, size);
1535 EXPORT_SYMBOL(__find_get_block);
1538 * __getblk will locate (and, if necessary, create) the buffer_head
1539 * which corresponds to the passed block_device, block and size. The
1540 * returned buffer has its reference count incremented.
1542 * __getblk() cannot fail - it just keeps trying. If you pass it an
1543 * illegal block number, __getblk() will happily return a buffer_head
1544 * which represents the non-existent block. Very weird.
1546 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1547 * attempt is failing. FIXME, perhaps?
1549 struct buffer_head *
1550 __getblk(struct block_device *bdev, sector_t block, int size)
1552 struct buffer_head *bh = __find_get_block(bdev, block, size);
1555 bh = __getblk_slow(bdev, block, size);
1558 EXPORT_SYMBOL(__getblk);
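/*
 * Hedged sketch (not part of this file): initialising a freshly allocated
 * metadata block through the buffer cache.  As noted above, __getblk() always
 * returns a buffer_head, so the caller must pass a block number it knows to
 * be valid.  example_init_block() is a made-up name.
 */
static void example_init_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* written back later by pdflush/sync */
	brelse(bh);
}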
1561 * Do async read-ahead on a buffer..
1563 void __breadahead(struct block_device *bdev, sector_t block, int size)
1565 struct buffer_head *bh = __getblk(bdev, block, size);
1566 ll_rw_block(READA, 1, &bh);
1569 EXPORT_SYMBOL(__breadahead);
1572 * __bread() - reads a specified block and returns the bh
1573 * @block: number of block
1574 * @size: size (in bytes) to read
1576 * Reads a specified block, and returns buffer head that contains it.
1577 * It returns NULL if the block was unreadable.
1579 struct buffer_head *
1580 __bread(struct block_device *bdev, sector_t block, int size)
1582 struct buffer_head *bh = __getblk(bdev, block, size);
1584 if (!buffer_uptodate(bh))
1585 bh = __bread_slow(bh);
1588 EXPORT_SYMBOL(__bread);
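/*
 * Hedged sketch (not part of this file): the usual read-examine-release
 * pattern for __bread() (or its sb_bread() front-end).  Everything except the
 * buffer-cache calls is hypothetical.
 */
static int example_read_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	/* ... examine bh->b_data ... */
	brelse(bh);
	return 0;
}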
1591 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1592 * This doesn't race because it runs in each cpu either in irq
1593 * or with preempt disabled.
1595 static void invalidate_bh_lru(void *arg)
1597 struct bh_lru *b = &get_cpu_var(bh_lrus);
1600 for (i = 0; i < BH_LRU_SIZE; i++) {
1604 put_cpu_var(bh_lrus);
1607 static void invalidate_bh_lrus(void)
1609 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1612 void set_bh_page(struct buffer_head *bh,
1613 struct page *page, unsigned long offset)
1616 if (offset >= PAGE_SIZE)
1618 if (PageHighMem(page))
1620 * This catches illegal uses and preserves the offset:
1622 bh->b_data = (char *)(0 + offset);
1624 bh->b_data = page_address(page) + offset;
1626 EXPORT_SYMBOL(set_bh_page);
1629 * Called when truncating a buffer on a page completely.
1631 static inline void discard_buffer(struct buffer_head * bh)
1634 clear_buffer_dirty(bh);
1636 clear_buffer_mapped(bh);
1637 clear_buffer_req(bh);
1638 clear_buffer_new(bh);
1639 clear_buffer_delay(bh);
1644 * try_to_release_page() - release old fs-specific metadata on a page
1646 * @page: the page which the kernel is trying to free
1647 * @gfp_mask: memory allocation flags (and I/O mode)
1649 * The address_space is to try to release any data against the page
1650 * (presumably at page->private). If the release was successful, return `1'.
1651 * Otherwise return zero.
1653 * The @gfp_mask argument specifies whether I/O may be performed to release
1654 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1656 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1658 int try_to_release_page(struct page *page, int gfp_mask)
1660 struct address_space * const mapping = page->mapping;
1662 BUG_ON(!PageLocked(page));
1663 if (PageWriteback(page))
1666 if (mapping && mapping->a_ops->releasepage)
1667 return mapping->a_ops->releasepage(page, gfp_mask);
1668 return try_to_free_buffers(page);
1670 EXPORT_SYMBOL(try_to_release_page);
1673 * block_invalidatepage - invalidate part or all of a buffer-backed page
1675 * @page: the page which is affected
1676 * @offset: the index of the truncation point
1678 * block_invalidatepage() is called when all or part of the page has become
1679 * invalidated by a truncate operation.
1681 * block_invalidatepage() does not have to release all buffers, but it must
1682 * ensure that no dirty buffer is left outside @offset and that no I/O
1683 * is underway against any of the blocks which are outside the truncation
1684 * point. Because the caller is about to free (and possibly reuse) those
1687 int block_invalidatepage(struct page *page, unsigned long offset)
1689 struct buffer_head *head, *bh, *next;
1690 unsigned int curr_off = 0;
1693 BUG_ON(!PageLocked(page));
1694 if (!page_has_buffers(page))
1697 head = page_buffers(page);
1700 unsigned int next_off = curr_off + bh->b_size;
1701 next = bh->b_this_page;
1704 * is this block fully invalidated?
1706 if (offset <= curr_off)
1708 curr_off = next_off;
1710 } while (bh != head);
1713 * We release buffers only if the entire page is being invalidated.
1714 * The get_block cached value has been unconditionally invalidated,
1715 * so real IO is not possible anymore.
1718 ret = try_to_release_page(page, 0);
1722 EXPORT_SYMBOL(block_invalidatepage);
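/*
 * Hedged sketch (not part of this file): a filesystem that needs no extra
 * bookkeeping on truncate can simply forward its ->invalidatepage operation
 * to block_invalidatepage(); a journalling filesystem would do its own work
 * first.  examplefs_invalidatepage() is a hypothetical wrapper.
 */
static int examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* journal-specific revoke/cancel work would go here */
	return block_invalidatepage(page, offset);
}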
1725 * We attach and possibly dirty the buffers atomically wrt
1726 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1727 * is already excluded via the page lock.
1729 void create_empty_buffers(struct page *page,
1730 unsigned long blocksize, unsigned long b_state)
1732 struct buffer_head *bh, *head, *tail;
1734 head = create_buffers(page, blocksize, 1);
1737 bh->b_state |= b_state;
1739 bh = bh->b_this_page;
1741 tail->b_this_page = head;
1743 spin_lock(&page->mapping->private_lock);
1744 if (PageUptodate(page) || PageDirty(page)) {
1747 if (PageDirty(page))
1748 set_buffer_dirty(bh);
1749 if (PageUptodate(page))
1750 set_buffer_uptodate(bh);
1751 bh = bh->b_this_page;
1752 } while (bh != head);
1754 __set_page_buffers(page, head);
1755 spin_unlock(&page->mapping->private_lock);
1757 EXPORT_SYMBOL(create_empty_buffers);
1760 * We are taking a block for data and we don't want any output from any
1761 * buffer-cache aliases starting from return from that function and
1762 * until the moment when something will explicitly mark the buffer
1763 * dirty (hopefully that will not happen until we will free that block ;-)
1764 * We don't even need to mark it not-uptodate - nobody can expect
1765 * anything from a newly allocated buffer anyway. We used to use
1766 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1767 * don't want to mark the alias unmapped, for example - it would confuse
1768 * anyone who might pick it with bread() afterwards...
1770 * Also.. Note that bforget() doesn't lock the buffer. So there can
1771 * be writeout I/O going on against recently-freed buffers. We don't
1772 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1773 * only if we really need to. That happens here.
1775 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1777 struct buffer_head *old_bh;
1779 old_bh = __find_get_block_slow(bdev, block, 0);
1781 clear_buffer_dirty(old_bh);
1782 wait_on_buffer(old_bh);
1783 clear_buffer_req(old_bh);
1787 EXPORT_SYMBOL(unmap_underlying_metadata);
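/*
 * Hedged sketch (not part of this file): the pattern described above, as used
 * by __block_prepare_write() and __block_write_full_page() later in this
 * file.  When get_block() reports a newly allocated block (BH_New), any stale
 * alias of that block still sitting in the blockdev's buffer cache must be
 * discarded before new data is written through the mapping.
 */
static void example_handle_new_block(struct buffer_head *bh)
{
	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}
}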
1790 * NOTE! All mapped/uptodate combinations are valid:
1792 * Mapped Uptodate Meaning
1794 * No No "unknown" - must do get_block()
1795 * No Yes "hole" - zero-filled
1796 * Yes No "allocated" - allocated on disk, not read in
1797 * Yes Yes "valid" - allocated and up-to-date in memory.
1799 * "Dirty" is valid only with the last case (mapped+uptodate).
1803 * While block_write_full_page is writing back the dirty buffers under
1804 * the page lock, whoever dirtied the buffers may decide to clean them
1805 * again at any time. We handle that by only looking at the buffer
1806 * state inside lock_buffer().
1808 * If block_write_full_page() is called for regular writeback
1809 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1810 * locked buffer. This only can happen if someone has written the buffer
1811 * directly, with submit_bh(). At the address_space level PageWriteback
1812 * prevents this contention from occurring.
1814 static int __block_write_full_page(struct inode *inode, struct page *page,
1815 get_block_t *get_block, struct writeback_control *wbc)
1819 sector_t last_block;
1820 struct buffer_head *bh, *head;
1821 int nr_underway = 0;
1823 BUG_ON(!PageLocked(page));
1825 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1827 if (!page_has_buffers(page)) {
1828 create_empty_buffers(page, 1 << inode->i_blkbits,
1829 (1 << BH_Dirty)|(1 << BH_Uptodate));
1833 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1834 * here, and the (potentially unmapped) buffers may become dirty at
1835 * any time. If a buffer becomes dirty here after we've inspected it
1836 * then we just miss that fact, and the page stays dirty.
1838 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1839 * handle that here by just cleaning them.
1842 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1843 head = page_buffers(page);
1847 * Get all the dirty buffers mapped to disk addresses and
1848 * handle any aliases from the underlying blockdev's mapping.
1851 if (block > last_block) {
1853 * mapped buffers outside i_size will occur, because
1854 * this page can be outside i_size when there is a
1855 * truncate in progress.
1858 * The buffer was zeroed by block_write_full_page()
1860 clear_buffer_dirty(bh);
1861 set_buffer_uptodate(bh);
1862 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1863 err = get_block(inode, block, bh, 1);
1866 if (buffer_new(bh)) {
1867 /* blockdev mappings never come here */
1868 clear_buffer_new(bh);
1869 unmap_underlying_metadata(bh->b_bdev,
1873 bh = bh->b_this_page;
1875 } while (bh != head);
1879 if (!buffer_mapped(bh))
1882 * If it's a fully non-blocking write attempt and we cannot
1883 * lock the buffer then redirty the page. Note that this can
1884 * potentially cause a busy-wait loop from pdflush and kswapd
1885 * activity, but those code paths have their own higher-level
1888 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1890 } else if (test_set_buffer_locked(bh)) {
1891 redirty_page_for_writepage(wbc, page);
1894 if (test_clear_buffer_dirty(bh)) {
1895 mark_buffer_async_write(bh);
1899 } while ((bh = bh->b_this_page) != head);
1902 * The page and its buffers are protected by PageWriteback(), so we can
1903 * drop the bh refcounts early.
1905 BUG_ON(PageWriteback(page));
1906 set_page_writeback(page);
1910 struct buffer_head *next = bh->b_this_page;
1911 if (buffer_async_write(bh)) {
1912 submit_bh(WRITE, bh);
1917 } while (bh != head);
1921 if (nr_underway == 0) {
1923 * The page was marked dirty, but the buffers were
1924 * clean. Someone wrote them back by hand with
1925 * ll_rw_block/submit_bh. A rare case.
1929 if (!buffer_uptodate(bh)) {
1933 bh = bh->b_this_page;
1934 } while (bh != head);
1936 SetPageUptodate(page);
1937 end_page_writeback(page);
1939 * The page and buffer_heads can be released at any time from
1942 wbc->pages_skipped++; /* We didn't write this page */
1948 * ENOSPC, or some other error. We may already have added some
1949 * blocks to the file, so we need to write these out to avoid
1950 * exposing stale data.
1951 * The page is currently locked and not marked for writeback
1954 /* Recovery: lock and submit the mapped buffers */
1957 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1959 mark_buffer_async_write(bh);
1962 * The buffer may have been set dirty during
1963 * attachment to a dirty page.
1965 clear_buffer_dirty(bh);
1967 } while ((bh = bh->b_this_page) != head);
1969 BUG_ON(PageWriteback(page));
1970 set_page_writeback(page);
1973 struct buffer_head *next = bh->b_this_page;
1974 if (buffer_async_write(bh)) {
1975 clear_buffer_dirty(bh);
1976 submit_bh(WRITE, bh);
1981 } while (bh != head);
1985 static int __block_prepare_write(struct inode *inode, struct page *page,
1986 unsigned from, unsigned to, get_block_t *get_block)
1988 unsigned block_start, block_end;
1991 unsigned blocksize, bbits;
1992 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1994 BUG_ON(!PageLocked(page));
1995 BUG_ON(from > PAGE_CACHE_SIZE);
1996 BUG_ON(to > PAGE_CACHE_SIZE);
1999 blocksize = 1 << inode->i_blkbits;
2000 if (!page_has_buffers(page))
2001 create_empty_buffers(page, blocksize, 0);
2002 head = page_buffers(page);
2004 bbits = inode->i_blkbits;
2005 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
2007 for(bh = head, block_start = 0; bh != head || !block_start;
2008 block++, block_start=block_end, bh = bh->b_this_page) {
2009 block_end = block_start + blocksize;
2010 if (block_end <= from || block_start >= to) {
2011 if (PageUptodate(page)) {
2012 if (!buffer_uptodate(bh))
2013 set_buffer_uptodate(bh);
2018 clear_buffer_new(bh);
2019 if (!buffer_mapped(bh)) {
2020 err = get_block(inode, block, bh, 1);
2023 if (buffer_new(bh)) {
2024 clear_buffer_new(bh);
2025 unmap_underlying_metadata(bh->b_bdev,
2027 if (PageUptodate(page)) {
2028 set_buffer_uptodate(bh);
2031 if (block_end > to || block_start < from) {
2034 kaddr = kmap_atomic(page, KM_USER0);
2038 if (block_start < from)
2039 memset(kaddr+block_start,
2040 0, from-block_start);
2041 flush_dcache_page(page);
2042 kunmap_atomic(kaddr, KM_USER0);
2047 if (PageUptodate(page)) {
2048 if (!buffer_uptodate(bh))
2049 set_buffer_uptodate(bh);
2052 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2053 (block_start < from || block_end > to)) {
2054 ll_rw_block(READ, 1, &bh);
2059 * If we issued read requests - let them complete.
2061 while(wait_bh > wait) {
2062 wait_on_buffer(*--wait_bh);
2063 if (!buffer_uptodate(*wait_bh))
2069 * Zero out any newly allocated blocks to avoid exposing stale
2070 * data. If BH_New is set, we know that the block was newly
2071 * allocated in the above loop.
2076 block_end = block_start+blocksize;
2077 if (block_end <= from)
2079 if (block_start >= to)
2081 if (buffer_new(bh)) {
2084 clear_buffer_new(bh);
2085 kaddr = kmap_atomic(page, KM_USER0);
2086 memset(kaddr+block_start, 0, bh->b_size);
2087 kunmap_atomic(kaddr, KM_USER0);
2088 set_buffer_uptodate(bh);
2089 mark_buffer_dirty(bh);
2092 block_start = block_end;
2093 bh = bh->b_this_page;
2094 } while (bh != head);
2098 static int __block_commit_write(struct inode *inode, struct page *page,
2099 unsigned from, unsigned to)
2101 unsigned block_start, block_end;
2104 struct buffer_head *bh, *head;
2106 blocksize = 1 << inode->i_blkbits;
2108 for(bh = head = page_buffers(page), block_start = 0;
2109 bh != head || !block_start;
2110 block_start=block_end, bh = bh->b_this_page) {
2111 block_end = block_start + blocksize;
2112 if (block_end <= from || block_start >= to) {
2113 if (!buffer_uptodate(bh))
2116 set_buffer_uptodate(bh);
2117 mark_buffer_dirty(bh);
2122 * If this is a partial write which happened to make all buffers
2123 * uptodate then we can optimize away a bogus readpage() for
2124 * the next read(). Here we 'discover' whether the page went
2125 * uptodate as a result of this (potentially partial) write.
2128 SetPageUptodate(page);
2133 * Generic "read page" function for block devices that have the normal
2134 * get_block functionality. This is most of the block device filesystems.
2135 * Reads the page asynchronously --- the unlock_buffer() and
2136 * set/clear_buffer_uptodate() functions propagate buffer state into the
2137 * page struct once IO has completed.
2139 int block_read_full_page(struct page *page, get_block_t *get_block)
2141 struct inode *inode = page->mapping->host;
2142 sector_t iblock, lblock;
2143 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2144 unsigned int blocksize;
2146 int fully_mapped = 1;
2148 if (!PageLocked(page))
2150 blocksize = 1 << inode->i_blkbits;
2151 if (!page_has_buffers(page))
2152 create_empty_buffers(page, blocksize, 0);
2153 head = page_buffers(page);
2155 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2156 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2162 if (buffer_uptodate(bh))
2165 if (!buffer_mapped(bh)) {
2167 if (iblock < lblock) {
2168 if (get_block(inode, iblock, bh, 0))
2171 if (!buffer_mapped(bh)) {
2172 void *kaddr = kmap_atomic(page, KM_USER0);
2173 memset(kaddr + i * blocksize, 0, blocksize);
2174 flush_dcache_page(page);
2175 kunmap_atomic(kaddr, KM_USER0);
2176 set_buffer_uptodate(bh);
2180 * get_block() might have updated the buffer
2183 if (buffer_uptodate(bh))
2187 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2190 SetPageMappedToDisk(page);
2194 * All buffers are uptodate - we can set the page uptodate
2195 * as well. But not if get_block() returned an error.
2197 if (!PageError(page))
2198 SetPageUptodate(page);
2203 /* Stage two: lock the buffers */
2204 for (i = 0; i < nr; i++) {
2207 mark_buffer_async_read(bh);
2211 * Stage 3: start the IO. Check for uptodateness
2212 * inside the buffer lock in case another process reading
2213 * the underlying blockdev brought it uptodate (the sct fix).
2215 for (i = 0; i < nr; i++) {
2217 if (buffer_uptodate(bh))
2218 end_buffer_async_read(bh, 1);
2220 submit_bh(READ, bh);
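/*
 * Illustrative sketch (not part of this file): a buffer-based filesystem
 * typically implements ->readpage by handing its get_block routine to
 * block_read_full_page().  examplefs_get_block() is a hypothetical
 * get_block_t assumed to be defined by the filesystem.
 */
extern int examplefs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}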
2225 /* utility function for filesystems that need to do work on expanding
2226 * truncates. Uses prepare/commit_write to allow the filesystem to
2227 * deal with the hole.
2229 int generic_cont_expand(struct inode *inode, loff_t size)
2231 struct address_space *mapping = inode->i_mapping;
2233 unsigned long index, offset, limit;
2237 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2238 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2239 send_sig(SIGXFSZ, current, 0);
2242 if (size > inode->i_sb->s_maxbytes)
2245 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2247 /* ugh. in prepare/commit_write, if from==to==start of block, we
2248 ** skip the prepare. make sure we never send an offset for the start
2251 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2254 index = size >> PAGE_CACHE_SHIFT;
2256 page = grab_cache_page(mapping, index);
2259 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2261 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2264 page_cache_release(page);
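/*
 * Hedged sketch (not part of this file): a filesystem that cannot represent
 * holes may call generic_cont_expand() from its setattr path when a truncate
 * grows the file, so that the new tail blocks get instantiated through
 * prepare/commit_write.  examplefs_grow() is hypothetical.
 */
static int examplefs_grow(struct inode *inode, loff_t new_size)
{
	if (new_size <= inode->i_size)
		return 0;
	return generic_cont_expand(inode, new_size);
}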
2272 * For moronic filesystems that do not allow holes in files.
2273 * We may have to extend the file.
2276 int cont_prepare_write(struct page *page, unsigned offset,
2277 unsigned to, get_block_t *get_block, loff_t *bytes)
2279 struct address_space *mapping = page->mapping;
2280 struct inode *inode = mapping->host;
2281 struct page *new_page;
2285 unsigned blocksize = 1 << inode->i_blkbits;
2288 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2290 new_page = grab_cache_page(mapping, pgpos);
2293 /* we might sleep */
2294 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2295 unlock_page(new_page);
2296 page_cache_release(new_page);
2299 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2300 if (zerofrom & (blocksize-1)) {
2301 *bytes |= (blocksize-1);
2304 status = __block_prepare_write(inode, new_page, zerofrom,
2305 PAGE_CACHE_SIZE, get_block);
2308 kaddr = kmap_atomic(new_page, KM_USER0);
2309 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2310 flush_dcache_page(new_page);
2311 kunmap_atomic(kaddr, KM_USER0);
2312 __block_commit_write(inode, new_page,
2313 zerofrom, PAGE_CACHE_SIZE);
2314 unlock_page(new_page);
2315 page_cache_release(new_page);
2318 if (page->index < pgpos) {
2319 /* completely inside the area */
2322 /* page covers the boundary, find the boundary offset */
2323 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2325 /* if we are going to expand the file, the last block will be filled */
2326 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2327 *bytes |= (blocksize-1);
2331 /* starting below the boundary? Nothing to zero out */
2332 if (offset <= zerofrom)
2335 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2338 if (zerofrom < offset) {
2339 kaddr = kmap_atomic(page, KM_USER0);
2340 memset(kaddr+zerofrom, 0, offset-zerofrom);
2341 flush_dcache_page(page);
2342 kunmap_atomic(kaddr, KM_USER0);
2343 __block_commit_write(inode, page, zerofrom, offset);
2347 ClearPageUptodate(page);
2351 ClearPageUptodate(new_page);
2352 unlock_page(new_page);
2353 page_cache_release(new_page);
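/*
 * Illustrative sketch: cont_prepare_write() is meant to be called from a
 * filesystem's ->prepare_write with a pointer to a per-inode "initialised up
 * to here" offset, which the helper advances as it zero-fills the gap.  The
 * example_inode_info structure and names are hypothetical; example_get_block
 * is the toy mapper from the readpage sketch earlier.
 */
struct example_inode_info {
	loff_t		i_zeroed_up_to;	/* highest initialised file offset */
	struct inode	vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

static int example_cont_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, example_get_block,
				  &EXAMPLE_I(inode)->i_zeroed_up_to);
}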
2358 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2359 get_block_t *get_block)
2361 struct inode *inode = page->mapping->host;
2362 int err = __block_prepare_write(inode, page, from, to, get_block);
2364 ClearPageUptodate(page);
2368 int block_commit_write(struct page *page, unsigned from, unsigned to)
2370 struct inode *inode = page->mapping->host;
2371 __block_commit_write(inode,page,from,to);
2375 int generic_commit_write(struct file *file, struct page *page,
2376 unsigned from, unsigned to)
2378 struct inode *inode = page->mapping->host;
2379 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2380 __block_commit_write(inode,page,from,to);
2382 * No need to use i_size_read() here, the i_size
2383 * cannot change under us because we hold i_sem.
2385 if (pos > inode->i_size) {
2386 i_size_write(inode, pos);
2387 mark_inode_dirty(inode);
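/*
 * Illustrative sketch: the usual pairing for a simple buffer-backed
 * filesystem is block_prepare_write() for ->prepare_write and
 * generic_commit_write() for ->commit_write, so that i_size is advanced
 * under i_sem as described above.  Names are hypothetical;
 * example_get_block/example_readpage come from the readpage sketch earlier.
 */
static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_get_block);
}

static struct address_space_operations example_file_aops = {
	.readpage	= example_readpage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_prepare_write,
	.commit_write	= generic_commit_write,
};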
2394 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2395 * immediately, while under the page lock. So it needs a special end_io
2396 * handler which does not touch the bh after unlocking it.
2398 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2399 * a race there is benign: unlock_buffer() only uses the bh's address for
2400 * hashing after unlocking the buffer, so it doesn't actually touch the bh itself.
2403 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2406 set_buffer_uptodate(bh);
2408 /* This happens due to failed READA attempts. */
2409 clear_buffer_uptodate(bh);
2415 * On entry, the page is not uptodate at all.
2416 * On exit, the page is fully uptodate in the areas outside (from,to)
2418 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2419 get_block_t *get_block)
2421 struct inode *inode = page->mapping->host;
2422 const unsigned blkbits = inode->i_blkbits;
2423 const unsigned blocksize = 1 << blkbits;
2424 struct buffer_head map_bh;
2425 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2426 unsigned block_in_page;
2427 unsigned block_start;
2428 sector_t block_in_file;
2433 int is_mapped_to_disk = 1;
2436 if (PageMappedToDisk(page))
2439 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2440 map_bh.b_page = page;
2443 * We loop across all blocks in the page, whether or not they are
2444 * part of the affected region. This is so we can discover if the
2445 * page is fully mapped-to-disk.
2447 for (block_start = 0, block_in_page = 0;
2448 block_start < PAGE_CACHE_SIZE;
2449 block_in_page++, block_start += blocksize) {
2450 unsigned block_end = block_start + blocksize;
2455 if (block_start >= to)
2457 ret = get_block(inode, block_in_file + block_in_page,
2461 if (!buffer_mapped(&map_bh))
2462 is_mapped_to_disk = 0;
2463 if (buffer_new(&map_bh))
2464 unmap_underlying_metadata(map_bh.b_bdev,
2466 if (PageUptodate(page))
2468 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2469 kaddr = kmap_atomic(page, KM_USER0);
2470 if (block_start < from) {
2471 memset(kaddr+block_start, 0, from-block_start);
2474 if (block_end > to) {
2475 memset(kaddr + to, 0, block_end - to);
2478 flush_dcache_page(page);
2479 kunmap_atomic(kaddr, KM_USER0);
2482 if (buffer_uptodate(&map_bh))
2483 continue; /* reiserfs does this */
2484 if (block_start < from || block_end > to) {
2485 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2491 bh->b_state = map_bh.b_state;
2492 atomic_set(&bh->b_count, 0);
2493 bh->b_this_page = NULL;
2495 bh->b_blocknr = map_bh.b_blocknr;
2496 bh->b_size = blocksize;
2497 bh->b_data = (char *)(long)block_start;
2498 bh->b_bdev = map_bh.b_bdev;
2499 bh->b_private = NULL;
2500 read_bh[nr_reads++] = bh;
2505 struct buffer_head *bh;
2508 * The page is locked, so these buffers are protected from
2509 * any VM or truncate activity. Hence we don't need to care
2510 * for the buffer_head refcounts.
2512 for (i = 0; i < nr_reads; i++) {
2515 bh->b_end_io = end_buffer_read_nobh;
2516 submit_bh(READ, bh);
2518 for (i = 0; i < nr_reads; i++) {
2521 if (!buffer_uptodate(bh))
2523 free_buffer_head(bh);
2530 if (is_mapped_to_disk)
2531 SetPageMappedToDisk(page);
2532 SetPageUptodate(page);
2535 * Setting the page dirty here isn't necessary for the prepare_write
2536 * function - commit_write will do that. But if/when this function is
2537 * used within the pagefault handler to ensure that all mmapped pages
2538 * have backing space in the filesystem, we will need to dirty the page
2539 * if its contents were altered.
2542 set_page_dirty(page);
2547 for (i = 0; i < nr_reads; i++) {
2549 free_buffer_head(read_bh[i]);
2553 * Error recovery is pretty slack. Clear the page and mark it dirty
2554 * so we'll later zero out any blocks which _were_ allocated.
2556 kaddr = kmap_atomic(page, KM_USER0);
2557 memset(kaddr, 0, PAGE_CACHE_SIZE);
2558 kunmap_atomic(kaddr, KM_USER0);
2559 SetPageUptodate(page);
2560 set_page_dirty(page);
2563 EXPORT_SYMBOL(nobh_prepare_write);
2565 int nobh_commit_write(struct file *file, struct page *page,
2566 unsigned from, unsigned to)
2568 struct inode *inode = page->mapping->host;
2569 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2571 set_page_dirty(page);
2572 if (pos > inode->i_size) {
2573 i_size_write(inode, pos);
2574 mark_inode_dirty(inode);
2578 EXPORT_SYMBOL(nobh_commit_write);
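/*
 * Illustrative sketch: a filesystem that does not want long-lived
 * buffer_heads attached to its data pages can pair nobh_prepare_write() and
 * nobh_commit_write() in its aops (ext2's "nobh" mode works along these
 * lines).  Names are hypothetical; example_get_block/example_readpage come
 * from the readpage sketch earlier.
 */
static int example_nobh_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, example_get_block);
}

static struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};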
2581 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2583 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2585 struct inode *inode = mapping->host;
2586 unsigned blocksize = 1 << inode->i_blkbits;
2587 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2588 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2591 struct address_space_operations *a_ops = mapping->a_ops;
2595 if ((offset & (blocksize - 1)) == 0)
2599 page = grab_cache_page(mapping, index);
2603 to = (offset + blocksize) & ~(blocksize - 1);
2604 ret = a_ops->prepare_write(NULL, page, offset, to);
2606 kaddr = kmap_atomic(page, KM_USER0);
2607 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2608 flush_dcache_page(page);
2609 kunmap_atomic(kaddr, KM_USER0);
2610 set_page_dirty(page);
2613 page_cache_release(page);
2617 EXPORT_SYMBOL(nobh_truncate_page);
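/*
 * Illustrative sketch: a truncate helper for the nobh case.  Because
 * nobh_truncate_page() goes through ->prepare_write itself, it is only valid
 * when that method is nobh_prepare_write(), as the comment above notes.
 * example_truncate is a hypothetical name.
 */
static void example_truncate(struct inode *inode)
{
	/* zero the tail of the partial block at the new EOF */
	nobh_truncate_page(inode->i_mapping, i_size_read(inode));
	/* filesystem-specific freeing of the truncated blocks would follow */
}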
2619 int block_truncate_page(struct address_space *mapping,
2620 loff_t from, get_block_t *get_block)
2622 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2623 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2626 unsigned length, pos;
2627 struct inode *inode = mapping->host;
2629 struct buffer_head *bh;
2633 blocksize = 1 << inode->i_blkbits;
2634 length = offset & (blocksize - 1);
2636 /* Block boundary? Nothing to do */
2640 length = blocksize - length;
2641 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2643 page = grab_cache_page(mapping, index);
2648 if (!page_has_buffers(page))
2649 create_empty_buffers(page, blocksize, 0);
2651 /* Find the buffer that contains "offset" */
2652 bh = page_buffers(page);
2654 while (offset >= pos) {
2655 bh = bh->b_this_page;
2661 if (!buffer_mapped(bh)) {
2662 err = get_block(inode, iblock, bh, 0);
2665 /* unmapped? It's a hole - nothing to do */
2666 if (!buffer_mapped(bh))
2670 /* Ok, it's mapped. Make sure it's up-to-date */
2671 if (PageUptodate(page))
2672 set_buffer_uptodate(bh);
2674 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2676 ll_rw_block(READ, 1, &bh);
2678 /* Uhhuh. Read error. Complain and punt. */
2679 if (!buffer_uptodate(bh))
2683 kaddr = kmap_atomic(page, KM_USER0);
2684 memset(kaddr + offset, 0, length);
2685 flush_dcache_page(page);
2686 kunmap_atomic(kaddr, KM_USER0);
2688 mark_buffer_dirty(bh);
2693 page_cache_release(page);
2699 * The generic ->writepage function for buffer-backed address_spaces
2701 int block_write_full_page(struct page *page, get_block_t *get_block,
2702 struct writeback_control *wbc)
2704 struct inode * const inode = page->mapping->host;
2705 loff_t i_size = i_size_read(inode);
2706 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2710 /* Is the page fully inside i_size? */
2711 if (page->index < end_index)
2712 return __block_write_full_page(inode, page, get_block, wbc);
2714 /* Is the page fully outside i_size? (truncate in progress) */
2715 offset = i_size & (PAGE_CACHE_SIZE-1);
2716 if (page->index >= end_index+1 || !offset) {
2718 * The page may have dirty, unmapped buffers. For example,
2719 * they may have been added in ext3_writepage(). Make them
2720 * freeable here, so the page does not leak.
2722 block_invalidatepage(page, 0);
2724 return 0; /* don't care */
2728 * The page straddles i_size. It must be zeroed out on each and every
2729 * writepage invocation because it may be mmapped. "A file is mapped
2730 * in multiples of the page size. For a file that is not a multiple of
2731 * the page size, the remaining memory is zeroed when mapped, and
2732 * writes to that region are not written out to the file."
2734 kaddr = kmap_atomic(page, KM_USER0);
2735 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2736 flush_dcache_page(page);
2737 kunmap_atomic(kaddr, KM_USER0);
2738 return __block_write_full_page(inode, page, get_block, wbc);
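/*
 * Worked example of the straddling case above, with illustrative numbers:
 * for PAGE_CACHE_SIZE == 4096 and i_size == 10000, end_index is 2 and
 * offset is 10000 & 4095 == 1808.  Pages 0 and 1 are written in full,
 * page 2 has bytes 1808..4095 zeroed before writeout, and pages 3 and up
 * are invalidated and skipped.  A hypothetical ->writepage wrapper is just:
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}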
2741 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2742 get_block_t *get_block)
2744 struct buffer_head tmp;
2745 struct inode *inode = mapping->host;
2748 get_block(inode, block, &tmp, 0);
2749 return tmp.b_blocknr;
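/*
 * Illustrative sketch: ->bmap, as used by the FIBMAP ioctl and by swap
 * files, is typically a thin wrapper around generic_block_bmap() with the
 * filesystem's get_block.  example_bmap is a hypothetical name.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}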
2752 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2754 struct buffer_head *bh = bio->bi_private;
2759 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2764 void submit_bh(int rw, struct buffer_head * bh)
2768 BUG_ON(!buffer_locked(bh));
2769 BUG_ON(!buffer_mapped(bh));
2770 BUG_ON(!bh->b_end_io);
2772 /* Only clear out a write error when rewriting */
2773 if (test_set_buffer_req(bh) && rw == WRITE)
2774 clear_buffer_write_io_error(bh);
2777 * from here on down, it's all bio -- do the initial mapping,
2778 * submit_bio -> generic_make_request may further map this bio around
2780 bio = bio_alloc(GFP_NOIO, 1);
2782 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2783 bio->bi_bdev = bh->b_bdev;
2784 bio->bi_io_vec[0].bv_page = bh->b_page;
2785 bio->bi_io_vec[0].bv_len = bh->b_size;
2786 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2790 bio->bi_size = bh->b_size;
2792 bio->bi_end_io = end_bio_bh_io_sync;
2793 bio->bi_private = bh;
2795 submit_bio(rw, bio);
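/*
 * Illustrative sketch: reading one block by hand with submit_bh(), along the
 * lines of __bread().  The buffer must be locked and have b_end_io set
 * before submission; end_buffer_read_sync() unlocks it and drops the extra
 * reference taken here.  example_read_block is a hypothetical helper.
 */
static struct buffer_head *example_read_block(struct super_block *sb,
					      sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh)) {
			get_bh(bh);	/* dropped by end_buffer_read_sync */
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				brelse(bh);	/* I/O error */
				return NULL;
			}
		} else {
			unlock_buffer(bh);	/* raced with another reader */
		}
	}
	return bh;
}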
2799 * ll_rw_block: low-level access to block devices (DEPRECATED)
2800 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2801 * @nr: number of &struct buffer_heads in the array
2802 * @bhs: array of pointers to &struct buffer_head
2804 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2805 * and requests an I/O operation on them, either a %READ or a %WRITE.
2806 * The third %READA option is described in the documentation for
2807 * generic_make_request() which ll_rw_block() calls.
2809 * This function drops any buffer that it cannot get a lock on (with the
2810 * BH_Lock state bit), any buffer that appears to be clean when doing a
2811 * write request, and any buffer that appears to be up-to-date when doing a
2812 * read request. Further, it marks as clean any buffers that are processed for
2813 * writing (the buffer cache won't assume that they are actually clean until
2814 * the buffer gets unlocked).
2816 * ll_rw_block sets b_end_io to a simple completion handler that marks
2817 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes up any waiters.
2820 * All of the buffers must be for the same device, and must also be a
2821 * multiple of the current approved size for the device.
2823 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2827 for (i = 0; i < nr; i++) {
2828 struct buffer_head *bh = bhs[i];
2830 if (test_set_buffer_locked(bh))
2835 bh->b_end_io = end_buffer_write_sync;
2836 if (test_clear_buffer_dirty(bh)) {
2837 submit_bh(WRITE, bh);
2841 bh->b_end_io = end_buffer_read_sync;
2842 if (!buffer_uptodate(bh)) {
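/*
 * Illustrative sketch: flushing a group of dirty metadata buffers with one
 * ll_rw_block() call and then waiting on each of them, much as the fsync
 * paths do.  Buffers that are already clean or locked are skipped by
 * ll_rw_block(), per the description above.  Hypothetical helper; assumes
 * <linux/errno.h>.
 */
static int example_flush_group(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(WRITE, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;	/* a write failed */
	}
	return err;
}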
2853 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2854 * and then start new I/O and then wait upon it.
2856 void sync_dirty_buffer(struct buffer_head *bh)
2858 WARN_ON(atomic_read(&bh->b_count) < 1);
2860 if (test_clear_buffer_dirty(bh)) {
2862 bh->b_end_io = end_buffer_write_sync;
2863 submit_bh(WRITE, bh);
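/*
 * Illustrative sketch: updating one on-disk metadata block with a
 * data-integrity guarantee.  sb_bread() brings the block uptodate, the
 * caller edits the cached copy, and sync_dirty_buffer() writes it out and
 * waits.  example_update_block is a hypothetical helper; offset is assumed
 * to be within the block.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
				unsigned offset, u8 value)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);
	int err = 0;

	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = value;	/* edit the cached copy */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);			/* submit WRITE and wait */
	if (!buffer_uptodate(bh))
		err = -EIO;			/* the write failed */
	brelse(bh);
	return err;
}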
2871 * try_to_free_buffers() checks if all the buffers on this particular page
2872 * are unused, and releases them if so.
2874 * Exclusion against try_to_free_buffers may be obtained by either
2875 * locking the page or by holding its mapping's private_lock.
2877 * If the page is dirty but all the buffers are clean then we need to
2878 * be sure to mark the page clean as well. This is because the page
2879 * may be against a block device, and a later reattachment of buffers
2880 * to a dirty page will set *all* buffers dirty, which would corrupt
2881 * filesystem data on the same device.
2883 * The same applies to regular filesystem pages: if all the buffers are
2884 * clean then we set the page clean and proceed. To do that, we require
2885 * total exclusion from __set_page_dirty_buffers(). That is obtained with the mapping's private_lock.
2888 * try_to_free_buffers() is non-blocking.
2890 static inline int buffer_busy(struct buffer_head *bh)
2892 return atomic_read(&bh->b_count) |
2893 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2897 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2899 struct buffer_head *head = page_buffers(page);
2900 struct buffer_head *bh;
2904 if (buffer_write_io_error(bh))
2905 set_bit(AS_EIO, &page->mapping->flags);
2906 if (buffer_busy(bh))
2908 bh = bh->b_this_page;
2909 } while (bh != head);
2912 struct buffer_head *next = bh->b_this_page;
2914 if (!list_empty(&bh->b_assoc_buffers))
2915 __remove_assoc_queue(bh);
2917 } while (bh != head);
2918 *buffers_to_free = head;
2919 __clear_page_buffers(page);
2925 int try_to_free_buffers(struct page *page)
2927 struct address_space * const mapping = page->mapping;
2928 struct buffer_head *buffers_to_free = NULL;
2931 BUG_ON(!PageLocked(page));
2932 if (PageWriteback(page))
2935 if (mapping == NULL) { /* can this still happen? */
2936 ret = drop_buffers(page, &buffers_to_free);
2940 spin_lock(&mapping->private_lock);
2941 ret = drop_buffers(page, &buffers_to_free);
2944 * If the filesystem writes its buffers by hand (eg ext3)
2945 * then we can have clean buffers against a dirty page. We
2946 * clean the page here; otherwise later reattachment of buffers
2947 * could encounter a non-uptodate page, which is unresolvable.
2948 * This only applies in the rare case where try_to_free_buffers
2949 * succeeds but the page is not freed.
2951 clear_page_dirty(page);
2953 spin_unlock(&mapping->private_lock);
2955 if (buffers_to_free) {
2956 struct buffer_head *bh = buffers_to_free;
2959 struct buffer_head *next = bh->b_this_page;
2960 free_buffer_head(bh);
2962 } while (bh != buffers_to_free);
2966 EXPORT_SYMBOL(try_to_free_buffers);
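/*
 * Illustrative sketch: a filesystem with no extra per-page state can make
 * its ->releasepage a bare call to try_to_free_buffers(); this is also what
 * the VM falls back to when no method is supplied.  The method is invoked
 * with the page locked, which provides the exclusion described above.
 * example_releasepage is a hypothetical name.
 */
static int example_releasepage(struct page *page, int gfp_mask)
{
	if (PagePrivate(page))
		return try_to_free_buffers(page);
	return 1;	/* no buffers attached, nothing pins the page */
}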
2968 int block_sync_page(struct page *page)
2970 struct address_space *mapping;
2973 mapping = page_mapping(page);
2975 blk_run_backing_dev(mapping->backing_dev_info, page);
2980 * There are no bdflush tunables left. But distributions are
2981 * still running obsolete flush daemons, so we terminate them here.
2983 * Use of bdflush() is deprecated and will be removed in a future kernel.
2984 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2986 asmlinkage long sys_bdflush(int func, long data)
2988 static int msg_count;
2990 if (!capable(CAP_SYS_ADMIN))
2993 if (msg_count < 5) {
2996 "warning: process `%s' used the obsolete bdflush"
2997 " system call\n", current->comm);
2998 printk(KERN_INFO "Fix your initscripts?\n");
3007 * Buffer-head allocation
3009 static kmem_cache_t *bh_cachep;
3012 * Once the number of bh's in the machine exceeds this level, we start
3013 * stripping them in writeback.
3015 static int max_buffer_heads;
3017 int buffer_heads_over_limit;
3019 struct bh_accounting {
3020 int nr; /* Number of live bh's */
3021 int ratelimit; /* Limit cacheline bouncing */
3024 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3026 static void recalc_bh_state(void)
3031 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3033 __get_cpu_var(bh_accounting).ratelimit = 0;
3035 tot += per_cpu(bh_accounting, i).nr;
3036 buffer_heads_over_limit = (tot > max_buffer_heads);
3039 struct buffer_head *alloc_buffer_head(int gfp_flags)
3041 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3044 __get_cpu_var(bh_accounting).nr++;
3050 EXPORT_SYMBOL(alloc_buffer_head);
3052 void free_buffer_head(struct buffer_head *bh)
3054 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3055 kmem_cache_free(bh_cachep, bh);
3057 __get_cpu_var(bh_accounting).nr--;
3061 EXPORT_SYMBOL(free_buffer_head);
3064 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3066 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3067 SLAB_CTOR_CONSTRUCTOR) {
3068 struct buffer_head * bh = (struct buffer_head *)data;
3070 memset(bh, 0, sizeof(*bh));
3071 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3075 #ifdef CONFIG_HOTPLUG_CPU
3076 static void buffer_exit_cpu(int cpu)
3079 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3081 for (i = 0; i < BH_LRU_SIZE; i++) {
3087 static int buffer_cpu_notify(struct notifier_block *self,
3088 unsigned long action, void *hcpu)
3090 if (action == CPU_DEAD)
3091 buffer_exit_cpu((unsigned long)hcpu);
3094 #endif /* CONFIG_HOTPLUG_CPU */
3096 void __init buffer_init(void)
3101 bh_cachep = kmem_cache_create("buffer_head",
3102 sizeof(struct buffer_head), 0,
3103 SLAB_PANIC, init_buffer_head, NULL);
3104 for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
3105 init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
3108 * Limit the bh occupancy to 10% of ZONE_NORMAL
3110 nrpages = (nr_free_buffer_pages() * 10) / 100;
3111 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3112 hotcpu_notifier(buffer_cpu_notify, 0);
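/*
 * Worked example of the limit above, with illustrative numbers: if
 * nr_free_buffer_pages() reported about 220,000 pages of ZONE_NORMAL and
 * PAGE_SIZE is 4096, nrpages comes to 22,000.  If sizeof(struct buffer_head)
 * were 96 bytes, each page could hold 4096 / 96 = 42 buffer_heads, so
 * max_buffer_heads would be about 22,000 * 42 = 924,000 before
 * buffer_heads_over_limit trips and writeback starts stripping buffers.
 */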
3115 EXPORT_SYMBOL(__bforget);
3116 EXPORT_SYMBOL(__brelse);
3117 EXPORT_SYMBOL(__wait_on_buffer);
3118 EXPORT_SYMBOL(block_commit_write);
3119 EXPORT_SYMBOL(block_prepare_write);
3120 EXPORT_SYMBOL(block_read_full_page);
3121 EXPORT_SYMBOL(block_sync_page);
3122 EXPORT_SYMBOL(block_truncate_page);
3123 EXPORT_SYMBOL(block_write_full_page);
3124 EXPORT_SYMBOL(buffer_insert_list);
3125 EXPORT_SYMBOL(cont_prepare_write);
3126 EXPORT_SYMBOL(end_buffer_async_write);
3127 EXPORT_SYMBOL(end_buffer_read_sync);
3128 EXPORT_SYMBOL(end_buffer_write_sync);
3129 EXPORT_SYMBOL(file_fsync);
3130 EXPORT_SYMBOL(fsync_bdev);
3131 EXPORT_SYMBOL(fsync_buffers_list);
3132 EXPORT_SYMBOL(generic_block_bmap);
3133 EXPORT_SYMBOL(generic_commit_write);
3134 EXPORT_SYMBOL(generic_cont_expand);
3135 EXPORT_SYMBOL(init_buffer);
3136 EXPORT_SYMBOL(invalidate_bdev);
3137 EXPORT_SYMBOL(ll_rw_block);
3138 EXPORT_SYMBOL(mark_buffer_dirty);
3139 EXPORT_SYMBOL(submit_bh);
3140 EXPORT_SYMBOL(sync_dirty_buffer);
3141 EXPORT_SYMBOL(unlock_buffer);