4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
43 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
44 static void invalidate_bh_lrus(void);
46 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
49 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 bh->b_end_io = handler;
52 bh->b_private = private;
55 static int sync_buffer(void *word)
57 struct block_device *bd;
58 struct buffer_head *bh
59 = container_of(word, struct buffer_head, b_state);
64 blk_run_address_space(bd->bd_inode->i_mapping);
69 void fastcall __lock_buffer(struct buffer_head *bh)
71 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
72 TASK_UNINTERRUPTIBLE);
74 EXPORT_SYMBOL(__lock_buffer);
76 void fastcall unlock_buffer(struct buffer_head *bh)
78 clear_buffer_locked(bh);
79 smp_mb__after_clear_bit();
80 wake_up_bit(&bh->b_state, BH_Lock);
84 * Block until a buffer comes unlocked. This doesn't stop it
85 * from becoming locked again - you have to lock it yourself
86 * if you want to preserve its state.
88 void __wait_on_buffer(struct buffer_head * bh)
90 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
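/*
 * Example (sketch): the usual pattern around wait_on_buffer() - wait for
 * any in-flight I/O on a buffer, then check the outcome.
 * myfs_wait_for_block() is a hypothetical caller, not part of this API.
 */
static int myfs_wait_for_block(struct buffer_head *bh)
{
	wait_on_buffer(bh);		/* sleeps until BH_Lock is cleared */
	if (!buffer_uptodate(bh))	/* I/O failed or was never issued */
		return -EIO;
	return 0;
}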
94 __clear_page_buffers(struct page *page)
96 ClearPagePrivate(page);
98 page_cache_release(page);
101 static void buffer_io_error(struct buffer_head *bh)
103 char b[BDEVNAME_SIZE];
105 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
106 bdevname(bh->b_bdev, b),
107 (unsigned long long)bh->b_blocknr);
111 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
112 * unlock the buffer. This is what ll_rw_block uses too.
114 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117 set_buffer_uptodate(bh);
119 /* This happens, due to failed READA attempts. */
120 clear_buffer_uptodate(bh);
126 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
128 char b[BDEVNAME_SIZE];
131 set_buffer_uptodate(bh);
133 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
135 printk(KERN_WARNING "lost page write due to "
136 "I/O error on %s\n",
137 bdevname(bh->b_bdev, b));
139 set_buffer_write_io_error(bh);
140 clear_buffer_uptodate(bh);
147 * Write out and wait upon all the dirty data associated with a block
148 * device via its mapping. Does not take the superblock lock.
150 int sync_blockdev(struct block_device *bdev)
157 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
158 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
164 EXPORT_SYMBOL(sync_blockdev);
167 * Write out and wait upon all dirty data associated with this
168 * superblock. Filesystem data as well as the underlying block
169 * device. Takes the superblock lock.
171 int fsync_super(struct super_block *sb)
173 sync_inodes_sb(sb, 0);
176 if (sb->s_dirt && sb->s_op->write_super)
177 sb->s_op->write_super(sb);
179 if (sb->s_op->sync_fs)
180 sb->s_op->sync_fs(sb, 1);
181 sync_blockdev(sb->s_bdev);
182 sync_inodes_sb(sb, 1);
184 return sync_blockdev(sb->s_bdev);
188 * Write out and wait upon all dirty data associated with this
189 * device. Filesystem data as well as the underlying block
190 * device. Takes the superblock lock.
192 int fsync_bdev(struct block_device *bdev)
194 struct super_block *sb = get_super(bdev);
196 int res = fsync_super(sb);
200 return sync_blockdev(bdev);
204 * freeze_bdev -- lock a filesystem and force it into a consistent state
205 * @bdev: blockdevice to lock
207 * This takes the block device bd_mount_sem to make sure no new mounts
208 * happen on bdev until thaw_bdev() is called.
209 * If a superblock is found on this device, we take the s_umount semaphore
210 * on it to make sure nobody unmounts until the snapshot creation is done.
212 struct super_block *freeze_bdev(struct block_device *bdev)
214 struct super_block *sb;
216 down(&bdev->bd_mount_sem);
217 sb = get_super(bdev);
218 if (sb && !(sb->s_flags & MS_RDONLY)) {
219 sb->s_frozen = SB_FREEZE_WRITE;
222 sync_inodes_sb(sb, 0);
226 if (sb->s_dirt && sb->s_op->write_super)
227 sb->s_op->write_super(sb);
230 if (sb->s_op->sync_fs)
231 sb->s_op->sync_fs(sb, 1);
233 sync_blockdev(sb->s_bdev);
234 sync_inodes_sb(sb, 1);
236 sb->s_frozen = SB_FREEZE_TRANS;
239 sync_blockdev(sb->s_bdev);
241 if (sb->s_op->write_super_lockfs)
242 sb->s_op->write_super_lockfs(sb);
246 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
248 EXPORT_SYMBOL(freeze_bdev);
251 * thaw_bdev -- unlock filesystem
252 * @bdev: blockdevice to unlock
253 * @sb: associated superblock
255 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
257 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
260 BUG_ON(sb->s_bdev != bdev);
262 if (sb->s_op->unlockfs)
263 sb->s_op->unlockfs(sb);
264 sb->s_frozen = SB_UNFROZEN;
266 wake_up(&sb->s_wait_unfrozen);
270 up(&bdev->bd_mount_sem);
272 EXPORT_SYMBOL(thaw_bdev);
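/*
 * Example (sketch): how a snapshot driver might pair freeze_bdev() with
 * thaw_bdev().  copy_device_contents() is hypothetical; the freeze/thaw
 * calls are the real API above.
 */
static int snapshot_blockdev(struct block_device *bdev)
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);			/* NULL if nothing is mounted */
	err = copy_device_contents(bdev);	/* hypothetical device-level copy */
	thaw_bdev(bdev, sb);			/* releases bd_mount_sem */
	return err;
}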
275 * sync everything. Start out by waking pdflush, because that writes back
276 * all queues in parallel.
278 static void do_sync(unsigned long wait)
281 sync_inodes(0); /* All mappings, inodes and their blockdevs */
283 sync_supers(); /* Write the superblocks */
284 sync_filesystems(0); /* Start syncing the filesystems */
285 sync_filesystems(wait); /* Waitingly sync the filesystems */
286 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
288 printk("Emergency Sync complete\n");
289 if (unlikely(laptop_mode))
290 laptop_sync_completion();
293 asmlinkage long sys_sync(void)
299 void emergency_sync(void)
301 pdflush_operation(do_sync, 0);
305 * Generic function to fsync a file.
307 * filp may be NULL if called via the msync of a vma.
310 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
312 struct inode * inode = dentry->d_inode;
313 struct super_block * sb;
316 /* sync the inode to buffers */
317 ret = write_inode_now(inode, 0);
319 /* sync the superblock to buffers */
322 if (sb->s_op->write_super)
323 sb->s_op->write_super(sb);
326 /* .. finally sync the buffers to disk */
327 err = sync_blockdev(sb->s_bdev);
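/*
 * Example (sketch): file_fsync() is meant to be plugged straight into a
 * filesystem's file_operations.  The surrounding ops are illustrative.
 */
static struct file_operations myfs_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.fsync	= file_fsync,	/* inode, then superblock, then blockdev */
};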
333 asmlinkage long sys_fsync(unsigned int fd)
336 struct address_space *mapping;
344 mapping = file->f_mapping;
347 if (!file->f_op || !file->f_op->fsync) {
348 /* Why? We can still call filemap_fdatawrite */
352 current->flags |= PF_SYNCWRITE;
353 ret = filemap_fdatawrite(mapping);
356 * We need to protect against concurrent writers,
357 * which could cause livelocks in fsync_buffers_list
359 down(&mapping->host->i_sem);
360 err = file->f_op->fsync(file, file->f_dentry, 0);
363 up(&mapping->host->i_sem);
364 err = filemap_fdatawait(mapping);
367 current->flags &= ~PF_SYNCWRITE;
375 asmlinkage long sys_fdatasync(unsigned int fd)
378 struct address_space *mapping;
387 if (!file->f_op || !file->f_op->fsync)
390 mapping = file->f_mapping;
392 current->flags |= PF_SYNCWRITE;
393 ret = filemap_fdatawrite(mapping);
394 down(&mapping->host->i_sem);
395 err = file->f_op->fsync(file, file->f_dentry, 1);
398 up(&mapping->host->i_sem);
399 err = filemap_fdatawait(mapping);
402 current->flags &= ~PF_SYNCWRITE;
411 * Various filesystems appear to want __find_get_block to be non-blocking.
412 * But it's the page lock which protects the buffers. To get around this,
413 * we get exclusion from try_to_free_buffers with the blockdev mapping's private_lock.
416 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
417 * may be quite high. This code could TryLock the page, and if that
418 * succeeds, there is no need to take private_lock. (But if
419 * private_lock is contended then so is mapping->tree_lock).
421 static struct buffer_head *
422 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
424 struct inode *bd_inode = bdev->bd_inode;
425 struct address_space *bd_mapping = bd_inode->i_mapping;
426 struct buffer_head *ret = NULL;
428 struct buffer_head *bh;
429 struct buffer_head *head;
433 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
434 page = find_get_page(bd_mapping, index);
438 spin_lock(&bd_mapping->private_lock);
439 if (!page_has_buffers(page))
441 head = page_buffers(page);
444 if (bh->b_blocknr == block) {
449 if (!buffer_mapped(bh))
451 bh = bh->b_this_page;
452 } while (bh != head);
454 /* we might be here because some of the buffers on this page are
455 * not mapped. This is due to various races between
456 * file I/O on the block device and getblk. It gets dealt with
457 * elsewhere; don't buffer_error if we had some unmapped buffers
460 printk("__find_get_block_slow() failed. "
461 "block=%llu, b_blocknr=%llu\n",
462 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
463 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
464 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
467 spin_unlock(&bd_mapping->private_lock);
468 page_cache_release(page);
473 /* If invalidate_buffers() will trash dirty buffers, it means some kind
474 of fs corruption is going on. Trashing dirty data always implies losing
475 information that was supposed to be just stored on the physical layer
478 Thus invalidate_buffers in general usage is not allowed to trash
479 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
480 be preserved. These buffers are simply skipped.
482 We also skip buffers which are still in use. For example this can
483 happen if a userspace program is reading the block device.
485 NOTE: In the case where the user removed a removable-media disk, even if
486 there's still dirty data not synced on disk (due to a bug in the device
487 driver or to an error of the user), by not destroying the dirty buffers we
488 could generate corruption also on the next media inserted; thus a parameter
489 is necessary to handle this case in the safest way possible (trying
490 not to corrupt the new disk inserted with the data belonging to
491 the old, now corrupted, disk). Also, for the ramdisk the natural thing
492 to do in order to release the ramdisk memory is to destroy dirty buffers.
494 These are two special cases. Normal usage implies that the device driver
495 issues a sync on the device (without waiting for I/O completion) and
496 then an invalidate_buffers call that doesn't trash dirty buffers.
498 For handling cache coherency with the blkdev pagecache, the 'update' case
499 has been introduced. It is needed to re-read from disk any pinned
500 buffer. NOTE: re-reading from disk is destructive so we can do it only
501 when we assume nobody is changing the buffercache under our I/O and when
502 we think the disk contains more recent information than the buffercache.
503 The update == 1 pass marks the buffers we need to update, the update == 2
504 pass does the actual I/O. */
505 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
507 invalidate_bh_lrus();
509 * FIXME: what about destroy_dirty_buffers?
510 * We really want to use invalidate_inode_pages2() for
511 * that, but not until that's cleaned up.
513 invalidate_inode_pages(bdev->bd_inode->i_mapping);
517 * Kick pdflush then try to free up some ZONE_NORMAL memory.
519 static void free_more_memory(void)
524 wakeup_bdflush(1024);
527 for_each_pgdat(pgdat) {
528 zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
530 try_to_free_pages(zones, GFP_NOFS, 0);
535 * I/O completion handler for block_read_full_page() - pages
536 * which come unlocked at the end of I/O.
538 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
540 static DEFINE_SPINLOCK(page_uptodate_lock);
542 struct buffer_head *tmp;
544 int page_uptodate = 1;
546 BUG_ON(!buffer_async_read(bh));
550 set_buffer_uptodate(bh);
552 clear_buffer_uptodate(bh);
553 if (printk_ratelimit())
559 * Be _very_ careful from here on. Bad things can happen if
560 * two buffer heads end IO at almost the same time and both
561 * decide that the page is now completely done.
563 spin_lock_irqsave(&page_uptodate_lock, flags);
564 clear_buffer_async_read(bh);
568 if (!buffer_uptodate(tmp))
570 if (buffer_async_read(tmp)) {
571 BUG_ON(!buffer_locked(tmp));
574 tmp = tmp->b_this_page;
576 spin_unlock_irqrestore(&page_uptodate_lock, flags);
579 * If none of the buffers had errors and they are all
580 * uptodate then we can set the page uptodate.
582 if (page_uptodate && !PageError(page))
583 SetPageUptodate(page);
588 spin_unlock_irqrestore(&page_uptodate_lock, flags);
593 * Completion handler for block_write_full_page() - pages which are unlocked
594 * during I/O, and which have PageWriteback cleared upon I/O completion.
596 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
598 char b[BDEVNAME_SIZE];
599 static DEFINE_SPINLOCK(page_uptodate_lock);
601 struct buffer_head *tmp;
604 BUG_ON(!buffer_async_write(bh));
608 set_buffer_uptodate(bh);
610 if (printk_ratelimit()) {
612 printk(KERN_WARNING "lost page write due to "
613 "I/O error on %s\n",
614 bdevname(bh->b_bdev, b));
616 set_bit(AS_EIO, &page->mapping->flags);
617 clear_buffer_uptodate(bh);
621 spin_lock_irqsave(&page_uptodate_lock, flags);
622 clear_buffer_async_write(bh);
624 tmp = bh->b_this_page;
626 if (buffer_async_write(tmp)) {
627 BUG_ON(!buffer_locked(tmp));
630 tmp = tmp->b_this_page;
632 spin_unlock_irqrestore(&page_uptodate_lock, flags);
633 end_page_writeback(page);
637 spin_unlock_irqrestore(&page_uptodate_lock, flags);
642 * If a page's buffers are under async read (end_buffer_async_read
643 * completion) then there is a possibility that another thread of
644 * control could lock one of the buffers after it has completed
645 * but while some of the other buffers have not completed. This
646 * locked buffer would confuse end_buffer_async_read() into not unlocking
647 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
648 * that this buffer is not under async I/O.
650 * The page comes unlocked when it has no locked buffer_async buffers
653 * PageLocked prevents anyone starting new async I/O reads against any of
654 * the buffers.
656 * PageWriteback is used to prevent simultaneous writeout of the same page.
659 * PageLocked prevents anyone from starting writeback of a page which is
660 * under read I/O (PageWriteback is only ever set against a locked page).
662 static void mark_buffer_async_read(struct buffer_head *bh)
664 bh->b_end_io = end_buffer_async_read;
665 set_buffer_async_read(bh);
668 void mark_buffer_async_write(struct buffer_head *bh)
670 bh->b_end_io = end_buffer_async_write;
671 set_buffer_async_write(bh);
673 EXPORT_SYMBOL(mark_buffer_async_write);
677 * fs/buffer.c contains helper functions for buffer-backed address space's
678 * fsync functions. A common requirement for buffer-based filesystems is
679 * that certain data from the backing blockdev needs to be written out for
680 * a successful fsync(). For example, ext2 indirect blocks need to be
681 * written back and waited upon before fsync() returns.
683 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
684 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
685 * management of a list of dependent buffers at ->i_mapping->private_list.
687 * Locking is a little subtle: try_to_free_buffers() will remove buffers
688 * from their controlling inode's queue when they are being freed. But
689 * try_to_free_buffers() will be operating against the *blockdev* mapping
690 * at the time, not against the S_ISREG file which depends on those buffers.
691 * So the locking for private_list is via the private_lock in the address_space
692 * which backs the buffers. Which is different from the address_space
693 * against which the buffers are listed. So for a particular address_space,
694 * mapping->private_lock does *not* protect mapping->private_list! In fact,
695 * mapping->private_list will always be protected by the backing blockdev's
696 * ->private_lock.
698 * Which introduces a requirement: all buffers on an address_space's
699 * ->private_list must be from the same address_space: the blockdev's.
701 * address_spaces which do not place buffers at ->private_list via these
702 * utility functions are free to use private_lock and private_list for
703 * whatever they want. The only requirement is that list_empty(private_list)
704 * be true at clear_inode() time.
706 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
707 * filesystems should do that. invalidate_inode_buffers() should just go
708 * BUG_ON(!list_empty).
710 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
711 * take an address_space, not an inode. And it should be called
712 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
713 * dirtied.
715 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
716 * list if it is already on a list. Because if the buffer is on a list,
717 * it *must* already be on the right one. If not, the filesystem is being
718 * silly. This will save a ton of locking. But first we have to ensure
719 * that buffers are taken *off* the old inode's list when they are freed
720 * (presumably in truncate). That requires careful auditing of all
721 filesystems (do it inside bforget()). It could also be done by bringing
722 b_inode back.
726 * The buffer's backing address_space's private_lock must be held
728 static inline void __remove_assoc_queue(struct buffer_head *bh)
730 list_del_init(&bh->b_assoc_buffers);
733 int inode_has_buffers(struct inode *inode)
735 return !list_empty(&inode->i_data.private_list);
739 * osync is designed to support O_SYNC io. It waits synchronously for
740 * all already-submitted IO to complete, but does not queue any new
741 * writes to the disk.
743 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
744 * you dirty the buffers, and then use osync_inode_buffers to wait for
745 * completion. Any other dirty buffers which are not yet queued for
746 * write will not be flushed to disk by the osync.
748 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
750 struct buffer_head *bh;
756 list_for_each_prev(p, list) {
758 if (buffer_locked(bh)) {
762 if (!buffer_uptodate(bh))
774 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
775 *                        buffers
776 * @buffer_mapping - the mapping which backs the buffers' data
777 * @mapping - the mapping which wants those buffers written
779 * Starts I/O against the buffers at mapping->private_list, and waits upon
780 * them.
782 * Basically, this is a convenience function for fsync(). @buffer_mapping is
783 * the blockdev which "owns" the buffers and @mapping is a file or directory
784 * which needs those buffers to be written for a successful fsync().
786 int sync_mapping_buffers(struct address_space *mapping)
788 struct address_space *buffer_mapping = mapping->assoc_mapping;
790 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
793 return fsync_buffers_list(&buffer_mapping->private_lock,
794 &mapping->private_list);
796 EXPORT_SYMBOL(sync_mapping_buffers);
799 * Called when we've recently written block `bblock', and it is known that
800 * `bblock' was for a buffer_boundary() buffer. This means that the block at
801 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
802 * dirty, schedule it for IO. So that indirects merge nicely with their data.
804 void write_boundary_block(struct block_device *bdev,
805 sector_t bblock, unsigned blocksize)
807 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
809 if (buffer_dirty(bh))
810 ll_rw_block(WRITE, 1, &bh);
815 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
817 struct address_space *mapping = inode->i_mapping;
818 struct address_space *buffer_mapping = bh->b_page->mapping;
820 mark_buffer_dirty(bh);
821 if (!mapping->assoc_mapping) {
822 mapping->assoc_mapping = buffer_mapping;
824 if (mapping->assoc_mapping != buffer_mapping)
827 if (list_empty(&bh->b_assoc_buffers)) {
828 spin_lock(&buffer_mapping->private_lock);
829 list_move_tail(&bh->b_assoc_buffers,
830 &mapping->private_list);
831 spin_unlock(&buffer_mapping->private_lock);
834 EXPORT_SYMBOL(mark_buffer_dirty_inode);
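/*
 * Example (sketch): the typical use of the helpers above.  A filesystem
 * files dependent metadata buffers (e.g. indirect blocks) against the
 * inode, and its fsync writes and waits on that list.  The myfs_* names
 * are hypothetical.
 */
static void myfs_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
}

static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* write out and wait upon i_mapping->private_list (datasync is
	 * ignored here for simplicity) */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}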
837 * Add a page to the dirty page list.
839 * It is a sad fact of life that this function is called from several places
840 * deeply under spinlocking. It may not sleep.
842 * If the page has buffers, the uptodate buffers are set dirty, to preserve
843 * dirty-state coherency between the page and the buffers. If the page does
844 * not have buffers then when they are later attached they will all be set
845 * dirty.
847 * The buffers are dirtied before the page is dirtied. There's a small race
848 * window in which a writepage caller may see the page cleanness but not the
849 * buffer dirtiness. That's fine. If this code were to set the page dirty
850 * before the buffers, a concurrent writepage caller could clear the page dirty
851 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
852 * page on the dirty page list.
854 * We use private_lock to lock against try_to_free_buffers while using the
855 * page's buffer list. Also use this to protect against clean buffers being
856 * added to the page after it was set dirty.
858 * FIXME: may need to call ->reservepage here as well. That's rather up to the
859 * address_space though.
861 int __set_page_dirty_buffers(struct page *page)
863 struct address_space * const mapping = page->mapping;
865 spin_lock(&mapping->private_lock);
866 if (page_has_buffers(page)) {
867 struct buffer_head *head = page_buffers(page);
868 struct buffer_head *bh = head;
871 set_buffer_dirty(bh);
872 bh = bh->b_this_page;
873 } while (bh != head);
875 spin_unlock(&mapping->private_lock);
877 if (!TestSetPageDirty(page)) {
878 spin_lock_irq(&mapping->tree_lock);
879 if (page->mapping) { /* Race with truncate? */
880 if (!mapping->backing_dev_info->memory_backed)
881 inc_page_state(nr_dirty);
882 radix_tree_tag_set(&mapping->page_tree,
884 PAGECACHE_TAG_DIRTY);
886 spin_unlock_irq(&mapping->tree_lock);
887 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
892 EXPORT_SYMBOL(__set_page_dirty_buffers);
895 * Write out and wait upon a list of buffers.
897 * We have conflicting pressures: we want to make sure that all
898 * initially dirty buffers get waited on, but that any subsequently
899 * dirtied buffers don't. After all, we don't want fsync to last
900 * forever if somebody is actively writing to the file.
902 * Do this in two main stages: first we copy dirty buffers to a
903 * temporary inode list, queueing the writes as we go. Then we clean
904 * up, waiting for those writes to complete.
906 * During this second stage, any subsequent updates to the file may end
907 * up refiling the buffer on the original inode's dirty list again, so
908 * there is a chance we will end up with a buffer queued for write but
909 * not yet completed on that list. So, as a final cleanup we go through
910 * the osync code to catch these locked, dirty buffers without requeuing
911 * any newly dirty buffers for write.
913 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
915 struct buffer_head *bh;
916 struct list_head tmp;
919 INIT_LIST_HEAD(&tmp);
922 while (!list_empty(list)) {
923 bh = BH_ENTRY(list->next);
924 list_del_init(&bh->b_assoc_buffers);
925 if (buffer_dirty(bh) || buffer_locked(bh)) {
926 list_add(&bh->b_assoc_buffers, &tmp);
927 if (buffer_dirty(bh)) {
931 * Ensure any pending I/O completes so that
932 * ll_rw_block() actually writes the current
933 * contents - it is a noop if I/O is still in
934 * flight on potentially older contents.
937 ll_rw_block(WRITE, 1, &bh);
944 while (!list_empty(&tmp)) {
945 bh = BH_ENTRY(tmp.prev);
946 __remove_assoc_queue(bh);
950 if (!buffer_uptodate(bh))
957 err2 = osync_buffers_list(lock, list);
965 * Invalidate any and all dirty buffers on a given inode. We are
966 * probably unmounting the fs, but that doesn't mean we have already
967 * done a sync(). Just drop the buffers from the inode list.
969 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
970 * assumes that all the buffers are against the blockdev. Not true
971 * for reiserfs.
973 void invalidate_inode_buffers(struct inode *inode)
975 if (inode_has_buffers(inode)) {
976 struct address_space *mapping = &inode->i_data;
977 struct list_head *list = &mapping->private_list;
978 struct address_space *buffer_mapping = mapping->assoc_mapping;
980 spin_lock(&buffer_mapping->private_lock);
981 while (!list_empty(list))
982 __remove_assoc_queue(BH_ENTRY(list->next));
983 spin_unlock(&buffer_mapping->private_lock);
988 * Remove any clean buffers from the inode's buffer list. This is called
989 * when we're trying to free the inode itself. Those buffers can pin it.
991 * Returns true if all buffers were removed.
993 int remove_inode_buffers(struct inode *inode)
997 if (inode_has_buffers(inode)) {
998 struct address_space *mapping = &inode->i_data;
999 struct list_head *list = &mapping->private_list;
1000 struct address_space *buffer_mapping = mapping->assoc_mapping;
1002 spin_lock(&buffer_mapping->private_lock);
1003 while (!list_empty(list)) {
1004 struct buffer_head *bh = BH_ENTRY(list->next);
1005 if (buffer_dirty(bh)) {
1009 __remove_assoc_queue(bh);
1011 spin_unlock(&buffer_mapping->private_lock);
1017 * Create the appropriate buffers when given a page for data area and
1018 * the size of each buffer. Use the bh->b_this_page linked list to
1019 * follow the buffers created. Return NULL if unable to create more
1020 * buffers.
1022 * The retry flag is used to differentiate async IO (paging, swapping)
1023 * which may not fail from ordinary buffer allocations.
1025 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1028 struct buffer_head *bh, *head;
1034 while ((offset -= size) >= 0) {
1035 bh = alloc_buffer_head(GFP_NOFS);
1040 bh->b_this_page = head;
1045 atomic_set(&bh->b_count, 0);
1048 /* Link the buffer to its page */
1049 set_bh_page(bh, page, offset);
1051 bh->b_end_io = NULL;
1055 * In case anything failed, we just free everything we got.
1061 head = head->b_this_page;
1062 free_buffer_head(bh);
1067 * Return failure for non-async IO requests. Async IO requests
1068 * are not allowed to fail, so we have to wait until buffer heads
1069 * become available. But we don't want tasks sleeping with
1070 * partially complete buffers, so all were released above.
1075 /* We're _really_ low on memory. Now we just
1076 * wait for old buffer heads to become free due to
1077 * finishing IO. Since this is an async request and
1078 * the reserve list is empty, we're sure there are
1079 * async buffer heads in use.
1084 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1087 link_dev_buffers(struct page *page, struct buffer_head *head)
1089 struct buffer_head *bh, *tail;
1094 bh = bh->b_this_page;
1096 tail->b_this_page = head;
1097 attach_page_buffers(page, head);
1101 * Initialise the state of a blockdev page's buffers.
1104 init_page_buffers(struct page *page, struct block_device *bdev,
1105 sector_t block, int size)
1107 struct buffer_head *head = page_buffers(page);
1108 struct buffer_head *bh = head;
1109 int uptodate = PageUptodate(page);
1112 if (!buffer_mapped(bh)) {
1113 init_buffer(bh, NULL, NULL);
1115 bh->b_blocknr = block;
1117 set_buffer_uptodate(bh);
1118 set_buffer_mapped(bh);
1121 bh = bh->b_this_page;
1122 } while (bh != head);
1126 * Create the page-cache page that contains the requested block.
1128 * This is used purely for blockdev mappings.
1130 static struct page *
1131 grow_dev_page(struct block_device *bdev, sector_t block,
1132 pgoff_t index, int size)
1134 struct inode *inode = bdev->bd_inode;
1136 struct buffer_head *bh;
1138 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1142 if (!PageLocked(page))
1145 if (page_has_buffers(page)) {
1146 bh = page_buffers(page);
1147 if (bh->b_size == size) {
1148 init_page_buffers(page, bdev, block, size);
1151 if (!try_to_free_buffers(page))
1156 * Allocate some buffers for this page
1158 bh = alloc_page_buffers(page, size, 0);
1163 * Link the page to the buffers and initialise them. Take the
1164 * lock to be atomic wrt __find_get_block(), which does not
1165 * run under the page lock.
1167 spin_lock(&inode->i_mapping->private_lock);
1168 link_dev_buffers(page, bh);
1169 init_page_buffers(page, bdev, block, size);
1170 spin_unlock(&inode->i_mapping->private_lock);
1176 page_cache_release(page);
1181 * Create buffers for the specified block device block's page. If
1182 * that page was dirty, the buffers are set dirty also.
1184 * Except that's a bug. Attaching dirty buffers to a dirty
1185 * blockdev's page can result in filesystem corruption, because
1186 * some of those buffers may be aliases of filesystem data.
1187 * grow_dev_page() will go BUG() if this happens.
1190 grow_buffers(struct block_device *bdev, sector_t block, int size)
1199 } while ((size << sizebits) < PAGE_SIZE);
1201 index = block >> sizebits;
1202 block = index << sizebits;
1204 /* Create a page with the proper size buffers.. */
1205 page = grow_dev_page(bdev, block, index, size);
1209 page_cache_release(page);
1213 struct buffer_head *
1214 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1216 /* Size must be a multiple of the hard sector size */
1217 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1218 (size < 512 || size > PAGE_SIZE))) {
1219 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1221 printk(KERN_ERR "hardsect size: %d\n",
1222 bdev_hardsect_size(bdev));
1229 struct buffer_head * bh;
1231 bh = __find_get_block(bdev, block, size);
1235 if (!grow_buffers(bdev, block, size))
1241 * The relationship between dirty buffers and dirty pages:
1243 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1244 * the page is tagged dirty in its radix tree.
1246 * At all times, the dirtiness of the buffers represents the dirtiness of
1247 * subsections of the page. If the page has buffers, the page dirty bit is
1248 * merely a hint about the true dirty state.
1250 * When a page is set dirty in its entirety, all its buffers are marked dirty
1251 * (if the page has buffers).
1253 * When a buffer is marked dirty, its page is dirtied, but the page's other
1254 * buffers are not.
1256 * Also. When blockdev buffers are explicitly read with bread(), they
1257 * individually become uptodate. But their backing page remains not
1258 * uptodate - even if all of its buffers are uptodate. A subsequent
1259 * block_read_full_page() against that page will discover all the uptodate
1260 * buffers, will set the page uptodate and will perform no I/O.
1264 * mark_buffer_dirty - mark a buffer_head as needing writeout
1266 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1267 * backing page dirty, then tag the page as dirty in its address_space's radix
1268 * tree and then attach the address_space's inode to its superblock's dirty
1269 * inode list.
1271 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1272 * mapping->tree_lock and the global inode_lock.
1274 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1276 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1277 __set_page_dirty_nobuffers(bh->b_page);
1281 * Decrement a buffer_head's reference count. If all buffers against a page
1282 * have zero reference count, are clean and unlocked, and if the page is clean
1283 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1284 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1285 * a page but it ends up not being freed, and buffers may later be reattached).
1287 void __brelse(struct buffer_head * buf)
1289 if (atomic_read(&buf->b_count)) {
1293 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1298 * bforget() is like brelse(), except it discards any
1299 * potentially dirty data.
1301 void __bforget(struct buffer_head *bh)
1303 clear_buffer_dirty(bh);
1304 if (!list_empty(&bh->b_assoc_buffers)) {
1305 struct address_space *buffer_mapping = bh->b_page->mapping;
1307 spin_lock(&buffer_mapping->private_lock);
1308 list_del_init(&bh->b_assoc_buffers);
1309 spin_unlock(&buffer_mapping->private_lock);
1314 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1317 if (buffer_uptodate(bh)) {
1322 bh->b_end_io = end_buffer_read_sync;
1323 submit_bh(READ, bh);
1325 if (buffer_uptodate(bh))
1333 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1334 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1335 * refcount elevated by one when they're in an LRU. A buffer can only appear
1336 * once in a particular CPU's LRU. A single buffer can be present in multiple
1337 * CPU's LRUs at the same time.
1339 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1340 * sb_find_get_block().
1342 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1343 * a local interrupt disable for that.
1346 #define BH_LRU_SIZE 8
1349 struct buffer_head *bhs[BH_LRU_SIZE];
1352 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1355 #define bh_lru_lock() local_irq_disable()
1356 #define bh_lru_unlock() local_irq_enable()
1358 #define bh_lru_lock() preempt_disable()
1359 #define bh_lru_unlock() preempt_enable()
1362 static inline void check_irqs_on(void)
1364 #ifdef irqs_disabled
1365 BUG_ON(irqs_disabled());
1370 * The LRU management algorithm is dopey-but-simple. Sorry.
1372 static void bh_lru_install(struct buffer_head *bh)
1374 struct buffer_head *evictee = NULL;
1379 lru = &__get_cpu_var(bh_lrus);
1380 if (lru->bhs[0] != bh) {
1381 struct buffer_head *bhs[BH_LRU_SIZE];
1387 for (in = 0; in < BH_LRU_SIZE; in++) {
1388 struct buffer_head *bh2 = lru->bhs[in];
1393 if (out >= BH_LRU_SIZE) {
1394 BUG_ON(evictee != NULL);
1401 while (out < BH_LRU_SIZE)
1403 memcpy(lru->bhs, bhs, sizeof(bhs));
1412 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1414 static inline struct buffer_head *
1415 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1417 struct buffer_head *ret = NULL;
1423 lru = &__get_cpu_var(bh_lrus);
1424 for (i = 0; i < BH_LRU_SIZE; i++) {
1425 struct buffer_head *bh = lru->bhs[i];
1427 if (bh && bh->b_bdev == bdev &&
1428 bh->b_blocknr == block && bh->b_size == size) {
1431 lru->bhs[i] = lru->bhs[i - 1];
1446 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1447 * it in the LRU and mark it as accessed. If it is not present then return
1448 * NULL.
1450 struct buffer_head *
1451 __find_get_block(struct block_device *bdev, sector_t block, int size)
1453 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1456 bh = __find_get_block_slow(bdev, block, size);
1464 EXPORT_SYMBOL(__find_get_block);
1467 * __getblk will locate (and, if necessary, create) the buffer_head
1468 * which corresponds to the passed block_device, block and size. The
1469 * returned buffer has its reference count incremented.
1471 * __getblk() cannot fail - it just keeps trying. If you pass it an
1472 * illegal block number, __getblk() will happily return a buffer_head
1473 * which represents the non-existent block. Very weird.
1475 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1476 * attempt is failing. FIXME, perhaps?
1478 struct buffer_head *
1479 __getblk(struct block_device *bdev, sector_t block, int size)
1481 struct buffer_head *bh = __find_get_block(bdev, block, size);
1485 bh = __getblk_slow(bdev, block, size);
1488 EXPORT_SYMBOL(__getblk);
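/*
 * Example (sketch): a classic metadata overwrite built on __getblk().
 * Real filesystems normally go through the sb_getblk() wrapper; the
 * function below is hypothetical.
 */
static void myfs_zero_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);	/* won't fail */

	lock_buffer(bh);
	memset(bh->b_data, 0, size);	/* full overwrite, so no read needed */
	set_buffer_uptodate(bh);	/* in-memory contents now authoritative */
	mark_buffer_dirty(bh);		/* let writeback push it to disk */
	unlock_buffer(bh);
	brelse(bh);			/* drop the reference __getblk() took */
}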
1491 * Do async read-ahead on a buffer..
1493 void __breadahead(struct block_device *bdev, sector_t block, int size)
1495 struct buffer_head *bh = __getblk(bdev, block, size);
1496 ll_rw_block(READA, 1, &bh);
1499 EXPORT_SYMBOL(__breadahead);
1502 * __bread() - reads a specified block and returns the bh
1503 * @block: number of block
1504 * @size: size (in bytes) to read
1506 * Reads a specified block, and returns buffer head that contains it.
1507 * It returns NULL if the block was unreadable.
1509 struct buffer_head *
1510 __bread(struct block_device *bdev, sector_t block, int size)
1512 struct buffer_head *bh = __getblk(bdev, block, size);
1514 if (!buffer_uptodate(bh))
1515 bh = __bread_slow(bh);
1518 EXPORT_SYMBOL(__bread);
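/*
 * Example (sketch): the usual __bread()/__breadahead() pairing.  The
 * block numbers and the parsing step are hypothetical.
 */
static int myfs_read_super_block(struct block_device *bdev, int blocksize)
{
	struct buffer_head *bh;

	__breadahead(bdev, 2, blocksize);	/* we'll want block 2 shortly */
	bh = __bread(bdev, 1, blocksize);	/* synchronous; NULL on I/O error */
	if (!bh)
		return -EIO;
	/* ... parse bh->b_data here ... */
	brelse(bh);
	return 0;
}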
1521 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1522 * This doesn't race because it runs in each cpu either in irq
1523 * or with preempt disabled.
1525 static void invalidate_bh_lru(void *arg)
1527 struct bh_lru *b = &get_cpu_var(bh_lrus);
1530 for (i = 0; i < BH_LRU_SIZE; i++) {
1534 put_cpu_var(bh_lrus);
1537 static void invalidate_bh_lrus(void)
1539 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1542 void set_bh_page(struct buffer_head *bh,
1543 struct page *page, unsigned long offset)
1546 if (offset >= PAGE_SIZE)
1548 if (PageHighMem(page))
1550 * This catches illegal uses and preserves the offset:
1552 bh->b_data = (char *)(0 + offset);
1554 bh->b_data = page_address(page) + offset;
1556 EXPORT_SYMBOL(set_bh_page);
1559 * Called when truncating a buffer on a page completely.
1561 static inline void discard_buffer(struct buffer_head * bh)
1564 clear_buffer_dirty(bh);
1566 clear_buffer_mapped(bh);
1567 clear_buffer_req(bh);
1568 clear_buffer_new(bh);
1569 clear_buffer_delay(bh);
1574 * try_to_release_page() - release old fs-specific metadata on a page
1576 * @page: the page which the kernel is trying to free
1577 * @gfp_mask: memory allocation flags (and I/O mode)
1579 * The address_space is asked to try to release any data held against the page
1580 * (presumably at page->private). If the release was successful, return `1'.
1581 * Otherwise return zero.
1583 * The @gfp_mask argument specifies whether I/O may be performed to release
1584 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1586 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1588 int try_to_release_page(struct page *page, int gfp_mask)
1590 struct address_space * const mapping = page->mapping;
1592 BUG_ON(!PageLocked(page));
1593 if (PageWriteback(page))
1596 if (mapping && mapping->a_ops->releasepage)
1597 return mapping->a_ops->releasepage(page, gfp_mask);
1598 return try_to_free_buffers(page);
1600 EXPORT_SYMBOL(try_to_release_page);
1603 * block_invalidatepage - invalidate part or all of a buffer-backed page
1605 * @page: the page which is affected
1606 * @offset: the index of the truncation point
1608 * block_invalidatepage() is called when all or part of the page has become
1609 * invalidated by a truncate operation.
1611 * block_invalidatepage() does not have to release all buffers, but it must
1612 * ensure that no dirty buffer is left outside @offset and that no I/O
1613 * is underway against any of the blocks which are outside the truncation
1614 * point. Because the caller is about to free (and possibly reuse) those
1615 * blocks on-disk.
1617 int block_invalidatepage(struct page *page, unsigned long offset)
1619 struct buffer_head *head, *bh, *next;
1620 unsigned int curr_off = 0;
1623 BUG_ON(!PageLocked(page));
1624 if (!page_has_buffers(page))
1627 head = page_buffers(page);
1630 unsigned int next_off = curr_off + bh->b_size;
1631 next = bh->b_this_page;
1634 * is this block fully invalidated?
1636 if (offset <= curr_off)
1638 curr_off = next_off;
1640 } while (bh != head);
1643 * We release buffers only if the entire page is being invalidated.
1644 * The get_block cached value has been unconditionally invalidated,
1645 * so real IO is not possible anymore.
1648 ret = try_to_release_page(page, 0);
1652 EXPORT_SYMBOL(block_invalidatepage);
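/*
 * Example (sketch): most buffer-backed filesystems leave ->releasepage
 * NULL so try_to_release_page() falls back to try_to_free_buffers(), and
 * only override ->invalidatepage when they have extra bookkeeping to do.
 * myfs_forget_journalled_buffers() is hypothetical.
 */
static int myfs_invalidatepage(struct page *page, unsigned long offset)
{
	myfs_forget_journalled_buffers(page, offset);	/* hypothetical step */
	return block_invalidatepage(page, offset);
}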
1655 * We attach and possibly dirty the buffers atomically wrt
1656 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1657 * is already excluded via the page lock.
1659 void create_empty_buffers(struct page *page,
1660 unsigned long blocksize, unsigned long b_state)
1662 struct buffer_head *bh, *head, *tail;
1664 head = alloc_page_buffers(page, blocksize, 1);
1667 bh->b_state |= b_state;
1669 bh = bh->b_this_page;
1671 tail->b_this_page = head;
1673 spin_lock(&page->mapping->private_lock);
1674 if (PageUptodate(page) || PageDirty(page)) {
1677 if (PageDirty(page))
1678 set_buffer_dirty(bh);
1679 if (PageUptodate(page))
1680 set_buffer_uptodate(bh);
1681 bh = bh->b_this_page;
1682 } while (bh != head);
1684 attach_page_buffers(page, head);
1685 spin_unlock(&page->mapping->private_lock);
1687 EXPORT_SYMBOL(create_empty_buffers);
1690 * We are taking a block for data and we don't want any output from any
1691 * buffer-cache aliases starting from return from that function and
1692 * until the moment when something will explicitly mark the buffer
1693 * dirty (hopefully that will not happen until we free that block ;-)
1694 * We don't even need to mark it not-uptodate - nobody can expect
1695 * anything from a newly allocated buffer anyway. We used to use
1696 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1697 * don't want to mark the alias unmapped, for example - it would confuse
1698 * anyone who might pick it with bread() afterwards...
1700 * Also.. Note that bforget() doesn't lock the buffer. So there can
1701 * be writeout I/O going on against recently-freed buffers. We don't
1702 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1703 * only if we really need to. That happens here.
1705 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1707 struct buffer_head *old_bh;
1711 old_bh = __find_get_block_slow(bdev, block, 0);
1713 clear_buffer_dirty(old_bh);
1714 wait_on_buffer(old_bh);
1715 clear_buffer_req(old_bh);
1719 EXPORT_SYMBOL(unmap_underlying_metadata);
1722 * NOTE! All mapped/uptodate combinations are valid:
1724 * Mapped Uptodate Meaning
1726 * No No "unknown" - must do get_block()
1727 * No Yes "hole" - zero-filled
1728 * Yes No "allocated" - allocated on disk, not read in
1729 * Yes Yes "valid" - allocated and up-to-date in memory.
1731 * "Dirty" is valid only with the last case (mapped+uptodate).
1735 * While block_write_full_page is writing back the dirty buffers under
1736 * the page lock, whoever dirtied the buffers may decide to clean them
1737 * again at any time. We handle that by only looking at the buffer
1738 * state inside lock_buffer().
1740 * If block_write_full_page() is called for regular writeback
1741 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1742 * locked buffer. This can only happen if someone has written the buffer
1743 * directly, with submit_bh(). At the address_space level PageWriteback
1744 * prevents this contention from occurring.
1746 static int __block_write_full_page(struct inode *inode, struct page *page,
1747 get_block_t *get_block, struct writeback_control *wbc)
1751 sector_t last_block;
1752 struct buffer_head *bh, *head;
1753 int nr_underway = 0;
1755 BUG_ON(!PageLocked(page));
1757 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1759 if (!page_has_buffers(page)) {
1760 create_empty_buffers(page, 1 << inode->i_blkbits,
1761 (1 << BH_Dirty)|(1 << BH_Uptodate));
1765 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1766 * here, and the (potentially unmapped) buffers may become dirty at
1767 * any time. If a buffer becomes dirty here after we've inspected it
1768 * then we just miss that fact, and the page stays dirty.
1770 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1771 * handle that here by just cleaning them.
1774 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1775 head = page_buffers(page);
1779 * Get all the dirty buffers mapped to disk addresses and
1780 * handle any aliases from the underlying blockdev's mapping.
1783 if (block > last_block) {
1785 * mapped buffers outside i_size will occur, because
1786 * this page can be outside i_size when there is a
1787 * truncate in progress.
1790 * The buffer was zeroed by block_write_full_page()
1792 clear_buffer_dirty(bh);
1793 set_buffer_uptodate(bh);
1794 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1795 err = get_block(inode, block, bh, 1);
1798 if (buffer_new(bh)) {
1799 /* blockdev mappings never come here */
1800 clear_buffer_new(bh);
1801 unmap_underlying_metadata(bh->b_bdev,
1805 bh = bh->b_this_page;
1807 } while (bh != head);
1811 if (!buffer_mapped(bh))
1814 * If it's a fully non-blocking write attempt and we cannot
1815 * lock the buffer then redirty the page. Note that this can
1816 * potentially cause a busy-wait loop from pdflush and kswapd
1817 * activity, but those code paths have their own higher-level
1818 * throttling.
1820 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1822 } else if (test_set_buffer_locked(bh)) {
1823 redirty_page_for_writepage(wbc, page);
1826 if (test_clear_buffer_dirty(bh)) {
1827 mark_buffer_async_write(bh);
1831 } while ((bh = bh->b_this_page) != head);
1834 * The page and its buffers are protected by PageWriteback(), so we can
1835 * drop the bh refcounts early.
1837 BUG_ON(PageWriteback(page));
1838 set_page_writeback(page);
1842 struct buffer_head *next = bh->b_this_page;
1843 if (buffer_async_write(bh)) {
1844 submit_bh(WRITE, bh);
1849 } while (bh != head);
1853 if (nr_underway == 0) {
1855 * The page was marked dirty, but the buffers were
1856 * clean. Someone wrote them back by hand with
1857 * ll_rw_block/submit_bh. A rare case.
1861 if (!buffer_uptodate(bh)) {
1865 bh = bh->b_this_page;
1866 } while (bh != head);
1868 SetPageUptodate(page);
1869 end_page_writeback(page);
1871 * The page and buffer_heads can be released at any time from
1872 * here on.
1874 wbc->pages_skipped++; /* We didn't write this page */
1880 * ENOSPC, or some other error. We may already have added some
1881 * blocks to the file, so we need to write these out to avoid
1882 * exposing stale data.
1883 * The page is currently locked and not marked for writeback
1886 /* Recovery: lock and submit the mapped buffers */
1889 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1891 mark_buffer_async_write(bh);
1894 * The buffer may have been set dirty during
1895 * attachment to a dirty page.
1897 clear_buffer_dirty(bh);
1899 } while ((bh = bh->b_this_page) != head);
1901 BUG_ON(PageWriteback(page));
1902 set_page_writeback(page);
1905 struct buffer_head *next = bh->b_this_page;
1906 if (buffer_async_write(bh)) {
1907 clear_buffer_dirty(bh);
1908 submit_bh(WRITE, bh);
1913 } while (bh != head);
1917 static int __block_prepare_write(struct inode *inode, struct page *page,
1918 unsigned from, unsigned to, get_block_t *get_block)
1920 unsigned block_start, block_end;
1923 unsigned blocksize, bbits;
1924 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1926 BUG_ON(!PageLocked(page));
1927 BUG_ON(from > PAGE_CACHE_SIZE);
1928 BUG_ON(to > PAGE_CACHE_SIZE);
1931 blocksize = 1 << inode->i_blkbits;
1932 if (!page_has_buffers(page))
1933 create_empty_buffers(page, blocksize, 0);
1934 head = page_buffers(page);
1936 bbits = inode->i_blkbits;
1937 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1939 for(bh = head, block_start = 0; bh != head || !block_start;
1940 block++, block_start=block_end, bh = bh->b_this_page) {
1941 block_end = block_start + blocksize;
1942 if (block_end <= from || block_start >= to) {
1943 if (PageUptodate(page)) {
1944 if (!buffer_uptodate(bh))
1945 set_buffer_uptodate(bh);
1950 clear_buffer_new(bh);
1951 if (!buffer_mapped(bh)) {
1952 err = get_block(inode, block, bh, 1);
1955 if (buffer_new(bh)) {
1956 clear_buffer_new(bh);
1957 unmap_underlying_metadata(bh->b_bdev,
1959 if (PageUptodate(page)) {
1960 set_buffer_uptodate(bh);
1963 if (block_end > to || block_start < from) {
1966 kaddr = kmap_atomic(page, KM_USER0);
1970 if (block_start < from)
1971 memset(kaddr+block_start,
1972 0, from-block_start);
1973 flush_dcache_page(page);
1974 kunmap_atomic(kaddr, KM_USER0);
1979 if (PageUptodate(page)) {
1980 if (!buffer_uptodate(bh))
1981 set_buffer_uptodate(bh);
1984 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1985 (block_start < from || block_end > to)) {
1986 ll_rw_block(READ, 1, &bh);
1991 * If we issued read requests - let them complete.
1993 while(wait_bh > wait) {
1994 wait_on_buffer(*--wait_bh);
1995 if (!buffer_uptodate(*wait_bh))
2001 * Zero out any newly allocated blocks to avoid exposing stale
2002 * data. If BH_New is set, we know that the block was newly
2003 * allocated in the above loop.
2008 block_end = block_start+blocksize;
2009 if (block_end <= from)
2011 if (block_start >= to)
2013 if (buffer_new(bh)) {
2016 clear_buffer_new(bh);
2017 kaddr = kmap_atomic(page, KM_USER0);
2018 memset(kaddr+block_start, 0, bh->b_size);
2019 kunmap_atomic(kaddr, KM_USER0);
2020 set_buffer_uptodate(bh);
2021 mark_buffer_dirty(bh);
2024 block_start = block_end;
2025 bh = bh->b_this_page;
2026 } while (bh != head);
2030 static int __block_commit_write(struct inode *inode, struct page *page,
2031 unsigned from, unsigned to)
2033 unsigned block_start, block_end;
2036 struct buffer_head *bh, *head;
2038 blocksize = 1 << inode->i_blkbits;
2040 for(bh = head = page_buffers(page), block_start = 0;
2041 bh != head || !block_start;
2042 block_start=block_end, bh = bh->b_this_page) {
2043 block_end = block_start + blocksize;
2044 if (block_end <= from || block_start >= to) {
2045 if (!buffer_uptodate(bh))
2048 set_buffer_uptodate(bh);
2049 mark_buffer_dirty(bh);
2054 * If this is a partial write which happened to make all buffers
2055 * uptodate then we can optimize away a bogus readpage() for
2056 * the next read(). Here we 'discover' whether the page went
2057 * uptodate as a result of this (potentially partial) write.
2060 SetPageUptodate(page);
2065 * Generic "read page" function for block devices that have the normal
2066 * get_block functionality. This is most of the block device filesystems.
2067 * Reads the page asynchronously --- the unlock_buffer() and
2068 * set/clear_buffer_uptodate() functions propagate buffer state into the
2069 * page struct once IO has completed.
2071 int block_read_full_page(struct page *page, get_block_t *get_block)
2073 struct inode *inode = page->mapping->host;
2074 sector_t iblock, lblock;
2075 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2076 unsigned int blocksize;
2078 int fully_mapped = 1;
2080 if (!PageLocked(page))
2082 blocksize = 1 << inode->i_blkbits;
2083 if (!page_has_buffers(page))
2084 create_empty_buffers(page, blocksize, 0);
2085 head = page_buffers(page);
2087 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2088 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2094 if (buffer_uptodate(bh))
2097 if (!buffer_mapped(bh)) {
2099 if (iblock < lblock) {
2100 if (get_block(inode, iblock, bh, 0))
2103 if (!buffer_mapped(bh)) {
2104 void *kaddr = kmap_atomic(page, KM_USER0);
2105 memset(kaddr + i * blocksize, 0, blocksize);
2106 flush_dcache_page(page);
2107 kunmap_atomic(kaddr, KM_USER0);
2108 set_buffer_uptodate(bh);
2112 * get_block() might have updated the buffer
2113 * synchronously.
2115 if (buffer_uptodate(bh))
2119 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2122 SetPageMappedToDisk(page);
2126 * All buffers are uptodate - we can set the page uptodate
2127 * as well. But not if get_block() returned an error.
2129 if (!PageError(page))
2130 SetPageUptodate(page);
2135 /* Stage two: lock the buffers */
2136 for (i = 0; i < nr; i++) {
2139 mark_buffer_async_read(bh);
2143 * Stage 3: start the IO. Check for uptodateness
2144 * inside the buffer lock in case another process reading
2145 * the underlying blockdev brought it uptodate (the sct fix).
2147 for (i = 0; i < nr; i++) {
2149 if (buffer_uptodate(bh))
2150 end_buffer_async_read(bh, 1);
2152 submit_bh(READ, bh);
2157 /* utility function for filesystems that need to do work on expanding
2158 * truncates. Uses prepare/commit_write to allow the filesystem to
2159 * deal with the hole.
2161 int generic_cont_expand(struct inode *inode, loff_t size)
2163 struct address_space *mapping = inode->i_mapping;
2165 unsigned long index, offset, limit;
2169 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2170 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2171 send_sig(SIGXFSZ, current, 0);
2174 if (size > inode->i_sb->s_maxbytes)
2177 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2179 /* ugh. in prepare/commit_write, if from==to==start of block, we
2180 ** skip the prepare. make sure we never send an offset for the start of a block. */
2183 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2186 index = size >> PAGE_CACHE_SHIFT;
2188 page = grab_cache_page(mapping, index);
2191 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2193 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2196 page_cache_release(page);
2204 * For moronic filesystems that do not allow holes in files.
2205 * We may have to extend the file.
2208 int cont_prepare_write(struct page *page, unsigned offset,
2209 unsigned to, get_block_t *get_block, loff_t *bytes)
2211 struct address_space *mapping = page->mapping;
2212 struct inode *inode = mapping->host;
2213 struct page *new_page;
2217 unsigned blocksize = 1 << inode->i_blkbits;
2220 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2222 new_page = grab_cache_page(mapping, pgpos);
2225 /* we might sleep */
2226 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2227 unlock_page(new_page);
2228 page_cache_release(new_page);
2231 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2232 if (zerofrom & (blocksize-1)) {
2233 *bytes |= (blocksize-1);
2236 status = __block_prepare_write(inode, new_page, zerofrom,
2237 PAGE_CACHE_SIZE, get_block);
2240 kaddr = kmap_atomic(new_page, KM_USER0);
2241 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2242 flush_dcache_page(new_page);
2243 kunmap_atomic(kaddr, KM_USER0);
2244 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2245 unlock_page(new_page);
2246 page_cache_release(new_page);
2249 if (page->index < pgpos) {
2250 /* completely inside the area */
2253 /* page covers the boundary, find the boundary offset */
2254 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2256 /* if we expand the file, the last block will be filled */
2257 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2258 *bytes |= (blocksize-1);
2262 /* starting below the boundary? Nothing to zero out */
2263 if (offset <= zerofrom)
2266 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2269 if (zerofrom < offset) {
2270 kaddr = kmap_atomic(page, KM_USER0);
2271 memset(kaddr+zerofrom, 0, offset-zerofrom);
2272 flush_dcache_page(page);
2273 kunmap_atomic(kaddr, KM_USER0);
2274 __block_commit_write(inode, page, zerofrom, offset);
2278 ClearPageUptodate(page);
2282 ClearPageUptodate(new_page);
2283 unlock_page(new_page);
2284 page_cache_release(new_page);
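/*
 * Example (sketch): a no-holes filesystem (FAT is the classic user) wires
 * cont_prepare_write() in like this, tracking the zeroed-out tail in a
 * private byte count.  mmu_private is assumed to be a loff_t field of the
 * hypothetical myfs_inode_info.
 */
static int myfs_cont_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, myfs_get_block,
			&MYFS_I(page->mapping->host)->mmu_private);
}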
2289 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2290 get_block_t *get_block)
2292 struct inode *inode = page->mapping->host;
2293 int err = __block_prepare_write(inode, page, from, to, get_block);
2295 ClearPageUptodate(page);
2299 int block_commit_write(struct page *page, unsigned from, unsigned to)
2301 struct inode *inode = page->mapping->host;
2302 __block_commit_write(inode,page,from,to);
2306 int generic_commit_write(struct file *file, struct page *page,
2307 unsigned from, unsigned to)
2309 struct inode *inode = page->mapping->host;
2310 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2311 __block_commit_write(inode,page,from,to);
2313 * No need to use i_size_read() here, the i_size
2314 * cannot change under us because we hold i_sem.
2316 if (pos > inode->i_size) {
2317 i_size_write(inode, pos);
2318 mark_inode_dirty(inode);
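/*
 * Example (sketch): tying the generic pieces together.  A typical
 * buffer-backed filesystem builds its address_space_operations from
 * block_read_full_page(), block_prepare_write(), generic_commit_write()
 * and block_write_full_page(), passing its own get_block (the
 * hypothetical myfs_get_block from the earlier sketch).
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,	/* updates i_size, see above */
};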
2325 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2326 * immediately, while under the page lock. So it needs a special end_io
2327 * handler which does not touch the bh after unlocking it.
2329 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2330 * a race there is benign: unlock_buffer() only uses the bh's address for
2331 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2332 * itself.
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head map_bh;
	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
	unsigned block_in_page;
	unsigned block_start;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int i;
	int ret = 0;
	int is_mapped_to_disk = 1;
	int dirtied_it = 0;

	if (PageMappedToDisk(page))
		return 0;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	map_bh.b_page = page;

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize) {
		unsigned block_end = block_start + blocksize;
		int create;

		map_bh.b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					&map_bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(&map_bh))
			is_mapped_to_disk = 0;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
							map_bh.b_blocknr);
		if (PageUptodate(page))
			continue;
		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from) {
				memset(kaddr+block_start, 0, from-block_start);
				dirtied_it = 1;
			}
			if (block_end > to) {
				memset(kaddr + to, 0, block_end - to);
				dirtied_it = 1;
			}
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(&map_bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

			if (!bh) {
				ret = -ENOMEM;
				goto failed;
			}
			bh->b_state = map_bh.b_state;
			atomic_set(&bh->b_count, 0);
			bh->b_this_page = NULL;
			bh->b_page = page;
			bh->b_blocknr = map_bh.b_blocknr;
			bh->b_size = blocksize;
			bh->b_data = (char *)(long)block_start;
			bh->b_bdev = map_bh.b_bdev;
			bh->b_private = NULL;
			read_bh[nr_reads++] = bh;
		}
	}

	if (nr_reads) {
		struct buffer_head *bh;

		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
		}
		for (i = 0; i < nr_reads; i++) {
			bh = read_bh[i];
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
			free_buffer_head(bh);
			read_bh[i] = NULL;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);
	SetPageUptodate(page);

	/*
	 * Setting the page dirty here isn't necessary for the prepare_write
	 * function - commit_write will do that.  But if/when this function is
	 * used within the pagefault handler to ensure that all mmapped pages
	 * have backing space in the filesystem, we will need to dirty the page
	 * if its contents were altered.
	 */
	if (dirtied_it)
		set_page_dirty(page);

	return 0;

failed:
	for (i = 0; i < nr_reads; i++) {
		if (read_bh[i])
			free_buffer_head(read_bh[i]);
	}

	/*
	 * Error recovery is pretty slack.  Clear the page and mark it dirty
	 * so we'll later zero out any blocks which _were_ allocated.
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
	SetPageUptodate(page);
	set_page_dirty(page);
	return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);

int nobh_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_dirty(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);

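/*
 * Illustrative sketch, not part of the original file: a filesystem opting
 * into the nobh path pairs nobh_prepare_write() with nobh_commit_write()
 * so that no buffer_heads stay attached to the page afterwards.  The
 * myfs_* names are hypothetical.
 */
#if 0	/* example only */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_nobh_aops = {
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif
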
/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
 */
int nobh_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned to;
	struct page *page;
	struct address_space_operations *a_ops = mapping->a_ops;
	char *kaddr;
	int ret = 0;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
EXPORT_SYMBOL(nobh_truncate_page);

int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}

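/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * truncate path typically zeroes the new partial last block before it
 * trims the block mappings.  myfs_get_block is hypothetical.
 */
#if 0	/* example only */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... then free the blocks beyond the new i_size ... */
}
#endif
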
/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	void *kaddr;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}

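/*
 * Illustrative sketch, not part of the original file: ->writepage is
 * normally a one-line wrapper that supplies the filesystem's own
 * get_block.  myfs_get_block is hypothetical.
 */
#if 0	/* example only */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif
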
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}

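/*
 * Illustrative sketch, not part of the original file: wiring ->bmap (used
 * by the FIBMAP ioctl and swapfile setup) through generic_block_bmap().
 * myfs_get_block is hypothetical.
 */
#if 0	/* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif
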
static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (bio->bi_size)
		return 1;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
	return 0;
}

int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting, should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

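/*
 * Illustrative sketch, not part of the original file: reading one mapped
 * buffer synchronously with submit_bh().  The extra reference taken here
 * is dropped by end_buffer_read_sync()'s completion path.
 * example_read_bh is a hypothetical helper.
 */
#if 0	/* example only */
static int example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif
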
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
 * and requests an I/O operation on them, either a %READ or a %WRITE.
 * The third %READA option is described in the documentation for
 * generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a
 * write request, and any buffer that appears to be up-to-date when doing
 * a read request.  Further it marks as clean buffers that are processed
 * for writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * up any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (test_set_buffer_locked(bh))
			continue;

		get_bh(bh);
		if (rw == WRITE) {
			bh->b_end_io = end_buffer_write_sync;
			if (test_clear_buffer_dirty(bh)) {
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			bh->b_end_io = end_buffer_read_sync;
			if (!buffer_uptodate(bh)) {
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
		put_bh(bh);
	}
}

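/*
 * Illustrative sketch, not part of the original file: the common pattern
 * of batching reads through ll_rw_block() and then waiting on each buffer,
 * since ll_rw_block() itself does not wait.  example_read_buffers is a
 * hypothetical helper.
 */
#if 0	/* example only */
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif
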
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref
 * on the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}

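/*
 * Illustrative sketch, not part of the original file: a data-integrity
 * write of a single metadata buffer.  The reference taken here satisfies
 * the b_count >= 1 requirement checked above.  example_sync_metadata is
 * a hypothetical helper.
 */
#if 0	/* example only */
static int example_sync_metadata(struct buffer_head *bh)
{
	int err;

	get_bh(bh);		/* hold our own ref across the write */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	put_bh(bh);
	return err;
}
#endif
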
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */

static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh))
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);
	if (ret) {
		/*
		 * If the filesystem writes its buffers by hand (eg ext3)
		 * then we can have clean buffers against a dirty page.  We
		 * clean the page here; otherwise later reattachment of buffers
		 * could encounter a non-uptodate page, which is unresolvable.
		 * This only applies in the rare case where try_to_free_buffers
		 * succeeds but the page is not freed.
		 */
		clear_page_dirty(page);
	}
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);

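/*
 * Illustrative sketch, not part of the original file: try_to_free_buffers()
 * is what a buffer-backed ->releasepage ultimately does; many filesystems
 * simply rely on the default path that calls it.  The int gfp_mask second
 * argument reflects this kernel's releasepage convention; myfs_releasepage
 * is hypothetical.
 */
#if 0	/* example only */
static int myfs_releasepage(struct page *page, int gfp_mask)
{
	/* Nothing filesystem-private pins the page; just drop the buffers. */
	return try_to_free_buffers(page);
}
#endif
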
int block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
	return 0;
}

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static kmem_cache_t *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(int gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		preempt_disable();
		__get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

static void
init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
			SLAB_CTOR_CONSTRUCTOR) {
		struct buffer_head * bh = (struct buffer_head *)data;

		memset(bh, 0, sizeof(*bh));
		INIT_LIST_HEAD(&bh->b_assoc_buffers);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}

static int buffer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			SLAB_PANIC, init_buffer_head, NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);