4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/capability.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 static void invalidate_bh_lrus(void);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void fastcall __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void fastcall unlock_buffer(struct buffer_head *bh)
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
90 void __wait_on_buffer(struct buffer_head * bh)
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
96 __clear_page_buffers(struct page *page)
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
103 static void buffer_io_error(struct buffer_head *bh)
105 char b[BDEVNAME_SIZE];
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
113 * Default synchronous end-of-IO handler. Just mark it up-to-date and
114 * unlock the buffer. This is what ll_rw_block uses too.
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
119 set_buffer_uptodate(bh);
121 /* This happens due to failed READA attempts. */
122 clear_buffer_uptodate(bh);
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 char b[BDEVNAME_SIZE];
133 set_buffer_uptodate(bh);
135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 printk(KERN_WARNING "lost page write due to "
139 bdevname(bh->b_bdev, b));
141 set_buffer_write_io_error(bh);
142 clear_buffer_uptodate(bh);
149 * Write out and wait upon all the dirty data associated with a block
150 * device via its mapping. Does not take the superblock lock.
152 int sync_blockdev(struct block_device *bdev)
157 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
160 EXPORT_SYMBOL(sync_blockdev);
162 static void __fsync_super(struct super_block *sb)
164 sync_inodes_sb(sb, 0);
167 if (sb->s_dirt && sb->s_op->write_super)
168 sb->s_op->write_super(sb);
170 if (sb->s_op->sync_fs)
171 sb->s_op->sync_fs(sb, 1);
172 sync_blockdev(sb->s_bdev);
173 sync_inodes_sb(sb, 1);
177 * Write out and wait upon all dirty data associated with this
178 * superblock. Filesystem data as well as the underlying block
179 * device. Takes the superblock lock.
181 int fsync_super(struct super_block *sb)
184 return sync_blockdev(sb->s_bdev);
187 EXPORT_SYMBOL(fsync_super);
190 * Write out and wait upon all dirty data associated with this
191 * device. Filesystem data as well as the underlying block
192 * device. Takes the superblock lock.
194 int fsync_bdev(struct block_device *bdev)
196 struct super_block *sb = get_super(bdev);
198 int res = fsync_super(sb);
202 return sync_blockdev(bdev);
206 * freeze_bdev -- lock a filesystem and force it into a consistent state
207 * @bdev: blockdevice to lock
209 * This takes the block device bd_mount_mutex to make sure no new mounts
210 * happen on bdev until thaw_bdev() is called.
211 * If a superblock is found on this device, we take the s_umount semaphore
212 * on it to make sure nobody unmounts until the snapshot creation is done.
214 struct super_block *freeze_bdev(struct block_device *bdev)
216 struct super_block *sb;
218 mutex_lock(&bdev->bd_mount_mutex);
219 sb = get_super(bdev);
220 if (sb && !(sb->s_flags & MS_RDONLY)) {
221 sb->s_frozen = SB_FREEZE_WRITE;
226 sb->s_frozen = SB_FREEZE_TRANS;
229 sync_blockdev(sb->s_bdev);
231 if (sb->s_op->write_super_lockfs)
232 sb->s_op->write_super_lockfs(sb);
236 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
238 EXPORT_SYMBOL(freeze_bdev);
241 * thaw_bdev -- unlock filesystem
242 * @bdev: blockdevice to unlock
243 * @sb: associated superblock
245 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
247 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
250 BUG_ON(sb->s_bdev != bdev);
252 if (sb->s_op->unlockfs)
253 sb->s_op->unlockfs(sb);
254 sb->s_frozen = SB_UNFROZEN;
256 wake_up(&sb->s_wait_unfrozen);
260 mutex_unlock(&bdev->bd_mount_mutex);
262 EXPORT_SYMBOL(thaw_bdev);
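/*
 * Example (illustrative sketch, not part of the original file): the usual
 * freeze/thaw pairing as a snapshot-style caller might use it. The function
 * name and the snapshot step itself are hypothetical; only freeze_bdev()
 * and thaw_bdev() above are real interfaces.
 */
static int example_snapshot_blockdev(struct block_device *bdev)
{
	struct super_block *sb;

	/* Quiesce the filesystem; new writes block until thaw_bdev(). */
	sb = freeze_bdev(bdev);

	/* ... the device-level snapshot would be taken here ... */

	/* Unfreeze; sb may be NULL if no filesystem was mounted on bdev. */
	thaw_bdev(bdev, sb);
	return 0;
}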
265 * Sync everything. Start out by waking pdflush, because that writes back
266 * all queues in parallel.
268 static void do_sync(unsigned long wait)
271 sync_inodes(0); /* All mappings, inodes and their blockdevs */
273 sync_supers(); /* Write the superblocks */
274 sync_filesystems(0); /* Start syncing the filesystems */
275 sync_filesystems(wait); /* Waitingly sync the filesystems */
276 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
278 printk("Emergency Sync complete\n");
279 if (unlikely(laptop_mode))
280 laptop_sync_completion();
283 asmlinkage long sys_sync(void)
289 void emergency_sync(void)
291 pdflush_operation(do_sync, 0);
295 * Generic function to fsync a file.
297 * filp may be NULL if called via the msync of a vma.
300 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
302 struct inode * inode = dentry->d_inode;
303 struct super_block * sb;
306 /* sync the inode to buffers */
307 ret = write_inode_now(inode, 0);
309 /* sync the superblock to buffers */
312 if (sb->s_op->write_super)
313 sb->s_op->write_super(sb);
316 /* .. finally sync the buffers to disk */
317 err = sync_blockdev(sb->s_bdev);
323 long do_fsync(struct file *file, int datasync)
327 struct address_space *mapping = file->f_mapping;
329 if (!file->f_op || !file->f_op->fsync) {
330 /* Why? We can still call filemap_fdatawrite */
335 ret = filemap_fdatawrite(mapping);
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
353 static long __do_fsync(unsigned int fd, int datasync)
360 ret = do_fsync(file, datasync);
366 asmlinkage long sys_fsync(unsigned int fd)
368 return __do_fsync(fd, 0);
371 asmlinkage long sys_fdatasync(unsigned int fd)
373 return __do_fsync(fd, 1);
377 * Various filesystems appear to want __find_get_block to be non-blocking.
378 * But it's the page lock which protects the buffers. To get around this,
379 * we get exclusion from try_to_free_buffers with the blockdev mapping's
382 * Hack idea: for the blockdev mapping, private_lock contention
383 * may be quite high. This code could TryLock the page, and if that
384 * succeeds, there is no need to take private_lock. (But if
385 * private_lock is contended then so is mapping->tree_lock).
387 static struct buffer_head *
388 __find_get_block_slow(struct block_device *bdev, sector_t block)
390 struct inode *bd_inode = bdev->bd_inode;
391 struct address_space *bd_mapping = bd_inode->i_mapping;
392 struct buffer_head *ret = NULL;
394 struct buffer_head *bh;
395 struct buffer_head *head;
399 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
400 page = find_get_page(bd_mapping, index);
404 spin_lock(&bd_mapping->private_lock);
405 if (!page_has_buffers(page))
407 head = page_buffers(page);
410 if (bh->b_blocknr == block) {
415 if (!buffer_mapped(bh))
417 bh = bh->b_this_page;
418 } while (bh != head);
420 /* we might be here because some of the buffers on this page are
421 * not mapped. This is due to various races between
422 * file I/O on the block device and getblk. It gets dealt with
423 * elsewhere; don't report an error if we had some unmapped buffers
426 printk("__find_get_block_slow() failed. "
427 "block=%llu, b_blocknr=%llu\n",
428 (unsigned long long)block,
429 (unsigned long long)bh->b_blocknr);
430 printk("b_state=0x%08lx, b_size=%zu\n",
431 bh->b_state, bh->b_size);
432 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
435 spin_unlock(&bd_mapping->private_lock);
436 page_cache_release(page);
441 /* If invalidate_buffers() will trash dirty buffers, it means some kind
442 of fs corruption is going on. Trashing dirty data always implies losing
443 information that was supposed to be just stored on the physical layer
446 Thus invalidate_buffers in general usage is not allowed to trash
447 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
448 be preserved. These buffers are simply skipped.
450 We also skip buffers which are still in use. For example this can
451 happen if a userspace program is reading the block device.
453 NOTE: In the case where the user removed a removable-media disk, even if
454 there's still dirty data not synced to disk (due to a bug in the device
455 driver or to a user error), by not destroying the dirty buffers we could
456 generate corruption also on the next media inserted, thus a parameter is
457 necessary to handle this case in the safest way possible (trying
458 not to corrupt the newly inserted disk with data belonging to
459 the old, now corrupted, disk). Also, for the ramdisk the natural thing
460 to do in order to release the ramdisk memory is to destroy dirty buffers.
462 These are two special cases. Normal usage implies that the device driver
463 issues a sync on the device (without waiting for I/O completion) and
464 then calls invalidate_buffers in a way that doesn't trash dirty buffers.
466 For handling cache coherency with the blkdev pagecache the 'update' case
467 has been introduced. It is needed to re-read from disk any pinned
468 buffer. NOTE: re-reading from disk is destructive, so we can do it only
469 when we assume nobody is changing the buffercache under our I/O and when
470 we think the disk contains more recent information than the buffercache.
471 The update == 1 pass marks the buffers we need to update; the update == 2
472 pass does the actual I/O. */
473 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
475 struct address_space *mapping = bdev->bd_inode->i_mapping;
477 if (mapping->nrpages == 0)
480 invalidate_bh_lrus();
482 * FIXME: what about destroy_dirty_buffers?
483 * We really want to use invalidate_inode_pages2() for
484 * that, but not until that's cleaned up.
486 invalidate_inode_pages(mapping);
490 * Kick pdflush then try to free up some ZONE_NORMAL memory.
492 static void free_more_memory(void)
497 wakeup_pdflush(1024);
500 for_each_online_pgdat(pgdat) {
501 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
503 try_to_free_pages(zones, GFP_NOFS);
508 * I/O completion handler for block_read_full_page() - pages
509 * which come unlocked at the end of I/O.
511 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
514 struct buffer_head *first;
515 struct buffer_head *tmp;
517 int page_uptodate = 1;
519 BUG_ON(!buffer_async_read(bh));
523 set_buffer_uptodate(bh);
525 clear_buffer_uptodate(bh);
526 if (printk_ratelimit())
532 * Be _very_ careful from here on. Bad things can happen if
533 * two buffer heads end IO at almost the same time and both
534 * decide that the page is now completely done.
536 first = page_buffers(page);
537 local_irq_save(flags);
538 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
539 clear_buffer_async_read(bh);
543 if (!buffer_uptodate(tmp))
545 if (buffer_async_read(tmp)) {
546 BUG_ON(!buffer_locked(tmp));
549 tmp = tmp->b_this_page;
551 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
552 local_irq_restore(flags);
555 * If none of the buffers had errors and they are all
556 * uptodate then we can set the page uptodate.
558 if (page_uptodate && !PageError(page))
559 SetPageUptodate(page);
564 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
565 local_irq_restore(flags);
570 * Completion handler for block_write_full_page() - pages which are unlocked
571 * during I/O, and which have PageWriteback cleared upon I/O completion.
573 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
575 char b[BDEVNAME_SIZE];
577 struct buffer_head *first;
578 struct buffer_head *tmp;
581 BUG_ON(!buffer_async_write(bh));
585 set_buffer_uptodate(bh);
587 if (printk_ratelimit()) {
589 printk(KERN_WARNING "lost page write due to "
591 bdevname(bh->b_bdev, b));
593 set_bit(AS_EIO, &page->mapping->flags);
594 clear_buffer_uptodate(bh);
598 first = page_buffers(page);
599 local_irq_save(flags);
600 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
602 clear_buffer_async_write(bh);
604 tmp = bh->b_this_page;
606 if (buffer_async_write(tmp)) {
607 BUG_ON(!buffer_locked(tmp));
610 tmp = tmp->b_this_page;
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
614 end_page_writeback(page);
618 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
619 local_irq_restore(flags);
624 * If a page's buffers are under async readin (end_buffer_async_read
625 * completion) then there is a possibility that another thread of
626 * control could lock one of the buffers after it has completed
627 * but while some of the other buffers have not completed. This
628 * locked buffer would confuse end_buffer_async_read() into not unlocking
629 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
630 * that this buffer is not under async I/O.
632 * The page comes unlocked when it has no locked buffer_async buffers
635 * PageLocked prevents anyone from starting new async I/O against any of
638 * PageWriteback is used to prevent simultaneous writeout of the same
641 * PageLocked prevents anyone from starting writeback of a page which is
642 * under read I/O (PageWriteback is only ever set against a locked page).
644 static void mark_buffer_async_read(struct buffer_head *bh)
646 bh->b_end_io = end_buffer_async_read;
647 set_buffer_async_read(bh);
650 void mark_buffer_async_write(struct buffer_head *bh)
652 bh->b_end_io = end_buffer_async_write;
653 set_buffer_async_write(bh);
655 EXPORT_SYMBOL(mark_buffer_async_write);
659 * fs/buffer.c contains helper functions for buffer-backed address space's
660 * fsync functions. A common requirement for buffer-based filesystems is
661 * that certain data from the backing blockdev needs to be written out for
662 * a successful fsync(). For example, ext2 indirect blocks need to be
663 * written back and waited upon before fsync() returns.
665 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
666 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
667 * management of a list of dependent buffers at ->i_mapping->private_list.
669 * Locking is a little subtle: try_to_free_buffers() will remove buffers
670 * from their controlling inode's queue when they are being freed. But
671 * try_to_free_buffers() will be operating against the *blockdev* mapping
672 * at the time, not against the S_ISREG file which depends on those buffers.
673 * So the locking for private_list is via the private_lock in the address_space
674 * which backs the buffers. Which is different from the address_space
675 * against which the buffers are listed. So for a particular address_space,
676 * mapping->private_lock does *not* protect mapping->private_list! In fact,
677 * mapping->private_list will always be protected by the backing blockdev's
680 * Which introduces a requirement: all buffers on an address_space's
681 * ->private_list must be from the same address_space: the blockdev's.
683 * address_spaces which do not place buffers at ->private_list via these
684 * utility functions are free to use private_lock and private_list for
685 * whatever they want. The only requirement is that list_empty(private_list)
686 * be true at clear_inode() time.
688 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
689 * filesystems should do that. invalidate_inode_buffers() should just go
690 * BUG_ON(!list_empty).
692 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
693 * take an address_space, not an inode. And it should be called
694 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
697 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
698 * list if it is already on a list. Because if the buffer is on a list,
699 * it *must* already be on the right one. If not, the filesystem is being
700 * silly. This will save a ton of locking. But first we have to ensure
701 * that buffers are taken *off* the old inode's list when they are freed
702 * (presumably in truncate). That requires careful auditing of all
703 * filesystems (do it inside bforget()). It could also be done by bringing
708 * The buffer's backing address_space's private_lock must be held
710 static inline void __remove_assoc_queue(struct buffer_head *bh)
712 list_del_init(&bh->b_assoc_buffers);
715 int inode_has_buffers(struct inode *inode)
717 return !list_empty(&inode->i_data.private_list);
721 * osync is designed to support O_SYNC I/O. It waits synchronously for
722 * all already-submitted IO to complete, but does not queue any new
723 * writes to the disk.
725 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
726 * you dirty the buffers, and then use osync_inode_buffers to wait for
727 * completion. Any other dirty buffers which are not yet queued for
728 * write will not be flushed to disk by the osync.
730 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
732 struct buffer_head *bh;
738 list_for_each_prev(p, list) {
740 if (buffer_locked(bh)) {
744 if (!buffer_uptodate(bh))
756 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
758 * @mapping: the mapping which wants those buffers written
760 * Starts I/O against the buffers at mapping->private_list, and waits upon
763 * Basically, this is a convenience function for fsync().
764 * @mapping is a file or directory which needs those buffers to be written for
765 * a successful fsync().
767 int sync_mapping_buffers(struct address_space *mapping)
769 struct address_space *buffer_mapping = mapping->assoc_mapping;
771 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
774 return fsync_buffers_list(&buffer_mapping->private_lock,
775 &mapping->private_list);
777 EXPORT_SYMBOL(sync_mapping_buffers);
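/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * ->fsync method for a simple buffer-backed filesystem, loosely modelled on
 * the ext2 style. "example_fsync" is a hypothetical name; the inode itself
 * is assumed to be written elsewhere (e.g. via write_inode_now()).
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/*
	 * Write out and wait upon the dependent metadata buffers that were
	 * attached to inode->i_mapping->private_list with
	 * mark_buffer_dirty_inode().
	 */
	return sync_mapping_buffers(inode->i_mapping);
}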
780 * Called when we've recently written block `bblock', and it is known that
781 * `bblock' was for a buffer_boundary() buffer. This means that the block at
782 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
783 * dirty, schedule it for IO. So that indirects merge nicely with their data.
785 void write_boundary_block(struct block_device *bdev,
786 sector_t bblock, unsigned blocksize)
788 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
790 if (buffer_dirty(bh))
791 ll_rw_block(WRITE, 1, &bh);
796 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
798 struct address_space *mapping = inode->i_mapping;
799 struct address_space *buffer_mapping = bh->b_page->mapping;
801 mark_buffer_dirty(bh);
802 if (!mapping->assoc_mapping) {
803 mapping->assoc_mapping = buffer_mapping;
805 BUG_ON(mapping->assoc_mapping != buffer_mapping);
807 if (list_empty(&bh->b_assoc_buffers)) {
808 spin_lock(&buffer_mapping->private_lock);
809 list_move_tail(&bh->b_assoc_buffers,
810 &mapping->private_list);
811 spin_unlock(&buffer_mapping->private_lock);
814 EXPORT_SYMBOL(mark_buffer_dirty_inode);
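/*
 * Example (illustrative sketch, not part of the original file): the usual
 * pairing with sync_mapping_buffers() above. After modifying metadata that
 * lives in the block device's page cache (say an indirect block), the
 * filesystem dirties it against the owning inode so a later fsync() of that
 * inode writes it back. "example_dirty_indirect" is a hypothetical name;
 * bh is assumed to have been obtained via sb_bread()/sb_getblk() and
 * already modified by the caller.
 */
static void example_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* Mark bh dirty and queue it on inode->i_mapping->private_list. */
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);	/* O_SYNC inodes: write it out now */
}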
817 * Add a page to the dirty page list.
819 * It is a sad fact of life that this function is called from several places
820 * deeply under spinlocking. It may not sleep.
822 * If the page has buffers, the uptodate buffers are set dirty, to preserve
823 * dirty-state coherency between the page and the buffers. If the page does
824 * not have buffers then when they are later attached they will all be set
827 * The buffers are dirtied before the page is dirtied. There's a small race
828 * window in which a writepage caller may see the page cleanness but not the
829 * buffer dirtiness. That's fine. If this code were to set the page dirty
830 * before the buffers, a concurrent writepage caller could clear the page dirty
831 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832 * page on the dirty page list.
834 * We use private_lock to lock against try_to_free_buffers while using the
835 * page's buffer list. Also use this to protect against clean buffers being
836 * added to the page after it was set dirty.
838 * FIXME: may need to call ->reservepage here as well. That's rather up to the
839 * address_space though.
841 int __set_page_dirty_buffers(struct page *page)
843 struct address_space * const mapping = page_mapping(page);
845 if (unlikely(!mapping))
846 return !TestSetPageDirty(page);
848 spin_lock(&mapping->private_lock);
849 if (page_has_buffers(page)) {
850 struct buffer_head *head = page_buffers(page);
851 struct buffer_head *bh = head;
854 set_buffer_dirty(bh);
855 bh = bh->b_this_page;
856 } while (bh != head);
858 spin_unlock(&mapping->private_lock);
860 if (!TestSetPageDirty(page)) {
861 write_lock_irq(&mapping->tree_lock);
862 if (page->mapping) { /* Race with truncate? */
863 if (mapping_cap_account_dirty(mapping))
864 __inc_zone_page_state(page, NR_FILE_DIRTY);
865 radix_tree_tag_set(&mapping->page_tree,
867 PAGECACHE_TAG_DIRTY);
869 write_unlock_irq(&mapping->tree_lock);
870 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
875 EXPORT_SYMBOL(__set_page_dirty_buffers);
878 * Write out and wait upon a list of buffers.
880 * We have conflicting pressures: we want to make sure that all
881 * initially dirty buffers get waited on, but that any subsequently
882 * dirtied buffers don't. After all, we don't want fsync to last
883 * forever if somebody is actively writing to the file.
885 * Do this in two main stages: first we copy dirty buffers to a
886 * temporary inode list, queueing the writes as we go. Then we clean
887 * up, waiting for those writes to complete.
889 * During this second stage, any subsequent updates to the file may end
890 * up refiling the buffer on the original inode's dirty list again, so
891 * there is a chance we will end up with a buffer queued for write but
892 * not yet completed on that list. So, as a final cleanup we go through
893 * the osync code to catch these locked, dirty buffers without requeuing
894 * any newly dirty buffers for write.
896 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
898 struct buffer_head *bh;
899 struct list_head tmp;
902 INIT_LIST_HEAD(&tmp);
905 while (!list_empty(list)) {
906 bh = BH_ENTRY(list->next);
907 list_del_init(&bh->b_assoc_buffers);
908 if (buffer_dirty(bh) || buffer_locked(bh)) {
909 list_add(&bh->b_assoc_buffers, &tmp);
910 if (buffer_dirty(bh)) {
914 * Ensure any pending I/O completes so that
915 * ll_rw_block() actually writes the current
916 * contents - it is a noop if I/O is still in
917 * flight on potentially older contents.
919 ll_rw_block(SWRITE, 1, &bh);
926 while (!list_empty(&tmp)) {
927 bh = BH_ENTRY(tmp.prev);
928 __remove_assoc_queue(bh);
932 if (!buffer_uptodate(bh))
939 err2 = osync_buffers_list(lock, list);
947 * Invalidate any and all dirty buffers on a given inode. We are
948 * probably unmounting the fs, but that doesn't mean we have already
949 * done a sync(). Just drop the buffers from the inode list.
951 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
952 * assumes that all the buffers are against the blockdev. Not true
955 void invalidate_inode_buffers(struct inode *inode)
957 if (inode_has_buffers(inode)) {
958 struct address_space *mapping = &inode->i_data;
959 struct list_head *list = &mapping->private_list;
960 struct address_space *buffer_mapping = mapping->assoc_mapping;
962 spin_lock(&buffer_mapping->private_lock);
963 while (!list_empty(list))
964 __remove_assoc_queue(BH_ENTRY(list->next));
965 spin_unlock(&buffer_mapping->private_lock);
970 * Remove any clean buffers from the inode's buffer list. This is called
971 * when we're trying to free the inode itself. Those buffers can pin it.
973 * Returns true if all buffers were removed.
975 int remove_inode_buffers(struct inode *inode)
979 if (inode_has_buffers(inode)) {
980 struct address_space *mapping = &inode->i_data;
981 struct list_head *list = &mapping->private_list;
982 struct address_space *buffer_mapping = mapping->assoc_mapping;
984 spin_lock(&buffer_mapping->private_lock);
985 while (!list_empty(list)) {
986 struct buffer_head *bh = BH_ENTRY(list->next);
987 if (buffer_dirty(bh)) {
991 __remove_assoc_queue(bh);
993 spin_unlock(&buffer_mapping->private_lock);
999 * Create the appropriate buffers when given a page for a data area and
1000 * the size of each buffer. Use the bh->b_this_page linked list to
1001 * follow the buffers created. Return NULL if unable to create more
1004 * The retry flag is used to differentiate async IO (paging, swapping)
1005 * which may not fail from ordinary buffer allocations.
1007 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1010 struct buffer_head *bh, *head;
1016 while ((offset -= size) >= 0) {
1017 bh = alloc_buffer_head(GFP_NOFS);
1022 bh->b_this_page = head;
1027 atomic_set(&bh->b_count, 0);
1028 bh->b_private = NULL;
1031 /* Link the buffer to its page */
1032 set_bh_page(bh, page, offset);
1034 init_buffer(bh, NULL, NULL);
1038 * In case anything failed, we just free everything we got.
1044 head = head->b_this_page;
1045 free_buffer_head(bh);
1050 * Return failure for non-async IO requests. Async IO requests
1051 * are not allowed to fail, so we have to wait until buffer heads
1052 * become available. But we don't want tasks sleeping with
1053 * partially complete buffers, so all were released above.
1058 /* We're _really_ low on memory. Now we just
1059 * wait for old buffer heads to become free due to
1060 * finishing IO. Since this is an async request and
1061 * the reserve list is empty, we're sure there are
1062 * async buffer heads in use.
1067 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1070 link_dev_buffers(struct page *page, struct buffer_head *head)
1072 struct buffer_head *bh, *tail;
1077 bh = bh->b_this_page;
1079 tail->b_this_page = head;
1080 attach_page_buffers(page, head);
1084 * Initialise the state of a blockdev page's buffers.
1087 init_page_buffers(struct page *page, struct block_device *bdev,
1088 sector_t block, int size)
1090 struct buffer_head *head = page_buffers(page);
1091 struct buffer_head *bh = head;
1092 int uptodate = PageUptodate(page);
1095 if (!buffer_mapped(bh)) {
1096 init_buffer(bh, NULL, NULL);
1098 bh->b_blocknr = block;
1100 set_buffer_uptodate(bh);
1101 set_buffer_mapped(bh);
1104 bh = bh->b_this_page;
1105 } while (bh != head);
1109 * Create the page-cache page that contains the requested block.
1111 * This is used purely for blockdev mappings.
1113 static struct page *
1114 grow_dev_page(struct block_device *bdev, sector_t block,
1115 pgoff_t index, int size)
1117 struct inode *inode = bdev->bd_inode;
1119 struct buffer_head *bh;
1121 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1125 BUG_ON(!PageLocked(page));
1127 if (page_has_buffers(page)) {
1128 bh = page_buffers(page);
1129 if (bh->b_size == size) {
1130 init_page_buffers(page, bdev, block, size);
1133 if (!try_to_free_buffers(page))
1138 * Allocate some buffers for this page
1140 bh = alloc_page_buffers(page, size, 0);
1145 * Link the page to the buffers and initialise them. Take the
1146 * lock to be atomic wrt __find_get_block(), which does not
1147 * run under the page lock.
1149 spin_lock(&inode->i_mapping->private_lock);
1150 link_dev_buffers(page, bh);
1151 init_page_buffers(page, bdev, block, size);
1152 spin_unlock(&inode->i_mapping->private_lock);
1158 page_cache_release(page);
1163 * Create buffers for the specified block device block's page. If
1164 * that page was dirty, the buffers are set dirty also.
1166 * Except that's a bug. Attaching dirty buffers to a dirty
1167 * blockdev's page can result in filesystem corruption, because
1168 * some of those buffers may be aliases of filesystem data.
1169 * grow_dev_page() will go BUG() if this happens.
1172 grow_buffers(struct block_device *bdev, sector_t block, int size)
1181 } while ((size << sizebits) < PAGE_SIZE);
1183 index = block >> sizebits;
1184 block = index << sizebits;
1186 /* Create a page with the proper size buffers.. */
1187 page = grow_dev_page(bdev, block, index, size);
1191 page_cache_release(page);
1195 static struct buffer_head *
1196 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1198 /* Size must be multiple of hard sectorsize */
1199 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1200 (size < 512 || size > PAGE_SIZE))) {
1201 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1203 printk(KERN_ERR "hardsect size: %d\n",
1204 bdev_hardsect_size(bdev));
1211 struct buffer_head * bh;
1213 bh = __find_get_block(bdev, block, size);
1217 if (!grow_buffers(bdev, block, size))
1223 * The relationship between dirty buffers and dirty pages:
1225 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1226 * the page is tagged dirty in its radix tree.
1228 * At all times, the dirtiness of the buffers represents the dirtiness of
1229 * subsections of the page. If the page has buffers, the page dirty bit is
1230 * merely a hint about the true dirty state.
1232 * When a page is set dirty in its entirety, all its buffers are marked dirty
1233 * (if the page has buffers).
1235 * When a buffer is marked dirty, its page is dirtied, but the page's other
1238 * Also. When blockdev buffers are explicitly read with bread(), they
1239 * individually become uptodate. But their backing page remains not
1240 * uptodate - even if all of its buffers are uptodate. A subsequent
1241 * block_read_full_page() against that page will discover all the uptodate
1242 * buffers, will set the page uptodate and will perform no I/O.
1246 * mark_buffer_dirty - mark a buffer_head as needing writeout
1247 * @bh: the buffer_head to mark dirty
1249 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1250 * backing page dirty, then tag the page as dirty in its address_space's radix
1251 * tree and then attach the address_space's inode to its superblock's dirty
1254 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1255 * mapping->tree_lock and the global inode_lock.
1257 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1259 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1260 __set_page_dirty_nobuffers(bh->b_page);
1264 * Decrement a buffer_head's reference count. If all buffers against a page
1265 * have zero reference count, are clean and unlocked, and if the page is clean
1266 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1267 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1268 * a page but it ends up not being freed, and buffers may later be reattached).
1270 void __brelse(struct buffer_head * buf)
1272 if (atomic_read(&buf->b_count)) {
1276 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1281 * bforget() is like brelse(), except it discards any
1282 * potentially dirty data.
1284 void __bforget(struct buffer_head *bh)
1286 clear_buffer_dirty(bh);
1287 if (!list_empty(&bh->b_assoc_buffers)) {
1288 struct address_space *buffer_mapping = bh->b_page->mapping;
1290 spin_lock(&buffer_mapping->private_lock);
1291 list_del_init(&bh->b_assoc_buffers);
1292 spin_unlock(&buffer_mapping->private_lock);
1297 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1300 if (buffer_uptodate(bh)) {
1305 bh->b_end_io = end_buffer_read_sync;
1306 submit_bh(READ, bh);
1308 if (buffer_uptodate(bh))
1316 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1317 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1318 * refcount elevated by one when they're in an LRU. A buffer can only appear
1319 * once in a particular CPU's LRU. A single buffer can be present in multiple
1320 * CPU's LRUs at the same time.
1322 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1323 * sb_find_get_block().
1325 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1326 * a local interrupt disable for that.
1329 #define BH_LRU_SIZE 8
1332 struct buffer_head *bhs[BH_LRU_SIZE];
1335 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1338 #define bh_lru_lock() local_irq_disable()
1339 #define bh_lru_unlock() local_irq_enable()
1341 #define bh_lru_lock() preempt_disable()
1342 #define bh_lru_unlock() preempt_enable()
1345 static inline void check_irqs_on(void)
1347 #ifdef irqs_disabled
1348 BUG_ON(irqs_disabled());
1353 * The LRU management algorithm is dopey-but-simple. Sorry.
1355 static void bh_lru_install(struct buffer_head *bh)
1357 struct buffer_head *evictee = NULL;
1362 lru = &__get_cpu_var(bh_lrus);
1363 if (lru->bhs[0] != bh) {
1364 struct buffer_head *bhs[BH_LRU_SIZE];
1370 for (in = 0; in < BH_LRU_SIZE; in++) {
1371 struct buffer_head *bh2 = lru->bhs[in];
1376 if (out >= BH_LRU_SIZE) {
1377 BUG_ON(evictee != NULL);
1384 while (out < BH_LRU_SIZE)
1386 memcpy(lru->bhs, bhs, sizeof(bhs));
1395 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1397 static struct buffer_head *
1398 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1400 struct buffer_head *ret = NULL;
1406 lru = &__get_cpu_var(bh_lrus);
1407 for (i = 0; i < BH_LRU_SIZE; i++) {
1408 struct buffer_head *bh = lru->bhs[i];
1410 if (bh && bh->b_bdev == bdev &&
1411 bh->b_blocknr == block && bh->b_size == size) {
1414 lru->bhs[i] = lru->bhs[i - 1];
1429 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1430 * it in the LRU and mark it as accessed. If it is not present then return
1433 struct buffer_head *
1434 __find_get_block(struct block_device *bdev, sector_t block, int size)
1436 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1439 bh = __find_get_block_slow(bdev, block);
1447 EXPORT_SYMBOL(__find_get_block);
1450 * __getblk will locate (and, if necessary, create) the buffer_head
1451 * which corresponds to the passed block_device, block and size. The
1452 * returned buffer has its reference count incremented.
1454 * __getblk() cannot fail - it just keeps trying. If you pass it an
1455 * illegal block number, __getblk() will happily return a buffer_head
1456 * which represents the non-existent block. Very weird.
1458 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1459 * attempt is failing. FIXME, perhaps?
1461 struct buffer_head *
1462 __getblk(struct block_device *bdev, sector_t block, int size)
1464 struct buffer_head *bh = __find_get_block(bdev, block, size);
1468 bh = __getblk_slow(bdev, block, size);
1471 EXPORT_SYMBOL(__getblk);
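/*
 * Example (illustrative sketch, not part of the original file): the common
 * __getblk() pattern for a block that is about to be completely overwritten,
 * so no read from disk is needed. sb_getblk() is the per-superblock wrapper
 * filesystems normally use; "example_new_block" is a hypothetical name.
 */
static struct buffer_head *example_new_block(struct super_block *sb,
					     sector_t blocknr)
{
	/* Cannot fail - see the comment above; it keeps retrying. */
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* fill in the new contents */
	set_buffer_uptodate(bh);		/* contents are now valid */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* schedule it for writeback */
	return bh;
}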
1474 * Do async read-ahead on a buffer..
1476 void __breadahead(struct block_device *bdev, sector_t block, int size)
1478 struct buffer_head *bh = __getblk(bdev, block, size);
1480 ll_rw_block(READA, 1, &bh);
1484 EXPORT_SYMBOL(__breadahead);
1487 * __bread() - reads a specified block and returns the bh
1488 * @bdev: the block_device to read from
1489 * @block: number of block
1490 * @size: size (in bytes) to read
1492 * Reads a specified block, and returns buffer head that contains it.
1493 * It returns NULL if the block was unreadable.
1495 struct buffer_head *
1496 __bread(struct block_device *bdev, sector_t block, int size)
1498 struct buffer_head *bh = __getblk(bdev, block, size);
1500 if (likely(bh) && !buffer_uptodate(bh))
1501 bh = __bread_slow(bh);
1504 EXPORT_SYMBOL(__bread);
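/*
 * Example (illustrative sketch, not part of the original file): the typical
 * read/release pattern built on __bread(), here through the sb_bread()
 * wrapper a filesystem would normally use. "example_read_block" is a
 * hypothetical name; the block number is arbitrary.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;

	/* sb_bread() is a wrapper around __bread(sb->s_bdev, blocknr, ...). */
	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;	/* the block was unreadable */

	/*
	 * bh->b_data now points at sb->s_blocksize bytes of uptodate data.
	 * A writer would modify it here and call mark_buffer_dirty(bh).
	 */

	brelse(bh);		/* drop the reference taken by __getblk() */
	return 0;
}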
1507 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1508 * This doesn't race because it runs in each cpu either in irq
1509 * or with preempt disabled.
1511 static void invalidate_bh_lru(void *arg)
1513 struct bh_lru *b = &get_cpu_var(bh_lrus);
1516 for (i = 0; i < BH_LRU_SIZE; i++) {
1520 put_cpu_var(bh_lrus);
1523 static void invalidate_bh_lrus(void)
1525 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1528 void set_bh_page(struct buffer_head *bh,
1529 struct page *page, unsigned long offset)
1532 BUG_ON(offset >= PAGE_SIZE);
1533 if (PageHighMem(page))
1535 * This catches illegal uses and preserves the offset:
1537 bh->b_data = (char *)(0 + offset);
1539 bh->b_data = page_address(page) + offset;
1541 EXPORT_SYMBOL(set_bh_page);
1544 * Called when truncating a buffer on a page completely.
1546 static void discard_buffer(struct buffer_head * bh)
1549 clear_buffer_dirty(bh);
1551 clear_buffer_mapped(bh);
1552 clear_buffer_req(bh);
1553 clear_buffer_new(bh);
1554 clear_buffer_delay(bh);
1559 * try_to_release_page() - release old fs-specific metadata on a page
1561 * @page: the page which the kernel is trying to free
1562 * @gfp_mask: memory allocation flags (and I/O mode)
1564 * The address_space is to try to release any data against the page
1565 * (presumably at page->private). If the release was successful, return `1'.
1566 * Otherwise return zero.
1568 * The @gfp_mask argument specifies whether I/O may be performed to release
1569 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1571 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1573 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1575 struct address_space * const mapping = page->mapping;
1577 BUG_ON(!PageLocked(page));
1578 if (PageWriteback(page))
1581 if (mapping && mapping->a_ops->releasepage)
1582 return mapping->a_ops->releasepage(page, gfp_mask);
1583 return try_to_free_buffers(page);
1585 EXPORT_SYMBOL(try_to_release_page);
1588 * block_invalidatepage - invalidate part or all of a buffer-backed page
1590 * @page: the page which is affected
1591 * @offset: the index of the truncation point
1593 * block_invalidatepage() is called when all or part of the page has become
1594 * invalidated by a truncate operation.
1596 * block_invalidatepage() does not have to release all buffers, but it must
1597 * ensure that no dirty buffer is left outside @offset and that no I/O
1598 * is underway against any of the blocks which are outside the truncation
1599 * point. Because the caller is about to free (and possibly reuse) those
1602 void block_invalidatepage(struct page *page, unsigned long offset)
1604 struct buffer_head *head, *bh, *next;
1605 unsigned int curr_off = 0;
1607 BUG_ON(!PageLocked(page));
1608 if (!page_has_buffers(page))
1611 head = page_buffers(page);
1614 unsigned int next_off = curr_off + bh->b_size;
1615 next = bh->b_this_page;
1618 * is this block fully invalidated?
1620 if (offset <= curr_off)
1622 curr_off = next_off;
1624 } while (bh != head);
1627 * We release buffers only if the entire page is being invalidated.
1628 * The get_block cached value has been unconditionally invalidated,
1629 * so real IO is not possible anymore.
1632 try_to_release_page(page, 0);
1636 EXPORT_SYMBOL(block_invalidatepage);
1638 void do_invalidatepage(struct page *page, unsigned long offset)
1640 void (*invalidatepage)(struct page *, unsigned long);
1641 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1642 block_invalidatepage;
1643 (*invalidatepage)(page, offset);
1647 * We attach and possibly dirty the buffers atomically wrt
1648 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1649 * is already excluded via the page lock.
1651 void create_empty_buffers(struct page *page,
1652 unsigned long blocksize, unsigned long b_state)
1654 struct buffer_head *bh, *head, *tail;
1656 head = alloc_page_buffers(page, blocksize, 1);
1659 bh->b_state |= b_state;
1661 bh = bh->b_this_page;
1663 tail->b_this_page = head;
1665 spin_lock(&page->mapping->private_lock);
1666 if (PageUptodate(page) || PageDirty(page)) {
1669 if (PageDirty(page))
1670 set_buffer_dirty(bh);
1671 if (PageUptodate(page))
1672 set_buffer_uptodate(bh);
1673 bh = bh->b_this_page;
1674 } while (bh != head);
1676 attach_page_buffers(page, head);
1677 spin_unlock(&page->mapping->private_lock);
1679 EXPORT_SYMBOL(create_empty_buffers);
1682 * We are taking a block for data and we don't want any output from any
1683 * buffer-cache aliases starting from return from that function and
1684 * until the moment when something will explicitly mark the buffer
1685 * dirty (hopefully that will not happen until we free that block ;-)
1686 * We don't even need to mark it not-uptodate - nobody can expect
1687 * anything from a newly allocated buffer anyway. We used to use
1688 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1689 * don't want to mark the alias unmapped, for example - it would confuse
1690 * anyone who might pick it with bread() afterwards...
1692 * Also, note that bforget() doesn't lock the buffer. So there can
1693 * be writeout I/O going on against recently-freed buffers. We don't
1694 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1695 * only if we really need to. That happens here.
1697 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1699 struct buffer_head *old_bh;
1703 old_bh = __find_get_block_slow(bdev, block);
1705 clear_buffer_dirty(old_bh);
1706 wait_on_buffer(old_bh);
1707 clear_buffer_req(old_bh);
1711 EXPORT_SYMBOL(unmap_underlying_metadata);
1714 * NOTE! All mapped/uptodate combinations are valid:
1716 * Mapped Uptodate Meaning
1718 * No No "unknown" - must do get_block()
1719 * No Yes "hole" - zero-filled
1720 * Yes No "allocated" - allocated on disk, not read in
1721 * Yes Yes "valid" - allocated and up-to-date in memory.
1723 * "Dirty" is valid only with the last case (mapped+uptodate).
1727 * While block_write_full_page is writing back the dirty buffers under
1728 * the page lock, whoever dirtied the buffers may decide to clean them
1729 * again at any time. We handle that by only looking at the buffer
1730 * state inside lock_buffer().
1732 * If block_write_full_page() is called for regular writeback
1733 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1734 * locked buffer. This only can happen if someone has written the buffer
1735 * directly, with submit_bh(). At the address_space level PageWriteback
1736 * prevents this contention from occurring.
1738 static int __block_write_full_page(struct inode *inode, struct page *page,
1739 get_block_t *get_block, struct writeback_control *wbc)
1743 sector_t last_block;
1744 struct buffer_head *bh, *head;
1745 const unsigned blocksize = 1 << inode->i_blkbits;
1746 int nr_underway = 0;
1748 BUG_ON(!PageLocked(page));
1750 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1752 if (!page_has_buffers(page)) {
1753 create_empty_buffers(page, blocksize,
1754 (1 << BH_Dirty)|(1 << BH_Uptodate));
1758 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1759 * here, and the (potentially unmapped) buffers may become dirty at
1760 * any time. If a buffer becomes dirty here after we've inspected it
1761 * then we just miss that fact, and the page stays dirty.
1763 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1764 * handle that here by just cleaning them.
1767 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1768 head = page_buffers(page);
1772 * Get all the dirty buffers mapped to disk addresses and
1773 * handle any aliases from the underlying blockdev's mapping.
1776 if (block > last_block) {
1778 * mapped buffers outside i_size will occur, because
1779 * this page can be outside i_size when there is a
1780 * truncate in progress.
1783 * The buffer was zeroed by block_write_full_page()
1785 clear_buffer_dirty(bh);
1786 set_buffer_uptodate(bh);
1787 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1788 WARN_ON(bh->b_size != blocksize);
1789 err = get_block(inode, block, bh, 1);
1792 if (buffer_new(bh)) {
1793 /* blockdev mappings never come here */
1794 clear_buffer_new(bh);
1795 unmap_underlying_metadata(bh->b_bdev,
1799 bh = bh->b_this_page;
1801 } while (bh != head);
1804 if (!buffer_mapped(bh))
1807 * If it's a fully non-blocking write attempt and we cannot
1808 * lock the buffer then redirty the page. Note that this can
1809 * potentially cause a busy-wait loop from pdflush and kswapd
1810 * activity, but those code paths have their own higher-level
1813 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1815 } else if (test_set_buffer_locked(bh)) {
1816 redirty_page_for_writepage(wbc, page);
1819 if (test_clear_buffer_dirty(bh)) {
1820 mark_buffer_async_write(bh);
1824 } while ((bh = bh->b_this_page) != head);
1827 * The page and its buffers are protected by PageWriteback(), so we can
1828 * drop the bh refcounts early.
1830 BUG_ON(PageWriteback(page));
1831 set_page_writeback(page);
1834 struct buffer_head *next = bh->b_this_page;
1835 if (buffer_async_write(bh)) {
1836 submit_bh(WRITE, bh);
1840 } while (bh != head);
1845 if (nr_underway == 0) {
1847 * The page was marked dirty, but the buffers were
1848 * clean. Someone wrote them back by hand with
1849 * ll_rw_block/submit_bh. A rare case.
1853 if (!buffer_uptodate(bh)) {
1857 bh = bh->b_this_page;
1858 } while (bh != head);
1860 SetPageUptodate(page);
1861 end_page_writeback(page);
1863 * The page and buffer_heads can be released at any time from
1866 wbc->pages_skipped++; /* We didn't write this page */
1872 * ENOSPC, or some other error. We may already have added some
1873 * blocks to the file, so we need to write these out to avoid
1874 * exposing stale data.
1875 * The page is currently locked and not marked for writeback
1878 /* Recovery: lock and submit the mapped buffers */
1880 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1882 mark_buffer_async_write(bh);
1885 * The buffer may have been set dirty during
1886 * attachment to a dirty page.
1888 clear_buffer_dirty(bh);
1890 } while ((bh = bh->b_this_page) != head);
1892 BUG_ON(PageWriteback(page));
1893 set_page_writeback(page);
1896 struct buffer_head *next = bh->b_this_page;
1897 if (buffer_async_write(bh)) {
1898 clear_buffer_dirty(bh);
1899 submit_bh(WRITE, bh);
1903 } while (bh != head);
1907 static int __block_prepare_write(struct inode *inode, struct page *page,
1908 unsigned from, unsigned to, get_block_t *get_block)
1910 unsigned block_start, block_end;
1913 unsigned blocksize, bbits;
1914 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1916 BUG_ON(!PageLocked(page));
1917 BUG_ON(from > PAGE_CACHE_SIZE);
1918 BUG_ON(to > PAGE_CACHE_SIZE);
1921 blocksize = 1 << inode->i_blkbits;
1922 if (!page_has_buffers(page))
1923 create_empty_buffers(page, blocksize, 0);
1924 head = page_buffers(page);
1926 bbits = inode->i_blkbits;
1927 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1929 for(bh = head, block_start = 0; bh != head || !block_start;
1930 block++, block_start=block_end, bh = bh->b_this_page) {
1931 block_end = block_start + blocksize;
1932 if (block_end <= from || block_start >= to) {
1933 if (PageUptodate(page)) {
1934 if (!buffer_uptodate(bh))
1935 set_buffer_uptodate(bh);
1940 clear_buffer_new(bh);
1941 if (!buffer_mapped(bh)) {
1942 WARN_ON(bh->b_size != blocksize);
1943 err = get_block(inode, block, bh, 1);
1946 if (buffer_new(bh)) {
1947 unmap_underlying_metadata(bh->b_bdev,
1949 if (PageUptodate(page)) {
1950 set_buffer_uptodate(bh);
1953 if (block_end > to || block_start < from) {
1956 kaddr = kmap_atomic(page, KM_USER0);
1960 if (block_start < from)
1961 memset(kaddr+block_start,
1962 0, from-block_start);
1963 flush_dcache_page(page);
1964 kunmap_atomic(kaddr, KM_USER0);
1969 if (PageUptodate(page)) {
1970 if (!buffer_uptodate(bh))
1971 set_buffer_uptodate(bh);
1974 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1975 (block_start < from || block_end > to)) {
1976 ll_rw_block(READ, 1, &bh);
1981 * If we issued read requests - let them complete.
1983 while(wait_bh > wait) {
1984 wait_on_buffer(*--wait_bh);
1985 if (!buffer_uptodate(*wait_bh))
1992 clear_buffer_new(bh);
1993 } while ((bh = bh->b_this_page) != head);
1998 * Zero out any newly allocated blocks to avoid exposing stale
1999 * data. If BH_New is set, we know that the block was newly
2000 * allocated in the above loop.
2005 block_end = block_start+blocksize;
2006 if (block_end <= from)
2008 if (block_start >= to)
2010 if (buffer_new(bh)) {
2013 clear_buffer_new(bh);
2014 kaddr = kmap_atomic(page, KM_USER0);
2015 memset(kaddr+block_start, 0, bh->b_size);
2016 kunmap_atomic(kaddr, KM_USER0);
2017 set_buffer_uptodate(bh);
2018 mark_buffer_dirty(bh);
2021 block_start = block_end;
2022 bh = bh->b_this_page;
2023 } while (bh != head);
2027 static int __block_commit_write(struct inode *inode, struct page *page,
2028 unsigned from, unsigned to)
2030 unsigned block_start, block_end;
2033 struct buffer_head *bh, *head;
2035 blocksize = 1 << inode->i_blkbits;
2037 for(bh = head = page_buffers(page), block_start = 0;
2038 bh != head || !block_start;
2039 block_start=block_end, bh = bh->b_this_page) {
2040 block_end = block_start + blocksize;
2041 if (block_end <= from || block_start >= to) {
2042 if (!buffer_uptodate(bh))
2045 set_buffer_uptodate(bh);
2046 mark_buffer_dirty(bh);
2051 * If this is a partial write which happened to make all buffers
2052 * uptodate then we can optimize away a bogus readpage() for
2053 * the next read(). Here we 'discover' whether the page went
2054 * uptodate as a result of this (potentially partial) write.
2057 SetPageUptodate(page);
2062 * Generic "read page" function for block devices that have the normal
2063 * get_block functionality. This covers most of the block device filesystems.
2064 * Reads the page asynchronously --- the unlock_buffer() and
2065 * set/clear_buffer_uptodate() functions propagate buffer state into the
2066 * page struct once IO has completed.
2068 int block_read_full_page(struct page *page, get_block_t *get_block)
2070 struct inode *inode = page->mapping->host;
2071 sector_t iblock, lblock;
2072 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2073 unsigned int blocksize;
2075 int fully_mapped = 1;
2077 BUG_ON(!PageLocked(page));
2078 blocksize = 1 << inode->i_blkbits;
2079 if (!page_has_buffers(page))
2080 create_empty_buffers(page, blocksize, 0);
2081 head = page_buffers(page);
2083 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2084 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2090 if (buffer_uptodate(bh))
2093 if (!buffer_mapped(bh)) {
2097 if (iblock < lblock) {
2098 WARN_ON(bh->b_size != blocksize);
2099 err = get_block(inode, iblock, bh, 0);
2103 if (!buffer_mapped(bh)) {
2104 void *kaddr = kmap_atomic(page, KM_USER0);
2105 memset(kaddr + i * blocksize, 0, blocksize);
2106 flush_dcache_page(page);
2107 kunmap_atomic(kaddr, KM_USER0);
2109 set_buffer_uptodate(bh);
2113 * get_block() might have updated the buffer
2116 if (buffer_uptodate(bh))
2120 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2123 SetPageMappedToDisk(page);
2127 * All buffers are uptodate - we can set the page uptodate
2128 * as well. But not if get_block() returned an error.
2130 if (!PageError(page))
2131 SetPageUptodate(page);
2136 /* Stage two: lock the buffers */
2137 for (i = 0; i < nr; i++) {
2140 mark_buffer_async_read(bh);
2144 * Stage 3: start the IO. Check for uptodateness
2145 * inside the buffer lock in case another process reading
2146 * the underlying blockdev brought it uptodate (the sct fix).
2148 for (i = 0; i < nr; i++) {
2150 if (buffer_uptodate(bh))
2151 end_buffer_async_read(bh, 1);
2153 submit_bh(READ, bh);
2158 /* utility function for filesystems that need to do work on expanding
2159 * truncates. Uses prepare/commit_write to allow the filesystem to
2160 * deal with the hole.
2162 static int __generic_cont_expand(struct inode *inode, loff_t size,
2163 pgoff_t index, unsigned int offset)
2165 struct address_space *mapping = inode->i_mapping;
2167 unsigned long limit;
2171 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2172 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2173 send_sig(SIGXFSZ, current, 0);
2176 if (size > inode->i_sb->s_maxbytes)
2180 page = grab_cache_page(mapping, index);
2183 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2186 * ->prepare_write() may have instantiated a few blocks
2187 * outside i_size. Trim these off again.
2190 page_cache_release(page);
2191 vmtruncate(inode, inode->i_size);
2195 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2198 page_cache_release(page);
2205 int generic_cont_expand(struct inode *inode, loff_t size)
2208 unsigned int offset;
2210 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2212 /* ugh. in prepare/commit_write, if from==to==start of block, we
2213 ** skip the prepare. make sure we never send an offset for the start
2216 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2217 /* caller must handle this extra byte. */
2220 index = size >> PAGE_CACHE_SHIFT;
2222 return __generic_cont_expand(inode, size, index, offset);
2225 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2227 loff_t pos = size - 1;
2228 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2229 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2231 /* prepare/commit_write can handle even if from==to==start of block. */
2232 return __generic_cont_expand(inode, size, index, offset);
2236 * For moronic filesystems that do not allow holes in files.
2237 * We may have to extend the file.
2240 int cont_prepare_write(struct page *page, unsigned offset,
2241 unsigned to, get_block_t *get_block, loff_t *bytes)
2243 struct address_space *mapping = page->mapping;
2244 struct inode *inode = mapping->host;
2245 struct page *new_page;
2249 unsigned blocksize = 1 << inode->i_blkbits;
2252 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2254 new_page = grab_cache_page(mapping, pgpos);
2257 /* we might sleep */
2258 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2259 unlock_page(new_page);
2260 page_cache_release(new_page);
2263 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2264 if (zerofrom & (blocksize-1)) {
2265 *bytes |= (blocksize-1);
2268 status = __block_prepare_write(inode, new_page, zerofrom,
2269 PAGE_CACHE_SIZE, get_block);
2272 kaddr = kmap_atomic(new_page, KM_USER0);
2273 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2274 flush_dcache_page(new_page);
2275 kunmap_atomic(kaddr, KM_USER0);
2276 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2277 unlock_page(new_page);
2278 page_cache_release(new_page);
2281 if (page->index < pgpos) {
2282 /* completely inside the area */
2285 /* page covers the boundary, find the boundary offset */
2286 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2288 /* if we will expand the thing last block will be filled */
2289 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2290 *bytes |= (blocksize-1);
2294 /* starting below the boundary? Nothing to zero out */
2295 if (offset <= zerofrom)
2298 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2301 if (zerofrom < offset) {
2302 kaddr = kmap_atomic(page, KM_USER0);
2303 memset(kaddr+zerofrom, 0, offset-zerofrom);
2304 flush_dcache_page(page);
2305 kunmap_atomic(kaddr, KM_USER0);
2306 __block_commit_write(inode, page, zerofrom, offset);
2310 ClearPageUptodate(page);
2314 ClearPageUptodate(new_page);
2315 unlock_page(new_page);
2316 page_cache_release(new_page);
2321 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2322 get_block_t *get_block)
2324 struct inode *inode = page->mapping->host;
2325 int err = __block_prepare_write(inode, page, from, to, get_block);
2327 ClearPageUptodate(page);
2331 int block_commit_write(struct page *page, unsigned from, unsigned to)
2333 struct inode *inode = page->mapping->host;
2334 __block_commit_write(inode,page,from,to);
2338 int generic_commit_write(struct file *file, struct page *page,
2339 unsigned from, unsigned to)
2341 struct inode *inode = page->mapping->host;
2342 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2343 __block_commit_write(inode,page,from,to);
2345 * No need to use i_size_read() here: i_size
2346 * cannot change under us because we hold i_mutex.
2348 if (pos > inode->i_size) {
2349 i_size_write(inode, pos);
2350 mark_inode_dirty(inode);
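/*
 * Illustrative sketch (not part of the original file): a minimal
 * buffer-backed filesystem typically wires ->prepare_write to
 * block_prepare_write() and ->commit_write to generic_commit_write(),
 * with thin wrappers supplying its get_block.  The myfs_* names are
 * hypothetical.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
};
#endif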
2357 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2358 * immediately, while under the page lock. So it needs a special end_io
2359 * handler which does not touch the bh after unlocking it.
2361 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2362 * a race there is benign: unlock_buffer() only uses the bh's address for
2363 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2366 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2369 set_buffer_uptodate(bh);
2371 /* This happens, due to failed READA attempts. */
2372 clear_buffer_uptodate(bh);
2378 * On entry, the page is not uptodate at all.
2379 * On exit, the page is fully uptodate in the areas outside (from,to)
2381 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2382 get_block_t *get_block)
2384 struct inode *inode = page->mapping->host;
2385 const unsigned blkbits = inode->i_blkbits;
2386 const unsigned blocksize = 1 << blkbits;
2387 struct buffer_head map_bh;
2388 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2389 unsigned block_in_page;
2390 unsigned block_start;
2391 sector_t block_in_file;
2396 int is_mapped_to_disk = 1;
2399 if (PageMappedToDisk(page))
2402 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2403 map_bh.b_page = page;
2406 * We loop across all blocks in the page, whether or not they are
2407 * part of the affected region. This is so we can discover if the
2408 * page is fully mapped-to-disk.
2410 for (block_start = 0, block_in_page = 0;
2411 block_start < PAGE_CACHE_SIZE;
2412 block_in_page++, block_start += blocksize) {
2413 unsigned block_end = block_start + blocksize;
2418 if (block_start >= to)
2420 map_bh.b_size = blocksize;
2421 ret = get_block(inode, block_in_file + block_in_page,
2425 if (!buffer_mapped(&map_bh))
2426 is_mapped_to_disk = 0;
2427 if (buffer_new(&map_bh))
2428 unmap_underlying_metadata(map_bh.b_bdev,
2430 if (PageUptodate(page))
2432 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2433 kaddr = kmap_atomic(page, KM_USER0);
2434 if (block_start < from) {
2435 memset(kaddr+block_start, 0, from-block_start);
2438 if (block_end > to) {
2439 memset(kaddr + to, 0, block_end - to);
2442 flush_dcache_page(page);
2443 kunmap_atomic(kaddr, KM_USER0);
2446 if (buffer_uptodate(&map_bh))
2447 continue; /* reiserfs does this */
2448 if (block_start < from || block_end > to) {
2449 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2455 bh->b_state = map_bh.b_state;
2456 atomic_set(&bh->b_count, 0);
2457 bh->b_this_page = NULL;
2459 bh->b_blocknr = map_bh.b_blocknr;
2460 bh->b_size = blocksize;
2461 bh->b_data = (char *)(long)block_start;
2462 bh->b_bdev = map_bh.b_bdev;
2463 bh->b_private = NULL;
2464 read_bh[nr_reads++] = bh;
2469 struct buffer_head *bh;
2472 * The page is locked, so these buffers are protected from
2473 * any VM or truncate activity. Hence we don't need to care
2474 * for the buffer_head refcounts.
2476 for (i = 0; i < nr_reads; i++) {
2479 bh->b_end_io = end_buffer_read_nobh;
2480 submit_bh(READ, bh);
2482 for (i = 0; i < nr_reads; i++) {
2485 if (!buffer_uptodate(bh))
2487 free_buffer_head(bh);
2494 if (is_mapped_to_disk)
2495 SetPageMappedToDisk(page);
2496 SetPageUptodate(page);
2499 * Setting the page dirty here isn't necessary for the prepare_write
2500 * function - commit_write will do that. But if/when this function is
2501 * used within the pagefault handler to ensure that all mmapped pages
2502 * have backing space in the filesystem, we will need to dirty the page
2503 * if its contents were altered.
2506 set_page_dirty(page);
2511 for (i = 0; i < nr_reads; i++) {
2513 free_buffer_head(read_bh[i]);
2517 * Error recovery is pretty slack. Clear the page and mark it dirty
2518 * so we'll later zero out any blocks which _were_ allocated.
2520 kaddr = kmap_atomic(page, KM_USER0);
2521 memset(kaddr, 0, PAGE_CACHE_SIZE);
2522 kunmap_atomic(kaddr, KM_USER0);
2523 SetPageUptodate(page);
2524 set_page_dirty(page);
2527 EXPORT_SYMBOL(nobh_prepare_write);
2529 int nobh_commit_write(struct file *file, struct page *page,
2530 unsigned from, unsigned to)
2532 struct inode *inode = page->mapping->host;
2533 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2535 set_page_dirty(page);
2536 if (pos > inode->i_size) {
2537 i_size_write(inode, pos);
2538 mark_inode_dirty(inode);
2542 EXPORT_SYMBOL(nobh_commit_write);
2545 * nobh_writepage() - based on block_write_full_page() except
2546 * that it tries to operate without attaching bufferheads to the page.
2549 int nobh_writepage(struct page *page, get_block_t *get_block,
2550 struct writeback_control *wbc)
2552 struct inode * const inode = page->mapping->host;
2553 loff_t i_size = i_size_read(inode);
2554 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2559 /* Is the page fully inside i_size? */
2560 if (page->index < end_index)
2563 /* Is the page fully outside i_size? (truncate in progress) */
2564 offset = i_size & (PAGE_CACHE_SIZE-1);
2565 if (page->index >= end_index+1 || !offset) {
2567 * The page may have dirty, unmapped buffers. For example,
2568 * they may have been added in ext3_writepage(). Make them
2569 * freeable here, so the page does not leak.
2572 /* Not really sure about this - do we need this ? */
2573 if (page->mapping->a_ops->invalidatepage)
2574 page->mapping->a_ops->invalidatepage(page, offset);
2577 return 0; /* don't care */
2581 * The page straddles i_size. It must be zeroed out on each and every
2582 * writepage invocation because it may be mmapped. "A file is mapped
2583 * in multiples of the page size. For a file that is not a multiple of
2584 * the page size, the remaining memory is zeroed when mapped, and
2585 * writes to that region are not written out to the file."
2587 kaddr = kmap_atomic(page, KM_USER0);
2588 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2589 flush_dcache_page(page);
2590 kunmap_atomic(kaddr, KM_USER0);
2592 ret = mpage_writepage(page, get_block, wbc);
2594 ret = __block_write_full_page(inode, page, get_block, wbc);
2597 EXPORT_SYMBOL(nobh_writepage);
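/*
 * Illustrative sketch (not part of the original file): the nobh helpers are
 * meant to be used together, so an ext2-style "nobh" aops table pairs
 * nobh_prepare_write()/nobh_commit_write() with nobh_writepage().  The
 * myfs_* names (including myfs_readpage from the earlier sketch) are
 * hypothetical.
 */
#if 0
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif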
2600 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2602 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2604 struct inode *inode = mapping->host;
2605 unsigned blocksize = 1 << inode->i_blkbits;
2606 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2607 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2610 const struct address_space_operations *a_ops = mapping->a_ops;
2614 if ((offset & (blocksize - 1)) == 0)
2618 page = grab_cache_page(mapping, index);
2622 to = (offset + blocksize) & ~(blocksize - 1);
2623 ret = a_ops->prepare_write(NULL, page, offset, to);
2625 kaddr = kmap_atomic(page, KM_USER0);
2626 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2627 flush_dcache_page(page);
2628 kunmap_atomic(kaddr, KM_USER0);
2629 set_page_dirty(page);
2632 page_cache_release(page);
2636 EXPORT_SYMBOL(nobh_truncate_page);
2638 int block_truncate_page(struct address_space *mapping,
2639 loff_t from, get_block_t *get_block)
2641 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2642 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2645 unsigned length, pos;
2646 struct inode *inode = mapping->host;
2648 struct buffer_head *bh;
2652 blocksize = 1 << inode->i_blkbits;
2653 length = offset & (blocksize - 1);
2655 /* Block boundary? Nothing to do */
2659 length = blocksize - length;
2660 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2662 page = grab_cache_page(mapping, index);
2667 if (!page_has_buffers(page))
2668 create_empty_buffers(page, blocksize, 0);
2670 /* Find the buffer that contains "offset" */
2671 bh = page_buffers(page);
2673 while (offset >= pos) {
2674 bh = bh->b_this_page;
2680 if (!buffer_mapped(bh)) {
2681 WARN_ON(bh->b_size != blocksize);
2682 err = get_block(inode, iblock, bh, 0);
2685 /* unmapped? It's a hole - nothing to do */
2686 if (!buffer_mapped(bh))
2690 /* Ok, it's mapped. Make sure it's up-to-date */
2691 if (PageUptodate(page))
2692 set_buffer_uptodate(bh);
2694 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2696 ll_rw_block(READ, 1, &bh);
2698 /* Uhhuh. Read error. Complain and punt. */
2699 if (!buffer_uptodate(bh))
2703 kaddr = kmap_atomic(page, KM_USER0);
2704 memset(kaddr + offset, 0, length);
2705 flush_dcache_page(page);
2706 kunmap_atomic(kaddr, KM_USER0);
2708 mark_buffer_dirty(bh);
2713 page_cache_release(page);
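/*
 * Illustrative sketch (not part of the original file): block_truncate_page()
 * is normally called from a filesystem's truncate path to zero the partial
 * block at the new EOF before block freeing proceeds.  myfs_truncate and
 * myfs_get_block are hypothetical.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	/* Zero the tail of the (possibly partial) last block... */
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

	/* ...then free the blocks beyond the new size (filesystem-specific). */
}
#endif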
2719 * The generic ->writepage function for buffer-backed address_spaces
2721 int block_write_full_page(struct page *page, get_block_t *get_block,
2722 struct writeback_control *wbc)
2724 struct inode * const inode = page->mapping->host;
2725 loff_t i_size = i_size_read(inode);
2726 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2730 /* Is the page fully inside i_size? */
2731 if (page->index < end_index)
2732 return __block_write_full_page(inode, page, get_block, wbc);
2734 /* Is the page fully outside i_size? (truncate in progress) */
2735 offset = i_size & (PAGE_CACHE_SIZE-1);
2736 if (page->index >= end_index+1 || !offset) {
2738 * The page may have dirty, unmapped buffers. For example,
2739 * they may have been added in ext3_writepage(). Make them
2740 * freeable here, so the page does not leak.
2742 do_invalidatepage(page, 0);
2744 return 0; /* don't care */
2748 * The page straddles i_size. It must be zeroed out on each and every
2749 * writepage invocation because it may be mmapped. "A file is mapped
2750 * in multiples of the page size. For a file that is not a multiple of
2751 * the page size, the remaining memory is zeroed when mapped, and
2752 * writes to that region are not written out to the file."
2754 kaddr = kmap_atomic(page, KM_USER0);
2755 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2756 flush_dcache_page(page);
2757 kunmap_atomic(kaddr, KM_USER0);
2758 return __block_write_full_page(inode, page, get_block, wbc);
2761 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2762 get_block_t *get_block)
2764 struct buffer_head tmp;
2765 struct inode *inode = mapping->host;
2768 tmp.b_size = 1 << inode->i_blkbits;
2769 get_block(inode, block, &tmp, 0);
2770 return tmp.b_blocknr;
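/*
 * Illustrative sketch (not part of the original file): a filesystem's ->bmap
 * method is usually just a thin wrapper that forwards to
 * generic_block_bmap() with the filesystem's get_block.  myfs_bmap and
 * myfs_get_block are hypothetical.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif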
2773 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2775 struct buffer_head *bh = bio->bi_private;
2780 if (err == -EOPNOTSUPP) {
2781 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2782 set_bit(BH_Eopnotsupp, &bh->b_state);
2785 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2790 int submit_bh(int rw, struct buffer_head * bh)
2795 BUG_ON(!buffer_locked(bh));
2796 BUG_ON(!buffer_mapped(bh));
2797 BUG_ON(!bh->b_end_io);
2799 if (buffer_ordered(bh) && (rw == WRITE))
2803 * Only clear out a write error when rewriting; should this
2804 * include WRITE_SYNC as well?
2806 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2807 clear_buffer_write_io_error(bh);
2810 * from here on down, it's all bio -- do the initial mapping,
2811 * submit_bio -> generic_make_request may further map this bio around
2813 bio = bio_alloc(GFP_NOIO, 1);
2815 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2816 bio->bi_bdev = bh->b_bdev;
2817 bio->bi_io_vec[0].bv_page = bh->b_page;
2818 bio->bi_io_vec[0].bv_len = bh->b_size;
2819 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2823 bio->bi_size = bh->b_size;
2825 bio->bi_end_io = end_bio_bh_io_sync;
2826 bio->bi_private = bh;
2829 submit_bio(rw, bio);
2831 if (bio_flagged(bio, BIO_EOPNOTSUPP))
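/*
 * Illustrative sketch (not part of the original file): a synchronous read of
 * a single buffer via submit_bh(), following the usual pattern of taking an
 * extra reference that end_buffer_read_sync() drops.  The caller is assumed
 * to already hold a reference on bh.
 */
#if 0
static int read_one_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif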
2839 * ll_rw_block: low-level access to block devices (DEPRECATED)
2840 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2841 * @nr: number of &struct buffer_heads in the array
2842 * @bhs: array of pointers to &struct buffer_head
2844 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2845 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2846 * %SWRITE is like %WRITE only we make sure that the *current* data in the
2847 * buffers is sent to disk. The fourth %READA option is described in the documentation
2848 * for generic_make_request() which ll_rw_block() calls.
2850 * This function drops any buffer that it cannot get a lock on (with the
2851 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2852 * clean when doing a write request, and any buffer that appears to be
2853 * up-to-date when doing a read request. Further it marks as clean buffers that
2854 * are processed for writing (the buffer cache won't assume that they are
2855 * actually clean until the buffer gets unlocked).
2857 * ll_rw_block sets b_end_io to a simple completion handler that marks
2858 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2861 * All of the buffers must be for the same device, and their size must be a
2862 * multiple of the current approved size for the device.
2864 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2868 for (i = 0; i < nr; i++) {
2869 struct buffer_head *bh = bhs[i];
2873 else if (test_set_buffer_locked(bh))
2876 if (rw == WRITE || rw == SWRITE) {
2877 if (test_clear_buffer_dirty(bh)) {
2878 bh->b_end_io = end_buffer_write_sync;
2880 submit_bh(WRITE, bh);
2884 if (!buffer_uptodate(bh)) {
2885 bh->b_end_io = end_buffer_read_sync;
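/*
 * Illustrative sketch (not part of the original file): a common caller
 * pattern is to start reads on several buffers with ll_rw_block() and then
 * wait on each one, checking uptodate afterwards.
 */
#if 0
static int read_buffers_sync(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif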
2896 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2897 * and then start new I/O and wait upon that. The caller must have a ref on the buffer.
2900 int sync_dirty_buffer(struct buffer_head *bh)
2904 WARN_ON(atomic_read(&bh->b_count) < 1);
2906 if (test_clear_buffer_dirty(bh)) {
2908 bh->b_end_io = end_buffer_write_sync;
2909 ret = submit_bh(WRITE, bh);
2911 if (buffer_eopnotsupp(bh)) {
2912 clear_buffer_eopnotsupp(bh);
2915 if (!ret && !buffer_uptodate(bh))
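/*
 * Illustrative sketch (not part of the original file): a data-integrity
 * update of a single metadata block, e.g. a superblock copy, via
 * sync_dirty_buffer().  The "modify the contents" step is hypothetical.
 */
#if 0
static int write_metadata_block_sync(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;

	/* ... modify the block's contents here ... */

	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submits and waits for the write */
	brelse(bh);
	return err;
}
#endif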
2924 * try_to_free_buffers() checks if all the buffers on this particular page
2925 * are unused, and releases them if so.
2927 * Exclusion against try_to_free_buffers may be obtained by either
2928 * locking the page or by holding its mapping's private_lock.
2930 * If the page is dirty but all the buffers are clean then we need to
2931 * be sure to mark the page clean as well. This is because the page
2932 * may be against a block device, and a later reattachment of buffers
2933 * to a dirty page will set *all* buffers dirty. Which would corrupt
2934 * filesystem data on the same device.
2936 * The same applies to regular filesystem pages: if all the buffers are
2937 * clean then we set the page clean and proceed. To do that, we require
2938 * total exclusion from __set_page_dirty_buffers(). That is obtained with the mapping's private_lock.
2941 * try_to_free_buffers() is non-blocking.
2943 static inline int buffer_busy(struct buffer_head *bh)
2945 return atomic_read(&bh->b_count) |
2946 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2950 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2952 struct buffer_head *head = page_buffers(page);
2953 struct buffer_head *bh;
2957 if (buffer_write_io_error(bh) && page->mapping)
2958 set_bit(AS_EIO, &page->mapping->flags);
2959 if (buffer_busy(bh))
2961 bh = bh->b_this_page;
2962 } while (bh != head);
2965 struct buffer_head *next = bh->b_this_page;
2967 if (!list_empty(&bh->b_assoc_buffers))
2968 __remove_assoc_queue(bh);
2970 } while (bh != head);
2971 *buffers_to_free = head;
2972 __clear_page_buffers(page);
2978 int try_to_free_buffers(struct page *page)
2980 struct address_space * const mapping = page->mapping;
2981 struct buffer_head *buffers_to_free = NULL;
2984 BUG_ON(!PageLocked(page));
2985 if (PageWriteback(page))
2988 if (mapping == NULL) { /* can this still happen? */
2989 ret = drop_buffers(page, &buffers_to_free);
2993 spin_lock(&mapping->private_lock);
2994 ret = drop_buffers(page, &buffers_to_free);
2995 spin_unlock(&mapping->private_lock);
2998 * If the filesystem writes its buffers by hand (eg ext3)
2999 * then we can have clean buffers against a dirty page. We
3000 * clean the page here; otherwise later reattachment of buffers
3001 * could encounter a non-uptodate page, which is unresolvable.
3002 * This only applies in the rare case where try_to_free_buffers
3003 * succeeds but the page is not freed.
3005 clear_page_dirty(page);
3008 if (buffers_to_free) {
3009 struct buffer_head *bh = buffers_to_free;
3012 struct buffer_head *next = bh->b_this_page;
3013 free_buffer_head(bh);
3015 } while (bh != buffers_to_free);
3019 EXPORT_SYMBOL(try_to_free_buffers);
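/*
 * Illustrative sketch (not part of the original file): most buffer-backed
 * filesystems point ->releasepage at a thin wrapper around
 * try_to_free_buffers().  myfs_releasepage is hypothetical.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!page_has_buffers(page))
		return 1;	/* nothing attached, page may be released */
	return try_to_free_buffers(page);
}
#endif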
3021 void block_sync_page(struct page *page)
3023 struct address_space *mapping;
3026 mapping = page_mapping(page);
3028 blk_run_backing_dev(mapping->backing_dev_info, page);
3032 * There are no bdflush tunables left. But distributions are
3033 * still running obsolete flush daemons, so we terminate them here.
3035 * Use of bdflush() is deprecated and will be removed in a future kernel.
3036 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3038 asmlinkage long sys_bdflush(int func, long data)
3040 static int msg_count;
3042 if (!capable(CAP_SYS_ADMIN))
3045 if (msg_count < 5) {
3048 "warning: process `%s' used the obsolete bdflush"
3049 " system call\n", current->comm);
3050 printk(KERN_INFO "Fix your initscripts?\n");
3059 * Buffer-head allocation
3061 static kmem_cache_t *bh_cachep;
3064 * Once the number of bh's in the machine exceeds this level, we start
3065 * stripping them in writeback.
3067 static int max_buffer_heads;
3069 int buffer_heads_over_limit;
3071 struct bh_accounting {
3072 int nr; /* Number of live bh's */
3073 int ratelimit; /* Limit cacheline bouncing */
3076 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3078 static void recalc_bh_state(void)
3083 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3085 __get_cpu_var(bh_accounting).ratelimit = 0;
3086 for_each_online_cpu(i)
3087 tot += per_cpu(bh_accounting, i).nr;
3088 buffer_heads_over_limit = (tot > max_buffer_heads);
3091 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3093 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3095 get_cpu_var(bh_accounting).nr++;
3097 put_cpu_var(bh_accounting);
3101 EXPORT_SYMBOL(alloc_buffer_head);
3103 void free_buffer_head(struct buffer_head *bh)
3105 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3106 kmem_cache_free(bh_cachep, bh);
3107 get_cpu_var(bh_accounting).nr--;
3109 put_cpu_var(bh_accounting);
3111 EXPORT_SYMBOL(free_buffer_head);
3114 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3116 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3117 SLAB_CTOR_CONSTRUCTOR) {
3118 struct buffer_head * bh = (struct buffer_head *)data;
3120 memset(bh, 0, sizeof(*bh));
3121 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3125 #ifdef CONFIG_HOTPLUG_CPU
3126 static void buffer_exit_cpu(int cpu)
3129 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3131 for (i = 0; i < BH_LRU_SIZE; i++) {
3135 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3136 per_cpu(bh_accounting, cpu).nr = 0;
3137 put_cpu_var(bh_accounting);
3140 static int buffer_cpu_notify(struct notifier_block *self,
3141 unsigned long action, void *hcpu)
3143 if (action == CPU_DEAD)
3144 buffer_exit_cpu((unsigned long)hcpu);
3147 #endif /* CONFIG_HOTPLUG_CPU */
3149 void __init buffer_init(void)
3153 bh_cachep = kmem_cache_create("buffer_head",
3154 sizeof(struct buffer_head), 0,
3155 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3161 * Limit the bh occupancy to 10% of ZONE_NORMAL
3163 nrpages = (nr_free_buffer_pages() * 10) / 100;
3164 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3165 hotcpu_notifier(buffer_cpu_notify, 0);
3168 EXPORT_SYMBOL(__bforget);
3169 EXPORT_SYMBOL(__brelse);
3170 EXPORT_SYMBOL(__wait_on_buffer);
3171 EXPORT_SYMBOL(block_commit_write);
3172 EXPORT_SYMBOL(block_prepare_write);
3173 EXPORT_SYMBOL(block_read_full_page);
3174 EXPORT_SYMBOL(block_sync_page);
3175 EXPORT_SYMBOL(block_truncate_page);
3176 EXPORT_SYMBOL(block_write_full_page);
3177 EXPORT_SYMBOL(cont_prepare_write);
3178 EXPORT_SYMBOL(end_buffer_read_sync);
3179 EXPORT_SYMBOL(end_buffer_write_sync);
3180 EXPORT_SYMBOL(file_fsync);
3181 EXPORT_SYMBOL(fsync_bdev);
3182 EXPORT_SYMBOL(generic_block_bmap);
3183 EXPORT_SYMBOL(generic_commit_write);
3184 EXPORT_SYMBOL(generic_cont_expand);
3185 EXPORT_SYMBOL(generic_cont_expand_simple);
3186 EXPORT_SYMBOL(init_buffer);
3187 EXPORT_SYMBOL(invalidate_bdev);
3188 EXPORT_SYMBOL(ll_rw_block);
3189 EXPORT_SYMBOL(mark_buffer_dirty);
3190 EXPORT_SYMBOL(submit_bh);
3191 EXPORT_SYMBOL(sync_dirty_buffer);
3192 EXPORT_SYMBOL(unlock_buffer);