4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/capability.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 static void invalidate_bh_lrus(void);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void fastcall __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void fastcall unlock_buffer(struct buffer_head *bh)
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
90 void __wait_on_buffer(struct buffer_head * bh)
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
96 __clear_page_buffers(struct page *page)
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
103 static void buffer_io_error(struct buffer_head *bh)
105 char b[BDEVNAME_SIZE];
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
114 * unlock the buffer. This is what ll_rw_block uses too.
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
119 set_buffer_uptodate(bh);
121 /* This happens, due to failed READA attempts. */
122 clear_buffer_uptodate(bh);
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 char b[BDEVNAME_SIZE];
133 set_buffer_uptodate(bh);
135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 printk(KERN_WARNING "lost page write due to "
139 bdevname(bh->b_bdev, b));
141 set_buffer_write_io_error(bh);
142 clear_buffer_uptodate(bh);
149 * Write out and wait upon all the dirty data associated with a block
150 * device via its mapping. Does not take the superblock lock.
152 int sync_blockdev(struct block_device *bdev)
157 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
160 EXPORT_SYMBOL(sync_blockdev);
162 static void __fsync_super(struct super_block *sb)
164 sync_inodes_sb(sb, 0);
167 if (sb->s_dirt && sb->s_op->write_super)
168 sb->s_op->write_super(sb);
170 if (sb->s_op->sync_fs)
171 sb->s_op->sync_fs(sb, 1);
172 sync_blockdev(sb->s_bdev);
173 sync_inodes_sb(sb, 1);
177 * Write out and wait upon all dirty data associated with this
178 * superblock. Filesystem data as well as the underlying block
179 * device. Takes the superblock lock.
181 int fsync_super(struct super_block *sb)
184 return sync_blockdev(sb->s_bdev);
187 EXPORT_SYMBOL(fsync_super);
190 * Write out and wait upon all dirty data associated with this
191 * device. Filesystem data as well as the underlying block
192 * device. Takes the superblock lock.
194 int fsync_bdev(struct block_device *bdev)
196 struct super_block *sb = get_super(bdev);
198 int res = fsync_super(sb);
202 return sync_blockdev(bdev);
206 * freeze_bdev -- lock a filesystem and force it into a consistent state
207 * @bdev: blockdevice to lock
209 * This takes the block device bd_mount_mutex to make sure no new mounts
210 * happen on bdev until thaw_bdev() is called.
211 * If a superblock is found on this device, we take the s_umount semaphore
212 * on it to make sure nobody unmounts until the snapshot creation is done.
214 struct super_block *freeze_bdev(struct block_device *bdev)
216 struct super_block *sb;
218 mutex_lock(&bdev->bd_mount_mutex);
219 sb = get_super(bdev);
220 if (sb && !(sb->s_flags & MS_RDONLY)) {
221 sb->s_frozen = SB_FREEZE_WRITE;
226 sb->s_frozen = SB_FREEZE_TRANS;
229 sync_blockdev(sb->s_bdev);
231 if (sb->s_op->write_super_lockfs)
232 sb->s_op->write_super_lockfs(sb);
236 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
238 EXPORT_SYMBOL(freeze_bdev);
241 * thaw_bdev -- unlock filesystem
242 * @bdev: blockdevice to unlock
243 * @sb: associated superblock
245 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
247 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
250 BUG_ON(sb->s_bdev != bdev);
252 if (sb->s_op->unlockfs)
253 sb->s_op->unlockfs(sb);
254 sb->s_frozen = SB_UNFROZEN;
256 wake_up(&sb->s_wait_unfrozen);
260 mutex_unlock(&bdev->bd_mount_mutex);
262 EXPORT_SYMBOL(thaw_bdev);
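
/*
 * Illustrative sketch (not part of the original file): how a snapshot-style
 * caller, such as a device-mapper target, might pair freeze_bdev() with
 * thaw_bdev().  The function name is hypothetical; only the freeze/thaw
 * calls themselves come from the API above.
 */
static void example_snapshot_of_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	/* Quiesce the filesystem (if any) and block new mounts. */
	sb = freeze_bdev(bdev);

	/* ... take the snapshot of the now-consistent device here ... */

	/* Let writes and mounts proceed again. */
	thaw_bdev(bdev, sb);
}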
265 * sync everything. Start out by waking pdflush, because that writes back
266 * all queues in parallel.
268 static void do_sync(unsigned long wait)
271 sync_inodes(0); /* All mappings, inodes and their blockdevs */
273 sync_supers(); /* Write the superblocks */
274 sync_filesystems(0); /* Start syncing the filesystems */
275 sync_filesystems(wait); /* Waitingly sync the filesystems */
276 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
278 printk("Emergency Sync complete\n");
279 if (unlikely(laptop_mode))
280 laptop_sync_completion();
283 asmlinkage long sys_sync(void)
289 void emergency_sync(void)
291 pdflush_operation(do_sync, 0);
295 * Generic function to fsync a file.
297 * filp may be NULL if called via the msync of a vma.
300 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
302 struct inode * inode = dentry->d_inode;
303 struct super_block * sb;
306 /* sync the inode to buffers */
307 ret = write_inode_now(inode, 0);
309 /* sync the superblock to buffers */
312 if (sb->s_op->write_super)
313 sb->s_op->write_super(sb);
316 /* .. finally sync the buffers to disk */
317 err = sync_blockdev(sb->s_bdev);
323 long do_fsync(struct file *file, int datasync)
327 struct address_space *mapping = file->f_mapping;
329 if (!file->f_op || !file->f_op->fsync) {
330 /* Why? We can still call filemap_fdatawrite */
335 ret = filemap_fdatawrite(mapping);
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
353 static long __do_fsync(unsigned int fd, int datasync)
360 ret = do_fsync(file, datasync);
366 asmlinkage long sys_fsync(unsigned int fd)
368 return __do_fsync(fd, 0);
371 asmlinkage long sys_fdatasync(unsigned int fd)
373 return __do_fsync(fd, 1);
377 * Various filesystems appear to want __find_get_block to be non-blocking.
378 * But it's the page lock which protects the buffers. To get around this,
379 * we get exclusion from try_to_free_buffers with the blockdev mapping's
382 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
383 * may be quite high. This code could TryLock the page, and if that
384 * succeeds, there is no need to take private_lock. (But if
385 * private_lock is contended then so is mapping->tree_lock).
387 static struct buffer_head *
388 __find_get_block_slow(struct block_device *bdev, sector_t block)
390 struct inode *bd_inode = bdev->bd_inode;
391 struct address_space *bd_mapping = bd_inode->i_mapping;
392 struct buffer_head *ret = NULL;
394 struct buffer_head *bh;
395 struct buffer_head *head;
399 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
400 page = find_get_page(bd_mapping, index);
404 spin_lock(&bd_mapping->private_lock);
405 if (!page_has_buffers(page))
407 head = page_buffers(page);
410 if (bh->b_blocknr == block) {
415 if (!buffer_mapped(bh))
417 bh = bh->b_this_page;
418 } while (bh != head);
420 /* we might be here because some of the buffers on this page are
421 * not mapped. This is due to various races between
422 * file io on the block device and getblk. It gets dealt with
423 * elsewhere, don't buffer_error if we had some unmapped buffers
426 printk("__find_get_block_slow() failed. "
427 "block=%llu, b_blocknr=%llu\n",
428 (unsigned long long)block,
429 (unsigned long long)bh->b_blocknr);
430 printk("b_state=0x%08lx, b_size=%zu\n",
431 bh->b_state, bh->b_size);
432 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
435 spin_unlock(&bd_mapping->private_lock);
436 page_cache_release(page);
441 /* If invalidate_buffers() will trash dirty buffers, it means some kind
442 of fs corruption is going on. Trashing dirty data always implies losing
443 information that was supposed to be just stored on the physical layer
446 Thus invalidate_buffers in general usage is not allowed to trash
447 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
448 be preserved. These buffers are simply skipped.
450 We also skip buffers which are still in use. For example this can
451 happen if a userspace program is reading the block device.
453 NOTE: In the case where the user removed a removable-media disk even if
454 there's still dirty data not synced on disk (due to a bug in the device driver
455 or to an error by the user), by not destroying the dirty buffers we could
456 also generate corruption on the next media inserted, thus a parameter is
457 necessary to handle this case in the safest way possible (trying
458 to not corrupt also the new disk inserted with the data belonging to
459 the old now corrupted disk). Also for the ramdisk the natural thing
460 to do in order to release the ramdisk memory is to destroy dirty buffers.
462 These are two special cases. Normal usage implies that the device driver
463 issues a sync on the device (without waiting for I/O completion) and
464 then an invalidate_buffers call that doesn't trash dirty buffers.
466 For handling cache coherency with the blkdev pagecache, the 'update' case
467 has been introduced. It is needed to re-read from disk any pinned
468 buffer. NOTE: re-reading from disk is destructive so we can do it only
469 when we assume nobody is changing the buffercache under our I/O and when
470 we think the disk contains more recent information than the buffercache.
471 The update == 1 pass marks the buffers we need to update, the update == 2
472 pass does the actual I/O. */
473 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
475 struct address_space *mapping = bdev->bd_inode->i_mapping;
477 if (mapping->nrpages == 0)
480 invalidate_bh_lrus();
482 * FIXME: what about destroy_dirty_buffers?
483 * We really want to use invalidate_inode_pages2() for
484 * that, but not until that's cleaned up.
486 invalidate_inode_pages(mapping);
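
/*
 * Illustrative sketch (not part of the original file): the "normal usage"
 * described in the comment above - a driver syncs the device and then
 * invalidates clean, unused buffers without trashing dirty ones.  The
 * function name is hypothetical.
 */
static void example_revalidate_bdev(struct block_device *bdev)
{
	/* Push dirty pagecache/buffers out to the device ... */
	sync_blockdev(bdev);
	/* ... then drop the clean cached copies; don't destroy dirty data. */
	invalidate_bdev(bdev, 0);
}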
490 * Kick pdflush then try to free up some ZONE_NORMAL memory.
492 static void free_more_memory(void)
497 wakeup_pdflush(1024);
500 for_each_online_pgdat(pgdat) {
501 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
503 try_to_free_pages(zones, GFP_NOFS);
508 * I/O completion handler for block_read_full_page() - pages
509 * which come unlocked at the end of I/O.
511 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
514 struct buffer_head *first;
515 struct buffer_head *tmp;
517 int page_uptodate = 1;
519 BUG_ON(!buffer_async_read(bh));
523 set_buffer_uptodate(bh);
525 clear_buffer_uptodate(bh);
526 if (printk_ratelimit())
532 * Be _very_ careful from here on. Bad things can happen if
533 * two buffer heads end IO at almost the same time and both
534 * decide that the page is now completely done.
536 first = page_buffers(page);
537 local_irq_save(flags);
538 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
539 clear_buffer_async_read(bh);
543 if (!buffer_uptodate(tmp))
545 if (buffer_async_read(tmp)) {
546 BUG_ON(!buffer_locked(tmp));
549 tmp = tmp->b_this_page;
551 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
552 local_irq_restore(flags);
555 * If none of the buffers had errors and they are all
556 * uptodate then we can set the page uptodate.
558 if (page_uptodate && !PageError(page))
559 SetPageUptodate(page);
564 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
565 local_irq_restore(flags);
570 * Completion handler for block_write_full_page() - pages which are unlocked
571 * during I/O, and which have PageWriteback cleared upon I/O completion.
573 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
575 char b[BDEVNAME_SIZE];
577 struct buffer_head *first;
578 struct buffer_head *tmp;
581 BUG_ON(!buffer_async_write(bh));
585 set_buffer_uptodate(bh);
587 if (printk_ratelimit()) {
589 printk(KERN_WARNING "lost page write due to "
591 bdevname(bh->b_bdev, b));
593 set_bit(AS_EIO, &page->mapping->flags);
594 clear_buffer_uptodate(bh);
598 first = page_buffers(page);
599 local_irq_save(flags);
600 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
602 clear_buffer_async_write(bh);
604 tmp = bh->b_this_page;
606 if (buffer_async_write(tmp)) {
607 BUG_ON(!buffer_locked(tmp));
610 tmp = tmp->b_this_page;
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
614 end_page_writeback(page);
618 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
619 local_irq_restore(flags);
624 * If a page's buffers are under async readin (end_buffer_async_read
625 * completion) then there is a possibility that another thread of
626 * control could lock one of the buffers after it has completed
627 * but while some of the other buffers have not completed. This
628 * locked buffer would confuse end_buffer_async_read() into not unlocking
629 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
630 * that this buffer is not under async I/O.
632 * The page comes unlocked when it has no locked buffer_async buffers
635 * PageLocked prevents anyone starting new async I/O reads any of
638 * PageWriteback is used to prevent simultaneous writeout of the same
641 * PageLocked prevents anyone from starting writeback of a page which is
642 * under read I/O (PageWriteback is only ever set against a locked page).
644 static void mark_buffer_async_read(struct buffer_head *bh)
646 bh->b_end_io = end_buffer_async_read;
647 set_buffer_async_read(bh);
650 void mark_buffer_async_write(struct buffer_head *bh)
652 bh->b_end_io = end_buffer_async_write;
653 set_buffer_async_write(bh);
655 EXPORT_SYMBOL(mark_buffer_async_write);
659 * fs/buffer.c contains helper functions for buffer-backed address space's
660 * fsync functions. A common requirement for buffer-based filesystems is
661 * that certain data from the backing blockdev needs to be written out for
662 * a successful fsync(). For example, ext2 indirect blocks need to be
663 * written back and waited upon before fsync() returns.
665 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
666 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
667 * management of a list of dependent buffers at ->i_mapping->private_list.
669 * Locking is a little subtle: try_to_free_buffers() will remove buffers
670 * from their controlling inode's queue when they are being freed. But
671 * try_to_free_buffers() will be operating against the *blockdev* mapping
672 * at the time, not against the S_ISREG file which depends on those buffers.
673 * So the locking for private_list is via the private_lock in the address_space
674 * which backs the buffers. Which is different from the address_space
675 * against which the buffers are listed. So for a particular address_space,
676 * mapping->private_lock does *not* protect mapping->private_list! In fact,
677 * mapping->private_list will always be protected by the backing blockdev's
680 * Which introduces a requirement: all buffers on an address_space's
681 * ->private_list must be from the same address_space: the blockdev's.
683 * address_spaces which do not place buffers at ->private_list via these
684 * utility functions are free to use private_lock and private_list for
685 * whatever they want. The only requirement is that list_empty(private_list)
686 * be true at clear_inode() time.
688 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
689 * filesystems should do that. invalidate_inode_buffers() should just go
690 * BUG_ON(!list_empty).
692 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
693 * take an address_space, not an inode. And it should be called
694 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
697 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
698 * list if it is already on a list. Because if the buffer is on a list,
699 * it *must* already be on the right one. If not, the filesystem is being
700 * silly. This will save a ton of locking. But first we have to ensure
701 * that buffers are taken *off* the old inode's list when they are freed
702 * (presumably in truncate). That requires careful auditing of all
703 * filesystems (do it inside bforget()). It could also be done by bringing
708 * The buffer's backing address_space's private_lock must be held
710 static inline void __remove_assoc_queue(struct buffer_head *bh)
712 list_del_init(&bh->b_assoc_buffers);
715 int inode_has_buffers(struct inode *inode)
717 return !list_empty(&inode->i_data.private_list);
721 * osync is designed to support O_SYNC io. It waits synchronously for
722 * all already-submitted IO to complete, but does not queue any new
723 * writes to the disk.
725 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
726 * you dirty the buffers, and then use osync_inode_buffers to wait for
727 * completion. Any other dirty buffers which are not yet queued for
728 * write will not be flushed to disk by the osync.
730 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
732 struct buffer_head *bh;
738 list_for_each_prev(p, list) {
740 if (buffer_locked(bh)) {
744 if (!buffer_uptodate(bh))
756 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
758 * @mapping: the mapping which wants those buffers written
760 * Starts I/O against the buffers at mapping->private_list, and waits upon
763 * Basically, this is a convenience function for fsync().
764 * @mapping is a file or directory which needs those buffers to be written for
765 * a successful fsync().
767 int sync_mapping_buffers(struct address_space *mapping)
769 struct address_space *buffer_mapping = mapping->assoc_mapping;
771 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
774 return fsync_buffers_list(&buffer_mapping->private_lock,
775 &mapping->private_list);
777 EXPORT_SYMBOL(sync_mapping_buffers);
780 * Called when we've recently written block `bblock', and it is known that
781 * `bblock' was for a buffer_boundary() buffer. This means that the block at
782 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
783 * dirty, schedule it for IO. So that indirects merge nicely with their data.
785 void write_boundary_block(struct block_device *bdev,
786 sector_t bblock, unsigned blocksize)
788 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
790 if (buffer_dirty(bh))
791 ll_rw_block(WRITE, 1, &bh);
796 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
798 struct address_space *mapping = inode->i_mapping;
799 struct address_space *buffer_mapping = bh->b_page->mapping;
801 mark_buffer_dirty(bh);
802 if (!mapping->assoc_mapping) {
803 mapping->assoc_mapping = buffer_mapping;
805 BUG_ON(mapping->assoc_mapping != buffer_mapping);
807 if (list_empty(&bh->b_assoc_buffers)) {
808 spin_lock(&buffer_mapping->private_lock);
809 list_move_tail(&bh->b_assoc_buffers,
810 &mapping->private_list);
811 spin_unlock(&buffer_mapping->private_lock);
814 EXPORT_SYMBOL(mark_buffer_dirty_inode);
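
/*
 * Illustrative sketch (not part of the original file): the two halves of the
 * private_list machinery described above, as a simple buffer-backed
 * filesystem might use them.  When it dirties a metadata buffer (say an
 * indirect block) on behalf of an inode it calls mark_buffer_dirty_inode();
 * its ->fsync later calls sync_mapping_buffers() to write and wait on that
 * list.  The function names are hypothetical.
 */
static void example_dirty_indirect_block(struct buffer_head *bh,
					 struct inode *inode)
{
	/* Dirty the buffer and queue it on inode->i_mapping->private_list. */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fs_fsync(struct file *file, struct dentry *dentry,
			    int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* Write out and wait upon the buffers queued above. */
	return sync_mapping_buffers(inode->i_mapping);
}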
817 * Add a page to the dirty page list.
819 * It is a sad fact of life that this function is called from several places
820 * deeply under spinlocking. It may not sleep.
822 * If the page has buffers, the uptodate buffers are set dirty, to preserve
823 * dirty-state coherency between the page and the buffers. If the page does
824 * not have buffers then when they are later attached they will all be set
827 * The buffers are dirtied before the page is dirtied. There's a small race
828 * window in which a writepage caller may see the page cleanness but not the
829 * buffer dirtiness. That's fine. If this code were to set the page dirty
830 * before the buffers, a concurrent writepage caller could clear the page dirty
831 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832 * page on the dirty page list.
834 * We use private_lock to lock against try_to_free_buffers while using the
835 * page's buffer list. Also use this to protect against clean buffers being
836 * added to the page after it was set dirty.
838 * FIXME: may need to call ->reservepage here as well. That's rather up to the
839 * address_space though.
841 int __set_page_dirty_buffers(struct page *page)
843 struct address_space * const mapping = page_mapping(page);
845 if (unlikely(!mapping))
846 return !TestSetPageDirty(page);
848 spin_lock(&mapping->private_lock);
849 if (page_has_buffers(page)) {
850 struct buffer_head *head = page_buffers(page);
851 struct buffer_head *bh = head;
854 set_buffer_dirty(bh);
855 bh = bh->b_this_page;
856 } while (bh != head);
858 spin_unlock(&mapping->private_lock);
860 if (!TestSetPageDirty(page)) {
861 write_lock_irq(&mapping->tree_lock);
862 if (page->mapping) { /* Race with truncate? */
863 if (mapping_cap_account_dirty(mapping))
864 __inc_zone_page_state(page, NR_FILE_DIRTY);
865 radix_tree_tag_set(&mapping->page_tree,
867 PAGECACHE_TAG_DIRTY);
869 write_unlock_irq(&mapping->tree_lock);
870 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
875 EXPORT_SYMBOL(__set_page_dirty_buffers);
878 * Write out and wait upon a list of buffers.
880 * We have conflicting pressures: we want to make sure that all
881 * initially dirty buffers get waited on, but that any subsequently
882 * dirtied buffers don't. After all, we don't want fsync to last
883 * forever if somebody is actively writing to the file.
885 * Do this in two main stages: first we copy dirty buffers to a
886 * temporary inode list, queueing the writes as we go. Then we clean
887 * up, waiting for those writes to complete.
889 * During this second stage, any subsequent updates to the file may end
890 * up refiling the buffer on the original inode's dirty list again, so
891 * there is a chance we will end up with a buffer queued for write but
892 * not yet completed on that list. So, as a final cleanup we go through
893 * the osync code to catch these locked, dirty buffers without requeuing
894 * any newly dirty buffers for write.
896 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
898 struct buffer_head *bh;
899 struct list_head tmp;
902 INIT_LIST_HEAD(&tmp);
905 while (!list_empty(list)) {
906 bh = BH_ENTRY(list->next);
907 list_del_init(&bh->b_assoc_buffers);
908 if (buffer_dirty(bh) || buffer_locked(bh)) {
909 list_add(&bh->b_assoc_buffers, &tmp);
910 if (buffer_dirty(bh)) {
914 * Ensure any pending I/O completes so that
915 * ll_rw_block() actually writes the current
916 * contents - it is a noop if I/O is still in
917 * flight on potentially older contents.
919 ll_rw_block(SWRITE, 1, &bh);
926 while (!list_empty(&tmp)) {
927 bh = BH_ENTRY(tmp.prev);
928 __remove_assoc_queue(bh);
932 if (!buffer_uptodate(bh))
939 err2 = osync_buffers_list(lock, list);
947 * Invalidate any and all dirty buffers on a given inode. We are
948 * probably unmounting the fs, but that doesn't mean we have already
949 * done a sync(). Just drop the buffers from the inode list.
951 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
952 * assumes that all the buffers are against the blockdev. Not true
955 void invalidate_inode_buffers(struct inode *inode)
957 if (inode_has_buffers(inode)) {
958 struct address_space *mapping = &inode->i_data;
959 struct list_head *list = &mapping->private_list;
960 struct address_space *buffer_mapping = mapping->assoc_mapping;
962 spin_lock(&buffer_mapping->private_lock);
963 while (!list_empty(list))
964 __remove_assoc_queue(BH_ENTRY(list->next));
965 spin_unlock(&buffer_mapping->private_lock);
970 * Remove any clean buffers from the inode's buffer list. This is called
971 * when we're trying to free the inode itself. Those buffers can pin it.
973 * Returns true if all buffers were removed.
975 int remove_inode_buffers(struct inode *inode)
979 if (inode_has_buffers(inode)) {
980 struct address_space *mapping = &inode->i_data;
981 struct list_head *list = &mapping->private_list;
982 struct address_space *buffer_mapping = mapping->assoc_mapping;
984 spin_lock(&buffer_mapping->private_lock);
985 while (!list_empty(list)) {
986 struct buffer_head *bh = BH_ENTRY(list->next);
987 if (buffer_dirty(bh)) {
991 __remove_assoc_queue(bh);
993 spin_unlock(&buffer_mapping->private_lock);
999 * Create the appropriate buffers when given a page for data area and
1000 * the size of each buffer.. Use the bh->b_this_page linked list to
1001 * follow the buffers created. Return NULL if unable to create more
1004 * The retry flag is used to differentiate async IO (paging, swapping)
1005 * which may not fail from ordinary buffer allocations.
1007 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1010 struct buffer_head *bh, *head;
1016 while ((offset -= size) >= 0) {
1017 bh = alloc_buffer_head(GFP_NOFS);
1022 bh->b_this_page = head;
1027 atomic_set(&bh->b_count, 0);
1028 bh->b_private = NULL;
1031 /* Link the buffer to its page */
1032 set_bh_page(bh, page, offset);
1034 init_buffer(bh, NULL, NULL);
1038 * In case anything failed, we just free everything we got.
1044 head = head->b_this_page;
1045 free_buffer_head(bh);
1050 * Return failure for non-async IO requests. Async IO requests
1051 * are not allowed to fail, so we have to wait until buffer heads
1052 * become available. But we don't want tasks sleeping with
1053 * partially complete buffers, so all were released above.
1058 /* We're _really_ low on memory. Now we just
1059 * wait for old buffer heads to become free due to
1060 * finishing IO. Since this is an async request and
1061 * the reserve list is empty, we're sure there are
1062 * async buffer heads in use.
1067 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1070 link_dev_buffers(struct page *page, struct buffer_head *head)
1072 struct buffer_head *bh, *tail;
1077 bh = bh->b_this_page;
1079 tail->b_this_page = head;
1080 attach_page_buffers(page, head);
1084 * Initialise the state of a blockdev page's buffers.
1087 init_page_buffers(struct page *page, struct block_device *bdev,
1088 sector_t block, int size)
1090 struct buffer_head *head = page_buffers(page);
1091 struct buffer_head *bh = head;
1092 int uptodate = PageUptodate(page);
1095 if (!buffer_mapped(bh)) {
1096 init_buffer(bh, NULL, NULL);
1098 bh->b_blocknr = block;
1100 set_buffer_uptodate(bh);
1101 set_buffer_mapped(bh);
1104 bh = bh->b_this_page;
1105 } while (bh != head);
1109 * Create the page-cache page that contains the requested block.
1111 * This is used purely for blockdev mappings.
1113 static struct page *
1114 grow_dev_page(struct block_device *bdev, sector_t block,
1115 pgoff_t index, int size)
1117 struct inode *inode = bdev->bd_inode;
1119 struct buffer_head *bh;
1121 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1125 BUG_ON(!PageLocked(page));
1127 if (page_has_buffers(page)) {
1128 bh = page_buffers(page);
1129 if (bh->b_size == size) {
1130 init_page_buffers(page, bdev, block, size);
1133 if (!try_to_free_buffers(page))
1138 * Allocate some buffers for this page
1140 bh = alloc_page_buffers(page, size, 0);
1145 * Link the page to the buffers and initialise them. Take the
1146 * lock to be atomic wrt __find_get_block(), which does not
1147 * run under the page lock.
1149 spin_lock(&inode->i_mapping->private_lock);
1150 link_dev_buffers(page, bh);
1151 init_page_buffers(page, bdev, block, size);
1152 spin_unlock(&inode->i_mapping->private_lock);
1158 page_cache_release(page);
1163 * Create buffers for the specified block device block's page. If
1164 * that page was dirty, the buffers are set dirty also.
1166 * Except that's a bug. Attaching dirty buffers to a dirty
1167 * blockdev's page can result in filesystem corruption, because
1168 * some of those buffers may be aliases of filesystem data.
1169 * grow_dev_page() will go BUG() if this happens.
1172 grow_buffers(struct block_device *bdev, sector_t block, int size)
1181 } while ((size << sizebits) < PAGE_SIZE);
1183 index = block >> sizebits;
1186 * Check for a block which wants to lie outside our maximum possible
1187 * pagecache index. (this comparison is done using sector_t types).
1189 if (unlikely(index != block >> sizebits)) {
1190 char b[BDEVNAME_SIZE];
1192 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1194 __FUNCTION__, (unsigned long long)block,
1198 block = index << sizebits;
1199 /* Create a page with the proper size buffers.. */
1200 page = grow_dev_page(bdev, block, index, size);
1204 page_cache_release(page);
1208 static struct buffer_head *
1209 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1211 /* Size must be multiple of hard sectorsize */
1212 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1213 (size < 512 || size > PAGE_SIZE))) {
1214 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1216 printk(KERN_ERR "hardsect size: %d\n",
1217 bdev_hardsect_size(bdev));
1224 struct buffer_head * bh;
1227 bh = __find_get_block(bdev, block, size);
1231 ret = grow_buffers(bdev, block, size);
1240 * The relationship between dirty buffers and dirty pages:
1242 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1243 * the page is tagged dirty in its radix tree.
1245 * At all times, the dirtiness of the buffers represents the dirtiness of
1246 * subsections of the page. If the page has buffers, the page dirty bit is
1247 * merely a hint about the true dirty state.
1249 * When a page is set dirty in its entirety, all its buffers are marked dirty
1250 * (if the page has buffers).
1252 * When a buffer is marked dirty, its page is dirtied, but the page's other
1255 * Also. When blockdev buffers are explicitly read with bread(), they
1256 * individually become uptodate. But their backing page remains not
1257 * uptodate - even if all of its buffers are uptodate. A subsequent
1258 * block_read_full_page() against that page will discover all the uptodate
1259 * buffers, will set the page uptodate and will perform no I/O.
1263 * mark_buffer_dirty - mark a buffer_head as needing writeout
1264 * @bh: the buffer_head to mark dirty
1266 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1267 * backing page dirty, then tag the page as dirty in its address_space's radix
1268 * tree and then attach the address_space's inode to its superblock's dirty
1271 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1272 * mapping->tree_lock and the global inode_lock.
1274 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1276 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1277 __set_page_dirty_nobuffers(bh->b_page);
1281 * Decrement a buffer_head's reference count. If all buffers against a page
1282 * have zero reference count, are clean and unlocked, and if the page is clean
1283 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1284 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1285 * a page but it ends up not being freed, and buffers may later be reattached).
1287 void __brelse(struct buffer_head * buf)
1289 if (atomic_read(&buf->b_count)) {
1293 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1298 * bforget() is like brelse(), except it discards any
1299 * potentially dirty data.
1301 void __bforget(struct buffer_head *bh)
1303 clear_buffer_dirty(bh);
1304 if (!list_empty(&bh->b_assoc_buffers)) {
1305 struct address_space *buffer_mapping = bh->b_page->mapping;
1307 spin_lock(&buffer_mapping->private_lock);
1308 list_del_init(&bh->b_assoc_buffers);
1309 spin_unlock(&buffer_mapping->private_lock);
1314 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1317 if (buffer_uptodate(bh)) {
1322 bh->b_end_io = end_buffer_read_sync;
1323 submit_bh(READ, bh);
1325 if (buffer_uptodate(bh))
1333 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1334 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1335 * refcount elevated by one when they're in an LRU. A buffer can only appear
1336 * once in a particular CPU's LRU. A single buffer can be present in multiple
1337 * CPU's LRUs at the same time.
1339 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1340 * sb_find_get_block().
1342 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1343 * a local interrupt disable for that.
1346 #define BH_LRU_SIZE 8
1349 struct buffer_head *bhs[BH_LRU_SIZE];
1352 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1355 #define bh_lru_lock() local_irq_disable()
1356 #define bh_lru_unlock() local_irq_enable()
1358 #define bh_lru_lock() preempt_disable()
1359 #define bh_lru_unlock() preempt_enable()
1362 static inline void check_irqs_on(void)
1364 #ifdef irqs_disabled
1365 BUG_ON(irqs_disabled());
1370 * The LRU management algorithm is dopey-but-simple. Sorry.
1372 static void bh_lru_install(struct buffer_head *bh)
1374 struct buffer_head *evictee = NULL;
1379 lru = &__get_cpu_var(bh_lrus);
1380 if (lru->bhs[0] != bh) {
1381 struct buffer_head *bhs[BH_LRU_SIZE];
1387 for (in = 0; in < BH_LRU_SIZE; in++) {
1388 struct buffer_head *bh2 = lru->bhs[in];
1393 if (out >= BH_LRU_SIZE) {
1394 BUG_ON(evictee != NULL);
1401 while (out < BH_LRU_SIZE)
1403 memcpy(lru->bhs, bhs, sizeof(bhs));
1412 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1414 static struct buffer_head *
1415 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1417 struct buffer_head *ret = NULL;
1423 lru = &__get_cpu_var(bh_lrus);
1424 for (i = 0; i < BH_LRU_SIZE; i++) {
1425 struct buffer_head *bh = lru->bhs[i];
1427 if (bh && bh->b_bdev == bdev &&
1428 bh->b_blocknr == block && bh->b_size == size) {
1431 lru->bhs[i] = lru->bhs[i - 1];
1446 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1447 * it in the LRU and mark it as accessed. If it is not present then return
1450 struct buffer_head *
1451 __find_get_block(struct block_device *bdev, sector_t block, int size)
1453 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1456 bh = __find_get_block_slow(bdev, block);
1464 EXPORT_SYMBOL(__find_get_block);
1467 * __getblk will locate (and, if necessary, create) the buffer_head
1468 * which corresponds to the passed block_device, block and size. The
1469 * returned buffer has its reference count incremented.
1471 * __getblk() cannot fail - it just keeps trying. If you pass it an
1472 * illegal block number, __getblk() will happily return a buffer_head
1473 * which represents the non-existent block. Very weird.
1475 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1476 * attempt is failing. FIXME, perhaps?
1478 struct buffer_head *
1479 __getblk(struct block_device *bdev, sector_t block, int size)
1481 struct buffer_head *bh = __find_get_block(bdev, block, size);
1485 bh = __getblk_slow(bdev, block, size);
1488 EXPORT_SYMBOL(__getblk);
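
/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * creating and filling a new metadata block with __getblk().  Since
 * __getblk() may return a buffer that is not uptodate, the caller fills in
 * the data itself before marking it uptodate and dirty.  The function name
 * is hypothetical.
 */
static void example_init_new_block(struct block_device *bdev, sector_t block,
				   int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, size);	/* fill in the new contents */
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);		/* schedule it for writeback */
	unlock_buffer(bh);
	brelse(bh);			/* drop our reference */
}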
1491 * Do async read-ahead on a buffer..
1493 void __breadahead(struct block_device *bdev, sector_t block, int size)
1495 struct buffer_head *bh = __getblk(bdev, block, size);
1497 ll_rw_block(READA, 1, &bh);
1501 EXPORT_SYMBOL(__breadahead);
1504 * __bread() - reads a specified block and returns the bh
1505 * @bdev: the block_device to read from
1506 * @block: number of block
1507 * @size: size (in bytes) to read
1509 * Reads a specified block, and returns buffer head that contains it.
1510 * It returns NULL if the block was unreadable.
1512 struct buffer_head *
1513 __bread(struct block_device *bdev, sector_t block, int size)
1515 struct buffer_head *bh = __getblk(bdev, block, size);
1517 if (likely(bh) && !buffer_uptodate(bh))
1518 bh = __bread_slow(bh);
1521 EXPORT_SYMBOL(__bread);
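
/*
 * Illustrative sketch (not part of the original file): reading an existing
 * metadata block with __bread() and releasing it with brelse().  Unlike
 * __getblk(), __bread() can return NULL if the block was unreadable.  The
 * function name is hypothetical.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* the block was unreadable */

	/* ... inspect bh->b_data here ... */

	brelse(bh);
	return 0;
}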
1524 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1525 * This doesn't race because it runs in each cpu either in irq
1526 * or with preempt disabled.
1528 static void invalidate_bh_lru(void *arg)
1530 struct bh_lru *b = &get_cpu_var(bh_lrus);
1533 for (i = 0; i < BH_LRU_SIZE; i++) {
1537 put_cpu_var(bh_lrus);
1540 static void invalidate_bh_lrus(void)
1542 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1545 void set_bh_page(struct buffer_head *bh,
1546 struct page *page, unsigned long offset)
1549 BUG_ON(offset >= PAGE_SIZE);
1550 if (PageHighMem(page))
1552 * This catches illegal uses and preserves the offset:
1554 bh->b_data = (char *)(0 + offset);
1556 bh->b_data = page_address(page) + offset;
1558 EXPORT_SYMBOL(set_bh_page);
1561 * Called when truncating a buffer on a page completely.
1563 static void discard_buffer(struct buffer_head * bh)
1566 clear_buffer_dirty(bh);
1568 clear_buffer_mapped(bh);
1569 clear_buffer_req(bh);
1570 clear_buffer_new(bh);
1571 clear_buffer_delay(bh);
1576 * try_to_release_page() - release old fs-specific metadata on a page
1578 * @page: the page which the kernel is trying to free
1579 * @gfp_mask: memory allocation flags (and I/O mode)
1581 * The address_space is to try to release any data against the page
1582 * (presumably at page->private). If the release was successful, return `1'.
1583 * Otherwise return zero.
1585 * The @gfp_mask argument specifies whether I/O may be performed to release
1586 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1588 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1590 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1592 struct address_space * const mapping = page->mapping;
1594 BUG_ON(!PageLocked(page));
1595 if (PageWriteback(page))
1598 if (mapping && mapping->a_ops->releasepage)
1599 return mapping->a_ops->releasepage(page, gfp_mask);
1600 return try_to_free_buffers(page);
1602 EXPORT_SYMBOL(try_to_release_page);
1605 * block_invalidatepage - invalidate part or all of a buffer-backed page
1607 * @page: the page which is affected
1608 * @offset: the index of the truncation point
1610 * block_invalidatepage() is called when all or part of the page has become
1611 * invalidated by a truncate operation.
1613 * block_invalidatepage() does not have to release all buffers, but it must
1614 * ensure that no dirty buffer is left outside @offset and that no I/O
1615 * is underway against any of the blocks which are outside the truncation
1616 * point. Because the caller is about to free (and possibly reuse) those
1619 void block_invalidatepage(struct page *page, unsigned long offset)
1621 struct buffer_head *head, *bh, *next;
1622 unsigned int curr_off = 0;
1624 BUG_ON(!PageLocked(page));
1625 if (!page_has_buffers(page))
1628 head = page_buffers(page);
1631 unsigned int next_off = curr_off + bh->b_size;
1632 next = bh->b_this_page;
1635 * is this block fully invalidated?
1637 if (offset <= curr_off)
1639 curr_off = next_off;
1641 } while (bh != head);
1644 * We release buffers only if the entire page is being invalidated.
1645 * The get_block cached value has been unconditionally invalidated,
1646 * so real IO is not possible anymore.
1649 try_to_release_page(page, 0);
1653 EXPORT_SYMBOL(block_invalidatepage);
1655 void do_invalidatepage(struct page *page, unsigned long offset)
1657 void (*invalidatepage)(struct page *, unsigned long);
1658 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1659 block_invalidatepage;
1660 (*invalidatepage)(page, offset);
1664 * We attach and possibly dirty the buffers atomically wrt
1665 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1666 * is already excluded via the page lock.
1668 void create_empty_buffers(struct page *page,
1669 unsigned long blocksize, unsigned long b_state)
1671 struct buffer_head *bh, *head, *tail;
1673 head = alloc_page_buffers(page, blocksize, 1);
1676 bh->b_state |= b_state;
1678 bh = bh->b_this_page;
1680 tail->b_this_page = head;
1682 spin_lock(&page->mapping->private_lock);
1683 if (PageUptodate(page) || PageDirty(page)) {
1686 if (PageDirty(page))
1687 set_buffer_dirty(bh);
1688 if (PageUptodate(page))
1689 set_buffer_uptodate(bh);
1690 bh = bh->b_this_page;
1691 } while (bh != head);
1693 attach_page_buffers(page, head);
1694 spin_unlock(&page->mapping->private_lock);
1696 EXPORT_SYMBOL(create_empty_buffers);
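
/*
 * Illustrative sketch (not part of the original file): how filesystem
 * ->readpage/->writepage paths typically attach buffers to a page that does
 * not have any yet, mirroring the calls made in __block_write_full_page()
 * and block_read_full_page() below.  Assumes the usual 1 << i_blkbits
 * blocksize convention; the function name is hypothetical.
 */
static void example_ensure_page_buffers(struct inode *inode, struct page *page)
{
	unsigned blocksize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
}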
1699 * We are taking a block for data and we don't want any output from any
1700 * buffer-cache aliases starting from return from that function and
1701 * until the moment when something will explicitly mark the buffer
1702 * dirty (hopefully that will not happen until we free that block ;-)
1703 * We don't even need to mark it not-uptodate - nobody can expect
1704 * anything from a newly allocated buffer anyway. We used to use
1705 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1706 * don't want to mark the alias unmapped, for example - it would confuse
1707 * anyone who might pick it with bread() afterwards...
1709 * Also.. Note that bforget() doesn't lock the buffer. So there can
1710 * be writeout I/O going on against recently-freed buffers. We don't
1711 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1712 * only if we really need to. That happens here.
1714 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1716 struct buffer_head *old_bh;
1720 old_bh = __find_get_block_slow(bdev, block);
1722 clear_buffer_dirty(old_bh);
1723 wait_on_buffer(old_bh);
1724 clear_buffer_req(old_bh);
1728 EXPORT_SYMBOL(unmap_underlying_metadata);
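
/*
 * Illustrative sketch (not part of the original file): a get_block()
 * implementation that has just allocated a new block on disk calls
 * unmap_underlying_metadata() so that a stale alias of that block in the
 * blockdev's pagecache cannot be written out over the new data.  This
 * mirrors the calls made from __block_prepare_write() and
 * __block_write_full_page() below; the surrounding function is hypothetical.
 */
static void example_after_block_allocation(struct buffer_head *bh)
{
	if (buffer_new(bh))
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
}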
1731 * NOTE! All mapped/uptodate combinations are valid:
1733 * Mapped Uptodate Meaning
1735 * No No "unknown" - must do get_block()
1736 * No Yes "hole" - zero-filled
1737 * Yes No "allocated" - allocated on disk, not read in
1738 * Yes Yes "valid" - allocated and up-to-date in memory.
1740 * "Dirty" is valid only with the last case (mapped+uptodate).
1744 * While block_write_full_page is writing back the dirty buffers under
1745 * the page lock, whoever dirtied the buffers may decide to clean them
1746 * again at any time. We handle that by only looking at the buffer
1747 * state inside lock_buffer().
1749 * If block_write_full_page() is called for regular writeback
1750 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1751 * locked buffer. This only can happen if someone has written the buffer
1752 * directly, with submit_bh(). At the address_space level PageWriteback
1753 * prevents this contention from occurring.
1755 static int __block_write_full_page(struct inode *inode, struct page *page,
1756 get_block_t *get_block, struct writeback_control *wbc)
1760 sector_t last_block;
1761 struct buffer_head *bh, *head;
1762 const unsigned blocksize = 1 << inode->i_blkbits;
1763 int nr_underway = 0;
1765 BUG_ON(!PageLocked(page));
1767 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1769 if (!page_has_buffers(page)) {
1770 create_empty_buffers(page, blocksize,
1771 (1 << BH_Dirty)|(1 << BH_Uptodate));
1775 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1776 * here, and the (potentially unmapped) buffers may become dirty at
1777 * any time. If a buffer becomes dirty here after we've inspected it
1778 * then we just miss that fact, and the page stays dirty.
1780 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1781 * handle that here by just cleaning them.
1784 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1785 head = page_buffers(page);
1789 * Get all the dirty buffers mapped to disk addresses and
1790 * handle any aliases from the underlying blockdev's mapping.
1793 if (block > last_block) {
1795 * mapped buffers outside i_size will occur, because
1796 * this page can be outside i_size when there is a
1797 * truncate in progress.
1800 * The buffer was zeroed by block_write_full_page()
1802 clear_buffer_dirty(bh);
1803 set_buffer_uptodate(bh);
1804 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1805 WARN_ON(bh->b_size != blocksize);
1806 err = get_block(inode, block, bh, 1);
1809 if (buffer_new(bh)) {
1810 /* blockdev mappings never come here */
1811 clear_buffer_new(bh);
1812 unmap_underlying_metadata(bh->b_bdev,
1816 bh = bh->b_this_page;
1818 } while (bh != head);
1821 if (!buffer_mapped(bh))
1824 * If it's a fully non-blocking write attempt and we cannot
1825 * lock the buffer then redirty the page. Note that this can
1826 * potentially cause a busy-wait loop from pdflush and kswapd
1827 * activity, but those code paths have their own higher-level
1830 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1832 } else if (test_set_buffer_locked(bh)) {
1833 redirty_page_for_writepage(wbc, page);
1836 if (test_clear_buffer_dirty(bh)) {
1837 mark_buffer_async_write(bh);
1841 } while ((bh = bh->b_this_page) != head);
1844 * The page and its buffers are protected by PageWriteback(), so we can
1845 * drop the bh refcounts early.
1847 BUG_ON(PageWriteback(page));
1848 set_page_writeback(page);
1851 struct buffer_head *next = bh->b_this_page;
1852 if (buffer_async_write(bh)) {
1853 submit_bh(WRITE, bh);
1857 } while (bh != head);
1862 if (nr_underway == 0) {
1864 * The page was marked dirty, but the buffers were
1865 * clean. Someone wrote them back by hand with
1866 * ll_rw_block/submit_bh. A rare case.
1870 if (!buffer_uptodate(bh)) {
1874 bh = bh->b_this_page;
1875 } while (bh != head);
1877 SetPageUptodate(page);
1878 end_page_writeback(page);
1880 * The page and buffer_heads can be released at any time from
1883 wbc->pages_skipped++; /* We didn't write this page */
1889 * ENOSPC, or some other error. We may already have added some
1890 * blocks to the file, so we need to write these out to avoid
1891 * exposing stale data.
1892 * The page is currently locked and not marked for writeback
1895 /* Recovery: lock and submit the mapped buffers */
1897 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1899 mark_buffer_async_write(bh);
1902 * The buffer may have been set dirty during
1903 * attachment to a dirty page.
1905 clear_buffer_dirty(bh);
1907 } while ((bh = bh->b_this_page) != head);
1909 BUG_ON(PageWriteback(page));
1910 set_page_writeback(page);
1913 struct buffer_head *next = bh->b_this_page;
1914 if (buffer_async_write(bh)) {
1915 clear_buffer_dirty(bh);
1916 submit_bh(WRITE, bh);
1920 } while (bh != head);
1924 static int __block_prepare_write(struct inode *inode, struct page *page,
1925 unsigned from, unsigned to, get_block_t *get_block)
1927 unsigned block_start, block_end;
1930 unsigned blocksize, bbits;
1931 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1933 BUG_ON(!PageLocked(page));
1934 BUG_ON(from > PAGE_CACHE_SIZE);
1935 BUG_ON(to > PAGE_CACHE_SIZE);
1938 blocksize = 1 << inode->i_blkbits;
1939 if (!page_has_buffers(page))
1940 create_empty_buffers(page, blocksize, 0);
1941 head = page_buffers(page);
1943 bbits = inode->i_blkbits;
1944 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1946 for(bh = head, block_start = 0; bh != head || !block_start;
1947 block++, block_start=block_end, bh = bh->b_this_page) {
1948 block_end = block_start + blocksize;
1949 if (block_end <= from || block_start >= to) {
1950 if (PageUptodate(page)) {
1951 if (!buffer_uptodate(bh))
1952 set_buffer_uptodate(bh);
1957 clear_buffer_new(bh);
1958 if (!buffer_mapped(bh)) {
1959 WARN_ON(bh->b_size != blocksize);
1960 err = get_block(inode, block, bh, 1);
1963 if (buffer_new(bh)) {
1964 unmap_underlying_metadata(bh->b_bdev,
1966 if (PageUptodate(page)) {
1967 set_buffer_uptodate(bh);
1970 if (block_end > to || block_start < from) {
1973 kaddr = kmap_atomic(page, KM_USER0);
1977 if (block_start < from)
1978 memset(kaddr+block_start,
1979 0, from-block_start);
1980 flush_dcache_page(page);
1981 kunmap_atomic(kaddr, KM_USER0);
1986 if (PageUptodate(page)) {
1987 if (!buffer_uptodate(bh))
1988 set_buffer_uptodate(bh);
1991 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1992 (block_start < from || block_end > to)) {
1993 ll_rw_block(READ, 1, &bh);
1998 * If we issued read requests - let them complete.
2000 while(wait_bh > wait) {
2001 wait_on_buffer(*--wait_bh);
2002 if (!buffer_uptodate(*wait_bh))
2009 clear_buffer_new(bh);
2010 } while ((bh = bh->b_this_page) != head);
2015 * Zero out any newly allocated blocks to avoid exposing stale
2016 * data. If BH_New is set, we know that the block was newly
2017 * allocated in the above loop.
2022 block_end = block_start+blocksize;
2023 if (block_end <= from)
2025 if (block_start >= to)
2027 if (buffer_new(bh)) {
2030 clear_buffer_new(bh);
2031 kaddr = kmap_atomic(page, KM_USER0);
2032 memset(kaddr+block_start, 0, bh->b_size);
2033 kunmap_atomic(kaddr, KM_USER0);
2034 set_buffer_uptodate(bh);
2035 mark_buffer_dirty(bh);
2038 block_start = block_end;
2039 bh = bh->b_this_page;
2040 } while (bh != head);
2044 static int __block_commit_write(struct inode *inode, struct page *page,
2045 unsigned from, unsigned to)
2047 unsigned block_start, block_end;
2050 struct buffer_head *bh, *head;
2052 blocksize = 1 << inode->i_blkbits;
2054 for(bh = head = page_buffers(page), block_start = 0;
2055 bh != head || !block_start;
2056 block_start=block_end, bh = bh->b_this_page) {
2057 block_end = block_start + blocksize;
2058 if (block_end <= from || block_start >= to) {
2059 if (!buffer_uptodate(bh))
2062 set_buffer_uptodate(bh);
2063 mark_buffer_dirty(bh);
2068 * If this is a partial write which happened to make all buffers
2069 * uptodate then we can optimize away a bogus readpage() for
2070 * the next read(). Here we 'discover' whether the page went
2071 * uptodate as a result of this (potentially partial) write.
2074 SetPageUptodate(page);
2079 * Generic "read page" function for block devices that have the normal
2080 * get_block functionality. This is most of the block device filesystems.
2081 * Reads the page asynchronously --- the unlock_buffer() and
2082 * set/clear_buffer_uptodate() functions propagate buffer state into the
2083 * page struct once IO has completed.
2085 int block_read_full_page(struct page *page, get_block_t *get_block)
2087 struct inode *inode = page->mapping->host;
2088 sector_t iblock, lblock;
2089 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2090 unsigned int blocksize;
2092 int fully_mapped = 1;
2094 BUG_ON(!PageLocked(page));
2095 blocksize = 1 << inode->i_blkbits;
2096 if (!page_has_buffers(page))
2097 create_empty_buffers(page, blocksize, 0);
2098 head = page_buffers(page);
2100 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2101 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2107 if (buffer_uptodate(bh))
2110 if (!buffer_mapped(bh)) {
2114 if (iblock < lblock) {
2115 WARN_ON(bh->b_size != blocksize);
2116 err = get_block(inode, iblock, bh, 0);
2120 if (!buffer_mapped(bh)) {
2121 void *kaddr = kmap_atomic(page, KM_USER0);
2122 memset(kaddr + i * blocksize, 0, blocksize);
2123 flush_dcache_page(page);
2124 kunmap_atomic(kaddr, KM_USER0);
2126 set_buffer_uptodate(bh);
2130 * get_block() might have updated the buffer
2133 if (buffer_uptodate(bh))
2137 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2140 SetPageMappedToDisk(page);
2144 * All buffers are uptodate - we can set the page uptodate
2145 * as well. But not if get_block() returned an error.
2147 if (!PageError(page))
2148 SetPageUptodate(page);
2153 /* Stage two: lock the buffers */
2154 for (i = 0; i < nr; i++) {
2157 mark_buffer_async_read(bh);
2161 * Stage 3: start the IO. Check for uptodateness
2162 * inside the buffer lock in case another process reading
2163 * the underlying blockdev brought it uptodate (the sct fix).
2165 for (i = 0; i < nr; i++) {
2167 if (buffer_uptodate(bh))
2168 end_buffer_async_read(bh, 1);
2170 submit_bh(READ, bh);
2175 /* utility function for filesystems that need to do work on expanding
2176 * truncates. Uses prepare/commit_write to allow the filesystem to
2177 * deal with the hole.
2179 static int __generic_cont_expand(struct inode *inode, loff_t size,
2180 pgoff_t index, unsigned int offset)
2182 struct address_space *mapping = inode->i_mapping;
2184 unsigned long limit;
2188 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2189 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2190 send_sig(SIGXFSZ, current, 0);
2193 if (size > inode->i_sb->s_maxbytes)
2197 page = grab_cache_page(mapping, index);
2200 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2203 * ->prepare_write() may have instantiated a few blocks
2204 * outside i_size. Trim these off again.
2207 page_cache_release(page);
2208 vmtruncate(inode, inode->i_size);
2212 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2215 page_cache_release(page);
2222 int generic_cont_expand(struct inode *inode, loff_t size)
2225 unsigned int offset;
2227 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2229 /* ugh. in prepare/commit_write, if from==to==start of block, we
2230 ** skip the prepare. make sure we never send an offset for the start
2233 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2234 /* caller must handle this extra byte. */
2237 index = size >> PAGE_CACHE_SHIFT;
2239 return __generic_cont_expand(inode, size, index, offset);
2242 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2244 loff_t pos = size - 1;
2245 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2246 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2248 /* prepare/commit_write can handle even if from==to==start of block. */
2249 return __generic_cont_expand(inode, size, index, offset);
2253 * For moronic filesystems that do not allow holes in files.
2254 * We may have to extend the file.
2257 int cont_prepare_write(struct page *page, unsigned offset,
2258 unsigned to, get_block_t *get_block, loff_t *bytes)
2260 struct address_space *mapping = page->mapping;
2261 struct inode *inode = mapping->host;
2262 struct page *new_page;
2266 unsigned blocksize = 1 << inode->i_blkbits;
2269 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2271 new_page = grab_cache_page(mapping, pgpos);
2274 /* we might sleep */
2275 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2276 unlock_page(new_page);
2277 page_cache_release(new_page);
2280 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2281 if (zerofrom & (blocksize-1)) {
2282 *bytes |= (blocksize-1);
2285 status = __block_prepare_write(inode, new_page, zerofrom,
2286 PAGE_CACHE_SIZE, get_block);
2289 kaddr = kmap_atomic(new_page, KM_USER0);
2290 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2291 flush_dcache_page(new_page);
2292 kunmap_atomic(kaddr, KM_USER0);
2293 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2294 unlock_page(new_page);
2295 page_cache_release(new_page);
2298 if (page->index < pgpos) {
2299 /* completely inside the area */
2302 /* page covers the boundary, find the boundary offset */
2303 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2305 /* if we expand the file, the last block will be filled */
2306 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2307 *bytes |= (blocksize-1);
2311 /* starting below the boundary? Nothing to zero out */
2312 if (offset <= zerofrom)
2315 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2318 if (zerofrom < offset) {
2319 kaddr = kmap_atomic(page, KM_USER0);
2320 memset(kaddr+zerofrom, 0, offset-zerofrom);
2321 flush_dcache_page(page);
2322 kunmap_atomic(kaddr, KM_USER0);
2323 __block_commit_write(inode, page, zerofrom, offset);
2327 ClearPageUptodate(page);
2331 ClearPageUptodate(new_page);
2332 unlock_page(new_page);
2333 page_cache_release(new_page);
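/*
 * Editorial example (not in the original source): how a no-holes filesystem's
 * ->prepare_write might delegate to cont_prepare_write().  example_get_block
 * and example_mmu_private are hypothetical; a real filesystem passes its own
 * get_block routine and a per-inode high-water mark of initialised data
 * (fat, for instance, does something along these lines).
 */
#if 0	/* illustration only */
/* Trivial 1:1 block mapping, purely so the sketch is self-contained. */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

/* In practice this would live in the filesystem-specific inode. */
static loff_t example_mmu_private;

static int example_cont_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, example_get_block,
				  &example_mmu_private);
}
#endif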
2338 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2339 get_block_t *get_block)
2341 struct inode *inode = page->mapping->host;
2342 int err = __block_prepare_write(inode, page, from, to, get_block);
2344 ClearPageUptodate(page);
2348 int block_commit_write(struct page *page, unsigned from, unsigned to)
2350 struct inode *inode = page->mapping->host;
2351 __block_commit_write(inode,page,from,to);
2355 int generic_commit_write(struct file *file, struct page *page,
2356 unsigned from, unsigned to)
2358 struct inode *inode = page->mapping->host;
2359 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2360 __block_commit_write(inode,page,from,to);
2362 * No need to use i_size_read() here, the i_size
2363 * cannot change under us because we hold i_mutex.
2365 if (pos > inode->i_size) {
2366 i_size_write(inode, pos);
2367 mark_inode_dirty(inode);
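/*
 * Editorial example (not in the original source): the usual way a simple
 * filesystem wires these helpers into its address_space_operations.  The
 * wrappers reuse the hypothetical example_get_block from the sketch above;
 * block_write_full_page() is declared in buffer_head.h and defined later in
 * this file.
 */
#if 0	/* illustration only */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}

static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.writepage	= example_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_prepare_write,
	.commit_write	= generic_commit_write,
};
#endif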
2374 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2375 * immediately, while under the page lock. So it needs a special end_io
2376 * handler which does not touch the bh after unlocking it.
2378 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2379 * a race there is benign: unlock_buffer() only uses the bh's address for
2380 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2383 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2386 set_buffer_uptodate(bh);
2388 /* This happens, due to failed READA attempts. */
2389 clear_buffer_uptodate(bh);
2395 * On entry, the page is fully not uptodate.
2396 * On exit the page is fully uptodate in the areas outside (from,to)
2398 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2399 get_block_t *get_block)
2401 struct inode *inode = page->mapping->host;
2402 const unsigned blkbits = inode->i_blkbits;
2403 const unsigned blocksize = 1 << blkbits;
2404 struct buffer_head map_bh;
2405 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2406 unsigned block_in_page;
2407 unsigned block_start;
2408 sector_t block_in_file;
2413 int is_mapped_to_disk = 1;
2416 if (PageMappedToDisk(page))
2419 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2420 map_bh.b_page = page;
2423 * We loop across all blocks in the page, whether or not they are
2424 * part of the affected region. This is so we can discover if the
2425 * page is fully mapped-to-disk.
2427 for (block_start = 0, block_in_page = 0;
2428 block_start < PAGE_CACHE_SIZE;
2429 block_in_page++, block_start += blocksize) {
2430 unsigned block_end = block_start + blocksize;
2435 if (block_start >= to)
2437 map_bh.b_size = blocksize;
2438 ret = get_block(inode, block_in_file + block_in_page,
2442 if (!buffer_mapped(&map_bh))
2443 is_mapped_to_disk = 0;
2444 if (buffer_new(&map_bh))
2445 unmap_underlying_metadata(map_bh.b_bdev,
2447 if (PageUptodate(page))
2449 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2450 kaddr = kmap_atomic(page, KM_USER0);
2451 if (block_start < from) {
2452 memset(kaddr+block_start, 0, from-block_start);
2455 if (block_end > to) {
2456 memset(kaddr + to, 0, block_end - to);
2459 flush_dcache_page(page);
2460 kunmap_atomic(kaddr, KM_USER0);
2463 if (buffer_uptodate(&map_bh))
2464 continue; /* reiserfs does this */
2465 if (block_start < from || block_end > to) {
2466 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2472 bh->b_state = map_bh.b_state;
2473 atomic_set(&bh->b_count, 0);
2474 bh->b_this_page = NULL;
2476 bh->b_blocknr = map_bh.b_blocknr;
2477 bh->b_size = blocksize;
2478 bh->b_data = (char *)(long)block_start;
2479 bh->b_bdev = map_bh.b_bdev;
2480 bh->b_private = NULL;
2481 read_bh[nr_reads++] = bh;
2486 struct buffer_head *bh;
2489 * The page is locked, so these buffers are protected from
2490 * any VM or truncate activity. Hence we don't need to care
2491 * for the buffer_head refcounts.
2493 for (i = 0; i < nr_reads; i++) {
2496 bh->b_end_io = end_buffer_read_nobh;
2497 submit_bh(READ, bh);
2499 for (i = 0; i < nr_reads; i++) {
2502 if (!buffer_uptodate(bh))
2504 free_buffer_head(bh);
2511 if (is_mapped_to_disk)
2512 SetPageMappedToDisk(page);
2513 SetPageUptodate(page);
2516 * Setting the page dirty here isn't necessary for the prepare_write
2517 * function - commit_write will do that. But if/when this function is
2518 * used within the pagefault handler to ensure that all mmapped pages
2519 * have backing space in the filesystem, we will need to dirty the page
2520 * if its contents were altered.
2523 set_page_dirty(page);
2528 for (i = 0; i < nr_reads; i++) {
2530 free_buffer_head(read_bh[i]);
2534 * Error recovery is pretty slack. Clear the page and mark it dirty
2535 * so we'll later zero out any blocks which _were_ allocated.
2537 kaddr = kmap_atomic(page, KM_USER0);
2538 memset(kaddr, 0, PAGE_CACHE_SIZE);
2539 kunmap_atomic(kaddr, KM_USER0);
2540 SetPageUptodate(page);
2541 set_page_dirty(page);
2544 EXPORT_SYMBOL(nobh_prepare_write);
2546 int nobh_commit_write(struct file *file, struct page *page,
2547 unsigned from, unsigned to)
2549 struct inode *inode = page->mapping->host;
2550 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2552 set_page_dirty(page);
2553 if (pos > inode->i_size) {
2554 i_size_write(inode, pos);
2555 mark_inode_dirty(inode);
2559 EXPORT_SYMBOL(nobh_commit_write);
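/*
 * Editorial example (not in the original source): how a filesystem choosing
 * the nobh variants might wire them up, reusing the hypothetical
 * example_get_block and example_readpage from the earlier sketches.  A
 * matching ->writepage built on nobh_writepage() is sketched after that
 * function below.
 */
#if 0	/* illustration only */
static int example_nobh_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif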
2562 * nobh_writepage() - based on block_write_full_page(), except
2563 * that it tries to operate without attaching bufferheads to
2566 int nobh_writepage(struct page *page, get_block_t *get_block,
2567 struct writeback_control *wbc)
2569 struct inode * const inode = page->mapping->host;
2570 loff_t i_size = i_size_read(inode);
2571 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2576 /* Is the page fully inside i_size? */
2577 if (page->index < end_index)
2580 /* Is the page fully outside i_size? (truncate in progress) */
2581 offset = i_size & (PAGE_CACHE_SIZE-1);
2582 if (page->index >= end_index+1 || !offset) {
2584 * The page may have dirty, unmapped buffers. For example,
2585 * they may have been added in ext3_writepage(). Make them
2586 * freeable here, so the page does not leak.
2589 /* Not really sure about this - do we need this? */
2590 if (page->mapping->a_ops->invalidatepage)
2591 page->mapping->a_ops->invalidatepage(page, offset);
2594 return 0; /* don't care */
2598 * The page straddles i_size. It must be zeroed out on each and every
2599 * writepage invocation because it may be mmapped. "A file is mapped
2600 * in multiples of the page size. For a file that is not a multiple of
2601 * the page size, the remaining memory is zeroed when mapped, and
2602 * writes to that region are not written out to the file."
2604 kaddr = kmap_atomic(page, KM_USER0);
2605 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2606 flush_dcache_page(page);
2607 kunmap_atomic(kaddr, KM_USER0);
2609 ret = mpage_writepage(page, get_block, wbc);
2611 ret = __block_write_full_page(inode, page, get_block, wbc);
2614 EXPORT_SYMBOL(nobh_writepage);
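/*
 * Editorial example (not in the original source): the ->writepage wrapper
 * that pairs with the nobh sketch above; example_get_block remains
 * hypothetical.
 */
#if 0	/* illustration only */
static int example_nobh_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}
#endif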
2617 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2619 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2621 struct inode *inode = mapping->host;
2622 unsigned blocksize = 1 << inode->i_blkbits;
2623 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2624 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2627 const struct address_space_operations *a_ops = mapping->a_ops;
2631 if ((offset & (blocksize - 1)) == 0)
2635 page = grab_cache_page(mapping, index);
2639 to = (offset + blocksize) & ~(blocksize - 1);
2640 ret = a_ops->prepare_write(NULL, page, offset, to);
2642 kaddr = kmap_atomic(page, KM_USER0);
2643 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2644 flush_dcache_page(page);
2645 kunmap_atomic(kaddr, KM_USER0);
2646 set_page_dirty(page);
2649 page_cache_release(page);
2653 EXPORT_SYMBOL(nobh_truncate_page);
2655 int block_truncate_page(struct address_space *mapping,
2656 loff_t from, get_block_t *get_block)
2658 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2659 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2662 unsigned length, pos;
2663 struct inode *inode = mapping->host;
2665 struct buffer_head *bh;
2669 blocksize = 1 << inode->i_blkbits;
2670 length = offset & (blocksize - 1);
2672 /* Block boundary? Nothing to do */
2676 length = blocksize - length;
2677 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2679 page = grab_cache_page(mapping, index);
2684 if (!page_has_buffers(page))
2685 create_empty_buffers(page, blocksize, 0);
2687 /* Find the buffer that contains "offset" */
2688 bh = page_buffers(page);
2690 while (offset >= pos) {
2691 bh = bh->b_this_page;
2697 if (!buffer_mapped(bh)) {
2698 WARN_ON(bh->b_size != blocksize);
2699 err = get_block(inode, iblock, bh, 0);
2702 /* unmapped? It's a hole - nothing to do */
2703 if (!buffer_mapped(bh))
2707 /* Ok, it's mapped. Make sure it's up-to-date */
2708 if (PageUptodate(page))
2709 set_buffer_uptodate(bh);
2711 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2713 ll_rw_block(READ, 1, &bh);
2715 /* Uhhuh. Read error. Complain and punt. */
2716 if (!buffer_uptodate(bh))
2720 kaddr = kmap_atomic(page, KM_USER0);
2721 memset(kaddr + offset, 0, length);
2722 flush_dcache_page(page);
2723 kunmap_atomic(kaddr, KM_USER0);
2725 mark_buffer_dirty(bh);
2730 page_cache_release(page);
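/*
 * Editorial example (not in the original source): a filesystem's truncate
 * path typically zeroes the partial tail block with block_truncate_page()
 * before freeing the now-unused blocks.  example_get_block is the
 * hypothetical mapping routine from the earlier sketch.
 */
#if 0	/* illustration only */
static void example_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			    example_get_block);
	/* ...filesystem-specific freeing of blocks beyond i_size goes here... */
	mark_inode_dirty(inode);
}
#endif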
2736 * The generic ->writepage function for buffer-backed address_spaces
2738 int block_write_full_page(struct page *page, get_block_t *get_block,
2739 struct writeback_control *wbc)
2741 struct inode * const inode = page->mapping->host;
2742 loff_t i_size = i_size_read(inode);
2743 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2747 /* Is the page fully inside i_size? */
2748 if (page->index < end_index)
2749 return __block_write_full_page(inode, page, get_block, wbc);
2751 /* Is the page fully outside i_size? (truncate in progress) */
2752 offset = i_size & (PAGE_CACHE_SIZE-1);
2753 if (page->index >= end_index+1 || !offset) {
2755 * The page may have dirty, unmapped buffers. For example,
2756 * they may have been added in ext3_writepage(). Make them
2757 * freeable here, so the page does not leak.
2759 do_invalidatepage(page, 0);
2761 return 0; /* don't care */
2765 * The page straddles i_size. It must be zeroed out on each and every
2766 * writepage invocation because it may be mmapped. "A file is mapped
2767 * in multiples of the page size. For a file that is not a multiple of
2768 * the page size, the remaining memory is zeroed when mapped, and
2769 * writes to that region are not written out to the file."
2771 kaddr = kmap_atomic(page, KM_USER0);
2772 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2773 flush_dcache_page(page);
2774 kunmap_atomic(kaddr, KM_USER0);
2775 return __block_write_full_page(inode, page, get_block, wbc);
2778 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2779 get_block_t *get_block)
2781 struct buffer_head tmp;
2782 struct inode *inode = mapping->host;
2785 tmp.b_size = 1 << inode->i_blkbits;
2786 get_block(inode, block, &tmp, 0);
2787 return tmp.b_blocknr;
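/*
 * Editorial example (not in the original source): ->bmap is usually a thin
 * wrapper around generic_block_bmap(); userspace reaches it through the
 * FIBMAP ioctl.  example_get_block is the hypothetical mapping routine from
 * the earlier sketch.
 */
#if 0	/* illustration only */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}
#endif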
2790 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2792 struct buffer_head *bh = bio->bi_private;
2797 if (err == -EOPNOTSUPP) {
2798 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2799 set_bit(BH_Eopnotsupp, &bh->b_state);
2802 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2807 int submit_bh(int rw, struct buffer_head * bh)
2812 BUG_ON(!buffer_locked(bh));
2813 BUG_ON(!buffer_mapped(bh));
2814 BUG_ON(!bh->b_end_io);
2816 if (buffer_ordered(bh) && (rw == WRITE))
2820 * Only clear out a write error when rewriting, should this
2821 * include WRITE_SYNC as well?
2823 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2824 clear_buffer_write_io_error(bh);
2827 * from here on down, it's all bio -- do the initial mapping,
2828 * submit_bio -> generic_make_request may further map this bio around
2830 bio = bio_alloc(GFP_NOIO, 1);
2832 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2833 bio->bi_bdev = bh->b_bdev;
2834 bio->bi_io_vec[0].bv_page = bh->b_page;
2835 bio->bi_io_vec[0].bv_len = bh->b_size;
2836 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2840 bio->bi_size = bh->b_size;
2842 bio->bi_end_io = end_bio_bh_io_sync;
2843 bio->bi_private = bh;
2846 submit_bio(rw, bio);
2848 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2856 * ll_rw_block: low-level access to block devices (DEPRECATED)
2857 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2858 * @nr: number of &struct buffer_heads in the array
2859 * @bhs: array of pointers to &struct buffer_head
2861 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2862 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2863 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2864 * in the buffers is sent to disk. The fourth option, %READA, is described in
2865 * the documentation for generic_make_request(), which ll_rw_block() calls.
2867 * This function drops any buffer that it cannot get a lock on (with the
2868 * BH_Lock state bit), unless SWRITE is required, any buffer that appears to be
2869 * clean when doing a write request, and any buffer that appears to be
2870 * up-to-date when doing a read request. Further, it marks as clean any buffers
2871 * that are processed for writing (the buffer cache won't assume that they are
2872 * actually clean until the buffer gets unlocked).
2874 * ll_rw_block sets b_end_io to a simple completion handler that marks
2875 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2878 * All of the buffers must be for the same device, and must also be a
2879 * multiple of the current approved size for the device.
2881 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2885 for (i = 0; i < nr; i++) {
2886 struct buffer_head *bh = bhs[i];
2890 else if (test_set_buffer_locked(bh))
2893 if (rw == WRITE || rw == SWRITE) {
2894 if (test_clear_buffer_dirty(bh)) {
2895 bh->b_end_io = end_buffer_write_sync;
2897 submit_bh(WRITE, bh);
2901 if (!buffer_uptodate(bh)) {
2902 bh->b_end_io = end_buffer_read_sync;
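/*
 * Editorial example (not in the original source): the classic ll_rw_block()
 * pattern - kick off reads on a batch of buffers, then wait for and check
 * each one.  Names are hypothetical and error handling (a NULL return from
 * sb_getblk) is omitted for brevity.
 */
#if 0	/* illustration only */
static int example_read_blocks(struct super_block *sb, sector_t first, int nr)
{
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err = 0;

	if (nr > MAX_BUF_PER_PAGE)
		return -EINVAL;
	for (i = 0; i < nr; i++)
		bhs[i] = sb_getblk(sb, first + i);
	ll_rw_block(READ, nr, bhs);	/* already-uptodate buffers are skipped */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
		brelse(bhs[i]);
	}
	return err;
}
#endif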
2913 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2914 * and then start new I/O and then wait upon it. The caller must have a ref on
2917 int sync_dirty_buffer(struct buffer_head *bh)
2921 WARN_ON(atomic_read(&bh->b_count) < 1);
2923 if (test_clear_buffer_dirty(bh)) {
2925 bh->b_end_io = end_buffer_write_sync;
2926 ret = submit_bh(WRITE, bh);
2928 if (buffer_eopnotsupp(bh)) {
2929 clear_buffer_eopnotsupp(bh);
2932 if (!ret && !buffer_uptodate(bh))
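/*
 * Editorial example (not in the original source): a typical metadata update -
 * read a block, modify it in the buffer cache, then write it out
 * synchronously with sync_dirty_buffer().  Names and the byte being patched
 * are purely illustrative.
 */
#if 0	/* illustration only */
static int example_update_byte(struct super_block *sb, sector_t blocknr,
			       unsigned offset, u8 val)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);		/* read (or find) the block */
	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = val;	/* offset assumed within the block */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);		/* write it out and wait */
	brelse(bh);
	return err;
}
#endif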
2941 * try_to_free_buffers() checks if all the buffers on this particular page
2942 * are unused, and releases them if so.
2944 * Exclusion against try_to_free_buffers may be obtained by either
2945 * locking the page or by holding its mapping's private_lock.
2947 * If the page is dirty but all the buffers are clean then we need to
2948 * be sure to mark the page clean as well. This is because the page
2949 * may be against a block device, and a later reattachment of buffers
2950 * to a dirty page will set *all* buffers dirty. Which would corrupt
2951 * filesystem data on the same device.
2953 * The same applies to regular filesystem pages: if all the buffers are
2954 * clean then we set the page clean and proceed. To do that, we require
2955 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2958 * try_to_free_buffers() is non-blocking.
2960 static inline int buffer_busy(struct buffer_head *bh)
2962 return atomic_read(&bh->b_count) |
2963 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2967 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2969 struct buffer_head *head = page_buffers(page);
2970 struct buffer_head *bh;
2974 if (buffer_write_io_error(bh) && page->mapping)
2975 set_bit(AS_EIO, &page->mapping->flags);
2976 if (buffer_busy(bh))
2978 bh = bh->b_this_page;
2979 } while (bh != head);
2982 struct buffer_head *next = bh->b_this_page;
2984 if (!list_empty(&bh->b_assoc_buffers))
2985 __remove_assoc_queue(bh);
2987 } while (bh != head);
2988 *buffers_to_free = head;
2989 __clear_page_buffers(page);
2995 int try_to_free_buffers(struct page *page)
2997 struct address_space * const mapping = page->mapping;
2998 struct buffer_head *buffers_to_free = NULL;
3001 BUG_ON(!PageLocked(page));
3002 if (PageWriteback(page))
3005 if (mapping == NULL) { /* can this still happen? */
3006 ret = drop_buffers(page, &buffers_to_free);
3010 spin_lock(&mapping->private_lock);
3011 ret = drop_buffers(page, &buffers_to_free);
3012 spin_unlock(&mapping->private_lock);
3015 * If the filesystem writes its buffers by hand (eg ext3)
3016 * then we can have clean buffers against a dirty page. We
3017 * clean the page here; otherwise later reattachment of buffers
3018 * could encounter a non-uptodate page, which is unresolvable.
3019 * This only applies in the rare case where try_to_free_buffers
3020 * succeeds but the page is not freed.
3022 clear_page_dirty(page);
3025 if (buffers_to_free) {
3026 struct buffer_head *bh = buffers_to_free;
3029 struct buffer_head *next = bh->b_this_page;
3030 free_buffer_head(bh);
3032 } while (bh != buffers_to_free);
3036 EXPORT_SYMBOL(try_to_free_buffers);
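/*
 * Editorial example (not in the original source): for a filesystem with no
 * private per-page state beyond the buffers themselves, ->releasepage can be
 * a trivial wrapper like this (the name is hypothetical).
 */
#if 0	/* illustration only */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif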
3038 void block_sync_page(struct page *page)
3040 struct address_space *mapping;
3043 mapping = page_mapping(page);
3045 blk_run_backing_dev(mapping->backing_dev_info, page);
3049 * There are no bdflush tunables left. But distributions are
3050 * still running obsolete flush daemons, so we terminate them here.
3052 * Use of bdflush() is deprecated and will be removed in a future kernel.
3053 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3055 asmlinkage long sys_bdflush(int func, long data)
3057 static int msg_count;
3059 if (!capable(CAP_SYS_ADMIN))
3062 if (msg_count < 5) {
3065 "warning: process `%s' used the obsolete bdflush"
3066 " system call\n", current->comm);
3067 printk(KERN_INFO "Fix your initscripts?\n");
3076 * Buffer-head allocation
3078 static kmem_cache_t *bh_cachep;
3081 * Once the number of bh's in the machine exceeds this level, we start
3082 * stripping them in writeback.
3084 static int max_buffer_heads;
3086 int buffer_heads_over_limit;
3088 struct bh_accounting {
3089 int nr; /* Number of live bh's */
3090 int ratelimit; /* Limit cacheline bouncing */
3093 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3095 static void recalc_bh_state(void)
3100 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3102 __get_cpu_var(bh_accounting).ratelimit = 0;
3103 for_each_online_cpu(i)
3104 tot += per_cpu(bh_accounting, i).nr;
3105 buffer_heads_over_limit = (tot > max_buffer_heads);
3108 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3110 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3112 get_cpu_var(bh_accounting).nr++;
3114 put_cpu_var(bh_accounting);
3118 EXPORT_SYMBOL(alloc_buffer_head);
3120 void free_buffer_head(struct buffer_head *bh)
3122 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3123 kmem_cache_free(bh_cachep, bh);
3124 get_cpu_var(bh_accounting).nr--;
3126 put_cpu_var(bh_accounting);
3128 EXPORT_SYMBOL(free_buffer_head);
3131 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3133 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3134 SLAB_CTOR_CONSTRUCTOR) {
3135 struct buffer_head * bh = (struct buffer_head *)data;
3137 memset(bh, 0, sizeof(*bh));
3138 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3142 #ifdef CONFIG_HOTPLUG_CPU
3143 static void buffer_exit_cpu(int cpu)
3146 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3148 for (i = 0; i < BH_LRU_SIZE; i++) {
3152 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3153 per_cpu(bh_accounting, cpu).nr = 0;
3154 put_cpu_var(bh_accounting);
3157 static int buffer_cpu_notify(struct notifier_block *self,
3158 unsigned long action, void *hcpu)
3160 if (action == CPU_DEAD)
3161 buffer_exit_cpu((unsigned long)hcpu);
3164 #endif /* CONFIG_HOTPLUG_CPU */
3166 void __init buffer_init(void)
3170 bh_cachep = kmem_cache_create("buffer_head",
3171 sizeof(struct buffer_head), 0,
3172 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3178 * Limit the bh occupancy to 10% of ZONE_NORMAL
3180 nrpages = (nr_free_buffer_pages() * 10) / 100;
3181 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
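/*
 * Editorial note: as a rough worked example, with 4KB pages and a
 * buffer_head somewhere in the 50-100 byte range, each page of headroom
 * corresponds to roughly 40-80 buffer_heads; a machine with a few hundred
 * thousand lowmem pages therefore ends up with max_buffer_heads on the
 * order of a million.
 */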
3182 hotcpu_notifier(buffer_cpu_notify, 0);
3185 EXPORT_SYMBOL(__bforget);
3186 EXPORT_SYMBOL(__brelse);
3187 EXPORT_SYMBOL(__wait_on_buffer);
3188 EXPORT_SYMBOL(block_commit_write);
3189 EXPORT_SYMBOL(block_prepare_write);
3190 EXPORT_SYMBOL(block_read_full_page);
3191 EXPORT_SYMBOL(block_sync_page);
3192 EXPORT_SYMBOL(block_truncate_page);
3193 EXPORT_SYMBOL(block_write_full_page);
3194 EXPORT_SYMBOL(cont_prepare_write);
3195 EXPORT_SYMBOL(end_buffer_read_sync);
3196 EXPORT_SYMBOL(end_buffer_write_sync);
3197 EXPORT_SYMBOL(file_fsync);
3198 EXPORT_SYMBOL(fsync_bdev);
3199 EXPORT_SYMBOL(generic_block_bmap);
3200 EXPORT_SYMBOL(generic_commit_write);
3201 EXPORT_SYMBOL(generic_cont_expand);
3202 EXPORT_SYMBOL(generic_cont_expand_simple);
3203 EXPORT_SYMBOL(init_buffer);
3204 EXPORT_SYMBOL(invalidate_bdev);
3205 EXPORT_SYMBOL(ll_rw_block);
3206 EXPORT_SYMBOL(mark_buffer_dirty);
3207 EXPORT_SYMBOL(submit_bh);
3208 EXPORT_SYMBOL(sync_dirty_buffer);
3209 EXPORT_SYMBOL(unlock_buffer);