4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
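/*
 * Illustrative sketch (not part of the original file): the usual pattern a
 * caller builds from these primitives - take the buffer lock, kick off the
 * read with an end_io handler that will unlock the buffer, then wait for
 * completion with wait_on_buffer().  example_read_buffer() is a hypothetical
 * name.
 */
static int example_read_buffer(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);              /* someone else read it already */
                return 0;
        }
        get_bh(bh);                             /* hold a ref across the I/O */
        bh->b_end_io = end_buffer_read_sync;    /* marks uptodate and unlocks */
        submit_bh(READ, bh);
        wait_on_buffer(bh);                     /* sleep until unlock_buffer() */
        return buffer_uptodate(bh) ? 0 : -EIO;
}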
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
120 set_buffer_uptodate(bh);
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
134 set_buffer_uptodate(bh);
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 printk(KERN_WARNING "lost page write due to "
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
161 EXPORT_SYMBOL(sync_blockdev);
163 static void __fsync_super(struct super_block *sb)
165 sync_inodes_sb(sb, 0);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
182 int fsync_super(struct super_block *sb)
185 return sync_blockdev(sb->s_bdev);
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
193 int fsync_bdev(struct block_device *bdev)
195 struct super_block *sb = get_super(bdev);
197 int res = fsync_super(sb);
201 return sync_blockdev(bdev);
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
213 struct super_block *freeze_bdev(struct block_device *bdev)
215 struct super_block *sb;
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
225 sb->s_frozen = SB_FREEZE_TRANS;
228 sync_blockdev(sb->s_bdev);
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
235 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
237 EXPORT_SYMBOL(freeze_bdev);
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
249 BUG_ON(sb->s_bdev != bdev);
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
255 wake_up(&sb->s_wait_unfrozen);
259 mutex_unlock(&bdev->bd_mount_mutex);
261 EXPORT_SYMBOL(thaw_bdev);
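/*
 * Illustrative sketch (not part of the original file): how a snapshot or
 * backup driver is expected to bracket its work with the pair above.  The
 * freeze_bdev() return value may be NULL when nothing is mounted; thaw_bdev()
 * copes with that.  example_snapshot() is a hypothetical name.
 */
static int example_snapshot(struct block_device *bdev)
{
        struct super_block *sb = freeze_bdev(bdev);

        /* ... the filesystem is quiescent and consistent here ... */

        thaw_bdev(bdev, sb);
        return 0;
}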
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
267 static void do_sync(unsigned long wait)
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
282 asmlinkage long sys_sync(void)
288 void emergency_sync(void)
290 pdflush_operation(do_sync, 0);
294 * Generic function to fsync a file.
296 * filp may be NULL if called via the msync of a vma.
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
308 /* sync the superblock to buffers */
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
322 long do_fsync(struct file *file, int datasync)
326 struct address_space *mapping = file->f_mapping;
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
334 current->flags |= PF_SYNCWRITE;
335 ret = filemap_fdatawrite(mapping);
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
349 current->flags &= ~PF_SYNCWRITE;
354 static long __do_fsync(unsigned int fd, int datasync)
361 ret = do_fsync(file, datasync);
367 asmlinkage long sys_fsync(unsigned int fd)
369 return __do_fsync(fd, 0);
372 asmlinkage long sys_fdatasync(unsigned int fd)
374 return __do_fsync(fd, 1);
378 * Various filesystems appear to want __find_get_block to be non-blocking.
379 * But it's the page lock which protects the buffers. To get around this,
380 * we get exclusion from try_to_free_buffers with the blockdev mapping's
383 * Hack idea: for the blockdev mapping, private_lock contention
384 * may be quite high. This code could TryLock the page, and if that
385 * succeeds, there is no need to take private_lock. (But if
386 * private_lock is contended then so is mapping->tree_lock).
388 static struct buffer_head *
389 __find_get_block_slow(struct block_device *bdev, sector_t block)
391 struct inode *bd_inode = bdev->bd_inode;
392 struct address_space *bd_mapping = bd_inode->i_mapping;
393 struct buffer_head *ret = NULL;
395 struct buffer_head *bh;
396 struct buffer_head *head;
400 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
401 page = find_get_page(bd_mapping, index);
405 spin_lock(&bd_mapping->private_lock);
406 if (!page_has_buffers(page))
408 head = page_buffers(page);
411 if (bh->b_blocknr == block) {
416 if (!buffer_mapped(bh))
418 bh = bh->b_this_page;
419 } while (bh != head);
421 /* we might be here because some of the buffers on this page are
422 * not mapped. This is due to various races between
423 * file io on the block device and getblk. It gets dealt with
424 * elsewhere, don't buffer_error if we had some unmapped buffers
427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block,
430 (unsigned long long)bh->b_blocknr);
431 printk("b_state=0x%08lx, b_size=%zu\n",
432 bh->b_state, bh->b_size);
433 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
436 spin_unlock(&bd_mapping->private_lock);
437 page_cache_release(page);
442 /* If invalidate_buffers() will trash dirty buffers, it means some kind
443 of fs corruption is going on. Trashing dirty data always implies losing
444 information that was supposed to be just stored on the physical layer
447 Thus invalidate_buffers in general usage is not allowed to trash
448 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
449 be preserved. These buffers are simply skipped.
451 We also skip buffers which are still in use. For example this can
452 happen if a userspace program is reading the block device.
454 NOTE: In the case where the user removed a removable-media disk even though
455 there is still dirty data not synced on disk (due to a bug in the device driver
456 or to an error by the user), by not destroying the dirty buffers we could
457 generate corruption also on the next media inserted; thus a parameter is
458 necessary to handle this case in the safest way possible (trying
459 not to corrupt the newly inserted disk with the data belonging to
460 the old, now corrupted, disk). Also, for the ramdisk the natural thing
461 to do in order to release the ramdisk memory is to destroy dirty buffers.
463 These are two special cases. Normal usage implies that the device driver
464 issues a sync on the device (without waiting for I/O completion) and
465 then an invalidate_buffers call that doesn't trash dirty buffers.
467 For handling cache coherency with the blkdev pagecache the 'update' case
468 has been introduced. It is needed to re-read from disk any pinned
469 buffer. NOTE: re-reading from disk is destructive so we can do it only
470 when we assume nobody is changing the buffercache under our I/O and when
471 we think the disk contains more recent information than the buffercache.
472 The update == 1 pass marks the buffers we need to update, the update == 2
473 pass does the actual I/O. */
474 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
476 struct address_space *mapping = bdev->bd_inode->i_mapping;
478 if (mapping->nrpages == 0)
481 invalidate_bh_lrus();
483 * FIXME: what about destroy_dirty_buffers?
484 * We really want to use invalidate_inode_pages2() for
485 * that, but not until that's cleaned up.
487 invalidate_inode_pages(mapping);
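/*
 * Illustrative sketch (not part of the original file): the "normal usage"
 * described in the comment above invalidate_bdev() - flush what can still be
 * written back, then drop cached pages/buffers without trashing the dirty
 * ones.  example_media_revalidate() is a hypothetical name.
 */
static void example_media_revalidate(struct block_device *bdev)
{
        sync_blockdev(bdev);            /* start writeback of anything still valid */
        invalidate_bdev(bdev, 0);       /* drop clean buffers; dirty ones are skipped */
}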
491 * Kick pdflush then try to free up some ZONE_NORMAL memory.
493 static void free_more_memory(void)
498 wakeup_pdflush(1024);
501 for_each_online_pgdat(pgdat) {
502 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
504 try_to_free_pages(zones, GFP_NOFS);
509 * I/O completion handler for block_read_full_page() - pages
510 * which come unlocked at the end of I/O.
512 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
515 struct buffer_head *first;
516 struct buffer_head *tmp;
518 int page_uptodate = 1;
520 BUG_ON(!buffer_async_read(bh));
524 set_buffer_uptodate(bh);
526 clear_buffer_uptodate(bh);
527 if (printk_ratelimit())
533 * Be _very_ careful from here on. Bad things can happen if
534 * two buffer heads end IO at almost the same time and both
535 * decide that the page is now completely done.
537 first = page_buffers(page);
538 local_irq_save(flags);
539 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
540 clear_buffer_async_read(bh);
544 if (!buffer_uptodate(tmp))
546 if (buffer_async_read(tmp)) {
547 BUG_ON(!buffer_locked(tmp));
550 tmp = tmp->b_this_page;
552 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
553 local_irq_restore(flags);
556 * If none of the buffers had errors and they are all
557 * uptodate then we can set the page uptodate.
559 if (page_uptodate && !PageError(page))
560 SetPageUptodate(page);
565 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
566 local_irq_restore(flags);
571 * Completion handler for block_write_full_page() - pages which are unlocked
572 * during I/O, and which have PageWriteback cleared upon I/O completion.
574 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
576 char b[BDEVNAME_SIZE];
578 struct buffer_head *first;
579 struct buffer_head *tmp;
582 BUG_ON(!buffer_async_write(bh));
586 set_buffer_uptodate(bh);
588 if (printk_ratelimit()) {
590 printk(KERN_WARNING "lost page write due to "
592 bdevname(bh->b_bdev, b));
594 set_bit(AS_EIO, &page->mapping->flags);
595 clear_buffer_uptodate(bh);
599 first = page_buffers(page);
600 local_irq_save(flags);
601 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
603 clear_buffer_async_write(bh);
605 tmp = bh->b_this_page;
607 if (buffer_async_write(tmp)) {
608 BUG_ON(!buffer_locked(tmp));
611 tmp = tmp->b_this_page;
613 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
614 local_irq_restore(flags);
615 end_page_writeback(page);
619 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
620 local_irq_restore(flags);
625 * If a page's buffers are under async readin (end_buffer_async_read
626 * completion) then there is a possibility that another thread of
627 * control could lock one of the buffers after it has completed
628 * but while some of the other buffers have not completed. This
629 * locked buffer would confuse end_buffer_async_read() into not unlocking
630 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
631 * that this buffer is not under async I/O.
633 * The page comes unlocked when it has no locked buffer_async buffers
636 * PageLocked prevents anyone from starting new async I/O reads against any of
639 * PageWriteback is used to prevent simultaneous writeout of the same
642 * PageLocked prevents anyone from starting writeback of a page which is
643 * under read I/O (PageWriteback is only ever set against a locked page).
645 static void mark_buffer_async_read(struct buffer_head *bh)
647 bh->b_end_io = end_buffer_async_read;
648 set_buffer_async_read(bh);
651 void mark_buffer_async_write(struct buffer_head *bh)
653 bh->b_end_io = end_buffer_async_write;
654 set_buffer_async_write(bh);
656 EXPORT_SYMBOL(mark_buffer_async_write);
660 * fs/buffer.c contains helper functions for buffer-backed address space's
661 * fsync functions. A common requirement for buffer-based filesystems is
662 * that certain data from the backing blockdev needs to be written out for
663 * a successful fsync(). For example, ext2 indirect blocks need to be
664 * written back and waited upon before fsync() returns.
666 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
667 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
668 * management of a list of dependent buffers at ->i_mapping->private_list.
670 * Locking is a little subtle: try_to_free_buffers() will remove buffers
671 * from their controlling inode's queue when they are being freed. But
672 * try_to_free_buffers() will be operating against the *blockdev* mapping
673 * at the time, not against the S_ISREG file which depends on those buffers.
674 * So the locking for private_list is via the private_lock in the address_space
675 * which backs the buffers. Which is different from the address_space
676 * against which the buffers are listed. So for a particular address_space,
677 * mapping->private_lock does *not* protect mapping->private_list! In fact,
678 * mapping->private_list will always be protected by the backing blockdev's
681 * Which introduces a requirement: all buffers on an address_space's
682 * ->private_list must be from the same address_space: the blockdev's.
684 * address_spaces which do not place buffers at ->private_list via these
685 * utility functions are free to use private_lock and private_list for
686 * whatever they want. The only requirement is that list_empty(private_list)
687 * be true at clear_inode() time.
689 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
690 * filesystems should do that. invalidate_inode_buffers() should just go
691 * BUG_ON(!list_empty).
693 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
694 * take an address_space, not an inode. And it should be called
695 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
698 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
699 * list if it is already on a list. Because if the buffer is on a list,
700 * it *must* already be on the right one. If not, the filesystem is being
701 * silly. This will save a ton of locking. But first we have to ensure
702 * that buffers are taken *off* the old inode's list when they are freed
703 * (presumably in truncate). That requires careful auditing of all
704 * filesystems (do it inside bforget()). It could also be done by bringing
709 * The buffer's backing address_space's private_lock must be held
711 static inline void __remove_assoc_queue(struct buffer_head *bh)
713 list_del_init(&bh->b_assoc_buffers);
716 int inode_has_buffers(struct inode *inode)
718 return !list_empty(&inode->i_data.private_list);
722 * osync is designed to support O_SYNC io. It waits synchronously for
723 * all already-submitted IO to complete, but does not queue any new
724 * writes to the disk.
726 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
727 * you dirty the buffers, and then use osync_inode_buffers to wait for
728 * completion. Any other dirty buffers which are not yet queued for
729 * write will not be flushed to disk by the osync.
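/*
 * Illustrative sketch (not part of the original file): the O_SYNC pattern the
 * comment above describes - queue each buffer write as it is dirtied, then
 * wait only for the already-submitted I/O.  example_osync_one() is a
 * hypothetical name.
 */
static int example_osync_one(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);
        ll_rw_block(WRITE, 1, &bh);     /* queue the write immediately */
        wait_on_buffer(bh);             /* wait for that submitted I/O only */
        return buffer_uptodate(bh) ? 0 : -EIO;
}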
731 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
733 struct buffer_head *bh;
739 list_for_each_prev(p, list) {
741 if (buffer_locked(bh)) {
745 if (!buffer_uptodate(bh))
757 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
759 * @mapping: the mapping which wants those buffers written
761 * Starts I/O against the buffers at mapping->private_list, and waits upon
764 * Basically, this is a convenience function for fsync().
765 * @mapping is a file or directory which needs those buffers to be written for
766 * a successful fsync().
768 int sync_mapping_buffers(struct address_space *mapping)
770 struct address_space *buffer_mapping = mapping->assoc_mapping;
772 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
775 return fsync_buffers_list(&buffer_mapping->private_lock,
776 &mapping->private_list);
778 EXPORT_SYMBOL(sync_mapping_buffers);
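/*
 * Illustrative sketch (not part of the original file): a minimal ->fsync()
 * built on sync_mapping_buffers(), roughly the shape used by ext2.  The
 * example_fsync() name is hypothetical.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        int err = sync_mapping_buffers(inode->i_mapping);

        if (!(inode->i_state & I_DIRTY))
                return err;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return err;
        /* write the inode itself, synchronously */
        return err ? err : write_inode_now(inode, 1);
}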
781 * Called when we've recently written block `bblock', and it is known that
782 * `bblock' was for a buffer_boundary() buffer. This means that the block at
783 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
784 * dirty, schedule it for IO. So that indirects merge nicely with their data.
786 void write_boundary_block(struct block_device *bdev,
787 sector_t bblock, unsigned blocksize)
789 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
791 if (buffer_dirty(bh))
792 ll_rw_block(WRITE, 1, &bh);
797 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
799 struct address_space *mapping = inode->i_mapping;
800 struct address_space *buffer_mapping = bh->b_page->mapping;
802 mark_buffer_dirty(bh);
803 if (!mapping->assoc_mapping) {
804 mapping->assoc_mapping = buffer_mapping;
806 BUG_ON(mapping->assoc_mapping != buffer_mapping);
808 if (list_empty(&bh->b_assoc_buffers)) {
809 spin_lock(&buffer_mapping->private_lock);
810 list_move_tail(&bh->b_assoc_buffers,
811 &mapping->private_list);
812 spin_unlock(&buffer_mapping->private_lock);
815 EXPORT_SYMBOL(mark_buffer_dirty_inode);
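/*
 * Illustrative sketch (not part of the original file): a filesystem that has
 * just modified a metadata (e.g. indirect) block attaches it to the owning
 * inode, so a later sync_mapping_buffers() on that inode's mapping writes it
 * back.  example_update_indirect() is a hypothetical name.
 */
static int example_update_indirect(struct inode *inode, sector_t blocknr,
                                   unsigned offset, __le32 val)
{
        struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

        if (!bh)
                return -EIO;
        ((__le32 *)bh->b_data)[offset] = val;
        mark_buffer_dirty_inode(bh, inode);     /* queue on ->private_list */
        brelse(bh);
        return 0;
}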
818 * Add a page to the dirty page list.
820 * It is a sad fact of life that this function is called from several places
821 * deeply under spinlocking. It may not sleep.
823 * If the page has buffers, the uptodate buffers are set dirty, to preserve
824 * dirty-state coherency between the page and the buffers. If the page does
825 * not have buffers then when they are later attached they will all be set
828 * The buffers are dirtied before the page is dirtied. There's a small race
829 * window in which a writepage caller may see the page cleanness but not the
830 * buffer dirtiness. That's fine. If this code were to set the page dirty
831 * before the buffers, a concurrent writepage caller could clear the page dirty
832 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
833 * page on the dirty page list.
835 * We use private_lock to lock against try_to_free_buffers while using the
836 * page's buffer list. Also use this to protect against clean buffers being
837 * added to the page after it was set dirty.
839 * FIXME: may need to call ->reservepage here as well. That's rather up to the
840 * address_space though.
842 int __set_page_dirty_buffers(struct page *page)
844 struct address_space * const mapping = page->mapping;
846 spin_lock(&mapping->private_lock);
847 if (page_has_buffers(page)) {
848 struct buffer_head *head = page_buffers(page);
849 struct buffer_head *bh = head;
852 set_buffer_dirty(bh);
853 bh = bh->b_this_page;
854 } while (bh != head);
856 spin_unlock(&mapping->private_lock);
858 if (!TestSetPageDirty(page)) {
859 write_lock_irq(&mapping->tree_lock);
860 if (page->mapping) { /* Race with truncate? */
861 if (mapping_cap_account_dirty(mapping))
862 inc_page_state(nr_dirty);
863 radix_tree_tag_set(&mapping->page_tree,
865 PAGECACHE_TAG_DIRTY);
867 write_unlock_irq(&mapping->tree_lock);
868 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
873 EXPORT_SYMBOL(__set_page_dirty_buffers);
876 * Write out and wait upon a list of buffers.
878 * We have conflicting pressures: we want to make sure that all
879 * initially dirty buffers get waited on, but that any subsequently
880 * dirtied buffers don't. After all, we don't want fsync to last
881 * forever if somebody is actively writing to the file.
883 * Do this in two main stages: first we copy dirty buffers to a
884 * temporary inode list, queueing the writes as we go. Then we clean
885 * up, waiting for those writes to complete.
887 * During this second stage, any subsequent updates to the file may end
888 * up refiling the buffer on the original inode's dirty list again, so
889 * there is a chance we will end up with a buffer queued for write but
890 * not yet completed on that list. So, as a final cleanup we go through
891 * the osync code to catch these locked, dirty buffers without requeuing
892 * any newly dirty buffers for write.
894 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
896 struct buffer_head *bh;
897 struct list_head tmp;
900 INIT_LIST_HEAD(&tmp);
903 while (!list_empty(list)) {
904 bh = BH_ENTRY(list->next);
905 list_del_init(&bh->b_assoc_buffers);
906 if (buffer_dirty(bh) || buffer_locked(bh)) {
907 list_add(&bh->b_assoc_buffers, &tmp);
908 if (buffer_dirty(bh)) {
912 * Ensure any pending I/O completes so that
913 * ll_rw_block() actually writes the current
914 * contents - it is a noop if I/O is still in
915 * flight on potentially older contents.
917 ll_rw_block(SWRITE, 1, &bh);
924 while (!list_empty(&tmp)) {
925 bh = BH_ENTRY(tmp.prev);
926 __remove_assoc_queue(bh);
930 if (!buffer_uptodate(bh))
937 err2 = osync_buffers_list(lock, list);
945 * Invalidate any and all dirty buffers on a given inode. We are
946 * probably unmounting the fs, but that doesn't mean we have already
947 * done a sync(). Just drop the buffers from the inode list.
949 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
950 * assumes that all the buffers are against the blockdev. Not true
953 void invalidate_inode_buffers(struct inode *inode)
955 if (inode_has_buffers(inode)) {
956 struct address_space *mapping = &inode->i_data;
957 struct list_head *list = &mapping->private_list;
958 struct address_space *buffer_mapping = mapping->assoc_mapping;
960 spin_lock(&buffer_mapping->private_lock);
961 while (!list_empty(list))
962 __remove_assoc_queue(BH_ENTRY(list->next));
963 spin_unlock(&buffer_mapping->private_lock);
968 * Remove any clean buffers from the inode's buffer list. This is called
969 * when we're trying to free the inode itself. Those buffers can pin it.
971 * Returns true if all buffers were removed.
973 int remove_inode_buffers(struct inode *inode)
977 if (inode_has_buffers(inode)) {
978 struct address_space *mapping = &inode->i_data;
979 struct list_head *list = &mapping->private_list;
980 struct address_space *buffer_mapping = mapping->assoc_mapping;
982 spin_lock(&buffer_mapping->private_lock);
983 while (!list_empty(list)) {
984 struct buffer_head *bh = BH_ENTRY(list->next);
985 if (buffer_dirty(bh)) {
989 __remove_assoc_queue(bh);
991 spin_unlock(&buffer_mapping->private_lock);
997 * Create the appropriate buffers when given a page for data area and
998 * the size of each buffer.. Use the bh->b_this_page linked list to
999 * follow the buffers created. Return NULL if unable to create more
1002 * The retry flag is used to differentiate async IO (paging, swapping),
1003 * which may not fail, from ordinary buffer allocations.
1005 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1008 struct buffer_head *bh, *head;
1014 while ((offset -= size) >= 0) {
1015 bh = alloc_buffer_head(GFP_NOFS);
1020 bh->b_this_page = head;
1025 atomic_set(&bh->b_count, 0);
1026 bh->b_private = NULL;
1029 /* Link the buffer to its page */
1030 set_bh_page(bh, page, offset);
1032 init_buffer(bh, NULL, NULL);
1036 * In case anything failed, we just free everything we got.
1042 head = head->b_this_page;
1043 free_buffer_head(bh);
1048 * Return failure for non-async IO requests. Async IO requests
1049 * are not allowed to fail, so we have to wait until buffer heads
1050 * become available. But we don't want tasks sleeping with
1051 * partially complete buffers, so all were released above.
1056 /* We're _really_ low on memory. Now we just
1057 * wait for old buffer heads to become free due to
1058 * finishing IO. Since this is an async request and
1059 * the reserve list is empty, we're sure there are
1060 * async buffer heads in use.
1065 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1068 link_dev_buffers(struct page *page, struct buffer_head *head)
1070 struct buffer_head *bh, *tail;
1075 bh = bh->b_this_page;
1077 tail->b_this_page = head;
1078 attach_page_buffers(page, head);
1082 * Initialise the state of a blockdev page's buffers.
1085 init_page_buffers(struct page *page, struct block_device *bdev,
1086 sector_t block, int size)
1088 struct buffer_head *head = page_buffers(page);
1089 struct buffer_head *bh = head;
1090 int uptodate = PageUptodate(page);
1093 if (!buffer_mapped(bh)) {
1094 init_buffer(bh, NULL, NULL);
1096 bh->b_blocknr = block;
1098 set_buffer_uptodate(bh);
1099 set_buffer_mapped(bh);
1102 bh = bh->b_this_page;
1103 } while (bh != head);
1107 * Create the page-cache page that contains the requested block.
1109 * This is used purely for blockdev mappings.
1111 static struct page *
1112 grow_dev_page(struct block_device *bdev, sector_t block,
1113 pgoff_t index, int size)
1115 struct inode *inode = bdev->bd_inode;
1117 struct buffer_head *bh;
1119 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1123 BUG_ON(!PageLocked(page));
1125 if (page_has_buffers(page)) {
1126 bh = page_buffers(page);
1127 if (bh->b_size == size) {
1128 init_page_buffers(page, bdev, block, size);
1131 if (!try_to_free_buffers(page))
1136 * Allocate some buffers for this page
1138 bh = alloc_page_buffers(page, size, 0);
1143 * Link the page to the buffers and initialise them. Take the
1144 * lock to be atomic wrt __find_get_block(), which does not
1145 * run under the page lock.
1147 spin_lock(&inode->i_mapping->private_lock);
1148 link_dev_buffers(page, bh);
1149 init_page_buffers(page, bdev, block, size);
1150 spin_unlock(&inode->i_mapping->private_lock);
1156 page_cache_release(page);
1161 * Create buffers for the specified block device block's page. If
1162 * that page was dirty, the buffers are set dirty also.
1164 * Except that's a bug. Attaching dirty buffers to a dirty
1165 * blockdev's page can result in filesystem corruption, because
1166 * some of those buffers may be aliases of filesystem data.
1167 * grow_dev_page() will go BUG() if this happens.
1170 grow_buffers(struct block_device *bdev, sector_t block, int size)
1179 } while ((size << sizebits) < PAGE_SIZE);
1181 index = block >> sizebits;
1182 block = index << sizebits;
1184 /* Create a page with the proper size buffers.. */
1185 page = grow_dev_page(bdev, block, index, size);
1189 page_cache_release(page);
1193 static struct buffer_head *
1194 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1196 /* Size must be multiple of hard sectorsize */
1197 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1198 (size < 512 || size > PAGE_SIZE))) {
1199 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1201 printk(KERN_ERR "hardsect size: %d\n",
1202 bdev_hardsect_size(bdev));
1209 struct buffer_head * bh;
1211 bh = __find_get_block(bdev, block, size);
1215 if (!grow_buffers(bdev, block, size))
1221 * The relationship between dirty buffers and dirty pages:
1223 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1224 * the page is tagged dirty in its radix tree.
1226 * At all times, the dirtiness of the buffers represents the dirtiness of
1227 * subsections of the page. If the page has buffers, the page dirty bit is
1228 * merely a hint about the true dirty state.
1230 * When a page is set dirty in its entirety, all its buffers are marked dirty
1231 * (if the page has buffers).
1233 * When a buffer is marked dirty, its page is dirtied, but the page's other
1236 * Also. When blockdev buffers are explicitly read with bread(), they
1237 * individually become uptodate. But their backing page remains not
1238 * uptodate - even if all of its buffers are uptodate. A subsequent
1239 * block_read_full_page() against that page will discover all the uptodate
1240 * buffers, will set the page uptodate and will perform no I/O.
1244 * mark_buffer_dirty - mark a buffer_head as needing writeout
1245 * @bh: the buffer_head to mark dirty
1247 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1248 * backing page dirty, then tag the page as dirty in its address_space's radix
1249 * tree and then attach the address_space's inode to its superblock's dirty
1252 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1253 * mapping->tree_lock and the global inode_lock.
1255 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1257 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1258 __set_page_dirty_nobuffers(bh->b_page);
1262 * Decrement a buffer_head's reference count. If all buffers against a page
1263 * have zero reference count, are clean and unlocked, and if the page is clean
1264 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1265 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1266 * a page but it ends up not being freed, and buffers may later be reattached).
1268 void __brelse(struct buffer_head * buf)
1270 if (atomic_read(&buf->b_count)) {
1274 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1279 * bforget() is like brelse(), except it discards any
1280 * potentially dirty data.
1282 void __bforget(struct buffer_head *bh)
1284 clear_buffer_dirty(bh);
1285 if (!list_empty(&bh->b_assoc_buffers)) {
1286 struct address_space *buffer_mapping = bh->b_page->mapping;
1288 spin_lock(&buffer_mapping->private_lock);
1289 list_del_init(&bh->b_assoc_buffers);
1290 spin_unlock(&buffer_mapping->private_lock);
1295 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1298 if (buffer_uptodate(bh)) {
1303 bh->b_end_io = end_buffer_read_sync;
1304 submit_bh(READ, bh);
1306 if (buffer_uptodate(bh))
1314 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1315 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1316 * refcount elevated by one when they're in an LRU. A buffer can only appear
1317 * once in a particular CPU's LRU. A single buffer can be present in multiple
1318 * CPU's LRUs at the same time.
1320 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1321 * sb_find_get_block().
1323 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1324 * a local interrupt disable for that.
1327 #define BH_LRU_SIZE 8
1330 struct buffer_head *bhs[BH_LRU_SIZE];
1333 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1336 #define bh_lru_lock() local_irq_disable()
1337 #define bh_lru_unlock() local_irq_enable()
1339 #define bh_lru_lock() preempt_disable()
1340 #define bh_lru_unlock() preempt_enable()
1343 static inline void check_irqs_on(void)
1345 #ifdef irqs_disabled
1346 BUG_ON(irqs_disabled());
1351 * The LRU management algorithm is dopey-but-simple. Sorry.
1353 static void bh_lru_install(struct buffer_head *bh)
1355 struct buffer_head *evictee = NULL;
1360 lru = &__get_cpu_var(bh_lrus);
1361 if (lru->bhs[0] != bh) {
1362 struct buffer_head *bhs[BH_LRU_SIZE];
1368 for (in = 0; in < BH_LRU_SIZE; in++) {
1369 struct buffer_head *bh2 = lru->bhs[in];
1374 if (out >= BH_LRU_SIZE) {
1375 BUG_ON(evictee != NULL);
1382 while (out < BH_LRU_SIZE)
1384 memcpy(lru->bhs, bhs, sizeof(bhs));
1393 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1395 static struct buffer_head *
1396 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1398 struct buffer_head *ret = NULL;
1404 lru = &__get_cpu_var(bh_lrus);
1405 for (i = 0; i < BH_LRU_SIZE; i++) {
1406 struct buffer_head *bh = lru->bhs[i];
1408 if (bh && bh->b_bdev == bdev &&
1409 bh->b_blocknr == block && bh->b_size == size) {
1412 lru->bhs[i] = lru->bhs[i - 1];
1427 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1428 * it in the LRU and mark it as accessed. If it is not present then return
1431 struct buffer_head *
1432 __find_get_block(struct block_device *bdev, sector_t block, int size)
1434 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1437 bh = __find_get_block_slow(bdev, block);
1445 EXPORT_SYMBOL(__find_get_block);
1448 * __getblk will locate (and, if necessary, create) the buffer_head
1449 * which corresponds to the passed block_device, block and size. The
1450 * returned buffer has its reference count incremented.
1452 * __getblk() cannot fail - it just keeps trying. If you pass it an
1453 * illegal block number, __getblk() will happily return a buffer_head
1454 * which represents the non-existent block. Very weird.
1456 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1457 * attempt is failing. FIXME, perhaps?
1459 struct buffer_head *
1460 __getblk(struct block_device *bdev, sector_t block, int size)
1462 struct buffer_head *bh = __find_get_block(bdev, block, size);
1466 bh = __getblk_slow(bdev, block, size);
1469 EXPORT_SYMBOL(__getblk);
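/*
 * Illustrative sketch (not part of the original file): __getblk() is the
 * right call when the block will be completely overwritten, so no read from
 * disk is needed.  example_zero_block() is a hypothetical name.
 */
static void example_zero_block(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);  /* cannot fail */

        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        brelse(bh);
}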
1472 * Do async read-ahead on a buffer..
1474 void __breadahead(struct block_device *bdev, sector_t block, int size)
1476 struct buffer_head *bh = __getblk(bdev, block, size);
1478 ll_rw_block(READA, 1, &bh);
1482 EXPORT_SYMBOL(__breadahead);
1485 * __bread() - reads a specified block and returns the bh
1486 * @bdev: the block_device to read from
1487 * @block: number of block
1488 * @size: size (in bytes) to read
1490 * Reads a specified block, and returns buffer head that contains it.
1491 * It returns NULL if the block was unreadable.
1493 struct buffer_head *
1494 __bread(struct block_device *bdev, sector_t block, int size)
1496 struct buffer_head *bh = __getblk(bdev, block, size);
1498 if (likely(bh) && !buffer_uptodate(bh))
1499 bh = __bread_slow(bh);
1502 EXPORT_SYMBOL(__bread);
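/*
 * Illustrative sketch (not part of the original file): reading a single block
 * with __bread() and handling the unreadable case.  example_read_block() is a
 * hypothetical name.
 */
static int example_read_block(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = __bread(bdev, block, size);

        if (!bh)
                return -EIO;            /* the block could not be read */
        /* ... examine bh->b_data ... */
        brelse(bh);
        return 0;
}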
1505 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1506 * This doesn't race because it runs in each cpu either in irq
1507 * or with preempt disabled.
1509 static void invalidate_bh_lru(void *arg)
1511 struct bh_lru *b = &get_cpu_var(bh_lrus);
1514 for (i = 0; i < BH_LRU_SIZE; i++) {
1518 put_cpu_var(bh_lrus);
1521 static void invalidate_bh_lrus(void)
1523 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1526 void set_bh_page(struct buffer_head *bh,
1527 struct page *page, unsigned long offset)
1530 BUG_ON(offset >= PAGE_SIZE);
1531 if (PageHighMem(page))
1533 * This catches illegal uses and preserves the offset:
1535 bh->b_data = (char *)(0 + offset);
1537 bh->b_data = page_address(page) + offset;
1539 EXPORT_SYMBOL(set_bh_page);
1542 * Called when truncating a buffer on a page completely.
1544 static void discard_buffer(struct buffer_head * bh)
1547 clear_buffer_dirty(bh);
1549 clear_buffer_mapped(bh);
1550 clear_buffer_req(bh);
1551 clear_buffer_new(bh);
1552 clear_buffer_delay(bh);
1557 * try_to_release_page() - release old fs-specific metadata on a page
1559 * @page: the page which the kernel is trying to free
1560 * @gfp_mask: memory allocation flags (and I/O mode)
1562 * The address_space is to try to release any data against the page
1563 * (presumably at page->private). If the release was successful, return `1'.
1564 * Otherwise return zero.
1566 * The @gfp_mask argument specifies whether I/O may be performed to release
1567 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1569 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1571 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1573 struct address_space * const mapping = page->mapping;
1575 BUG_ON(!PageLocked(page));
1576 if (PageWriteback(page))
1579 if (mapping && mapping->a_ops->releasepage)
1580 return mapping->a_ops->releasepage(page, gfp_mask);
1581 return try_to_free_buffers(page);
1583 EXPORT_SYMBOL(try_to_release_page);
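/*
 * Illustrative sketch (not part of the original file): a filesystem
 * ->releasepage() that performs its own checks and then falls back to
 * try_to_free_buffers(), which is also the default behaviour when no
 * ->releasepage is provided.  example_releasepage() is a hypothetical name.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
        /* a real filesystem would first drop or check its own page-private state */
        return try_to_free_buffers(page);
}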
1586 * block_invalidatepage - invalidate part or all of a buffer-backed page
1588 * @page: the page which is affected
1589 * @offset: the index of the truncation point
1591 * block_invalidatepage() is called when all or part of the page has become
1592 * invalidated by a truncate operation.
1594 * block_invalidatepage() does not have to release all buffers, but it must
1595 * ensure that no dirty buffer is left outside @offset and that no I/O
1596 * is underway against any of the blocks which are outside the truncation
1597 * point. Because the caller is about to free (and possibly reuse) those
1600 void block_invalidatepage(struct page *page, unsigned long offset)
1602 struct buffer_head *head, *bh, *next;
1603 unsigned int curr_off = 0;
1605 BUG_ON(!PageLocked(page));
1606 if (!page_has_buffers(page))
1609 head = page_buffers(page);
1612 unsigned int next_off = curr_off + bh->b_size;
1613 next = bh->b_this_page;
1616 * is this block fully invalidated?
1618 if (offset <= curr_off)
1620 curr_off = next_off;
1622 } while (bh != head);
1625 * We release buffers only if the entire page is being invalidated.
1626 * The get_block cached value has been unconditionally invalidated,
1627 * so real IO is not possible anymore.
1630 try_to_release_page(page, 0);
1634 EXPORT_SYMBOL(block_invalidatepage);
1636 void do_invalidatepage(struct page *page, unsigned long offset)
1638 void (*invalidatepage)(struct page *, unsigned long);
1639 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1640 block_invalidatepage;
1641 (*invalidatepage)(page, offset);
1645 * We attach and possibly dirty the buffers atomically wrt
1646 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1647 * is already excluded via the page lock.
1649 void create_empty_buffers(struct page *page,
1650 unsigned long blocksize, unsigned long b_state)
1652 struct buffer_head *bh, *head, *tail;
1654 head = alloc_page_buffers(page, blocksize, 1);
1657 bh->b_state |= b_state;
1659 bh = bh->b_this_page;
1661 tail->b_this_page = head;
1663 spin_lock(&page->mapping->private_lock);
1664 if (PageUptodate(page) || PageDirty(page)) {
1667 if (PageDirty(page))
1668 set_buffer_dirty(bh);
1669 if (PageUptodate(page))
1670 set_buffer_uptodate(bh);
1671 bh = bh->b_this_page;
1672 } while (bh != head);
1674 attach_page_buffers(page, head);
1675 spin_unlock(&page->mapping->private_lock);
1677 EXPORT_SYMBOL(create_empty_buffers);
1680 * We are taking a block for data and we don't want any output from any
1681 * buffer-cache aliases starting from the return from this function and
1682 * until the moment when something explicitly marks the buffer
1683 * dirty (hopefully that will not happen until we free that block ;-)
1684 * We don't even need to mark it not-uptodate - nobody can expect
1685 * anything from a newly allocated buffer anyway. We used to use
1686 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1687 * don't want to mark the alias unmapped, for example - it would confuse
1688 * anyone who might pick it with bread() afterwards...
1690 * Also.. Note that bforget() doesn't lock the buffer. So there can
1691 * be writeout I/O going on against recently-freed buffers. We don't
1692 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1693 * only if we really need to. That happens here.
1695 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1697 struct buffer_head *old_bh;
1701 old_bh = __find_get_block_slow(bdev, block);
1703 clear_buffer_dirty(old_bh);
1704 wait_on_buffer(old_bh);
1705 clear_buffer_req(old_bh);
1709 EXPORT_SYMBOL(unmap_underlying_metadata);
1712 * NOTE! All mapped/uptodate combinations are valid:
1714 * Mapped  Uptodate  Meaning
1716 * No      No        "unknown" - must do get_block()
1717 * No      Yes       "hole" - zero-filled
1718 * Yes     No        "allocated" - allocated on disk, not read in
1719 * Yes     Yes       "valid" - allocated and up-to-date in memory.
1721 * "Dirty" is valid only with the last case (mapped+uptodate).
1725 * While block_write_full_page is writing back the dirty buffers under
1726 * the page lock, whoever dirtied the buffers may decide to clean them
1727 * again at any time. We handle that by only looking at the buffer
1728 * state inside lock_buffer().
1730 * If block_write_full_page() is called for regular writeback
1731 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1732 * locked buffer. This only can happen if someone has written the buffer
1733 * directly, with submit_bh(). At the address_space level PageWriteback
1734 * prevents this contention from occurring.
1736 static int __block_write_full_page(struct inode *inode, struct page *page,
1737 get_block_t *get_block, struct writeback_control *wbc)
1741 sector_t last_block;
1742 struct buffer_head *bh, *head;
1743 const unsigned blocksize = 1 << inode->i_blkbits;
1744 int nr_underway = 0;
1746 BUG_ON(!PageLocked(page));
1748 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1750 if (!page_has_buffers(page)) {
1751 create_empty_buffers(page, blocksize,
1752 (1 << BH_Dirty)|(1 << BH_Uptodate));
1756 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1757 * here, and the (potentially unmapped) buffers may become dirty at
1758 * any time. If a buffer becomes dirty here after we've inspected it
1759 * then we just miss that fact, and the page stays dirty.
1761 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1762 * handle that here by just cleaning them.
1765 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1766 head = page_buffers(page);
1770 * Get all the dirty buffers mapped to disk addresses and
1771 * handle any aliases from the underlying blockdev's mapping.
1774 if (block > last_block) {
1776 * mapped buffers outside i_size will occur, because
1777 * this page can be outside i_size when there is a
1778 * truncate in progress.
1781 * The buffer was zeroed by block_write_full_page()
1783 clear_buffer_dirty(bh);
1784 set_buffer_uptodate(bh);
1785 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1786 WARN_ON(bh->b_size != blocksize);
1787 err = get_block(inode, block, bh, 1);
1790 if (buffer_new(bh)) {
1791 /* blockdev mappings never come here */
1792 clear_buffer_new(bh);
1793 unmap_underlying_metadata(bh->b_bdev,
1797 bh = bh->b_this_page;
1799 } while (bh != head);
1802 if (!buffer_mapped(bh))
1805 * If it's a fully non-blocking write attempt and we cannot
1806 * lock the buffer then redirty the page. Note that this can
1807 * potentially cause a busy-wait loop from pdflush and kswapd
1808 * activity, but those code paths have their own higher-level
1811 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1813 } else if (test_set_buffer_locked(bh)) {
1814 redirty_page_for_writepage(wbc, page);
1817 if (test_clear_buffer_dirty(bh)) {
1818 mark_buffer_async_write(bh);
1822 } while ((bh = bh->b_this_page) != head);
1825 * The page and its buffers are protected by PageWriteback(), so we can
1826 * drop the bh refcounts early.
1828 BUG_ON(PageWriteback(page));
1829 set_page_writeback(page);
1832 struct buffer_head *next = bh->b_this_page;
1833 if (buffer_async_write(bh)) {
1834 submit_bh(WRITE, bh);
1838 } while (bh != head);
1843 if (nr_underway == 0) {
1845 * The page was marked dirty, but the buffers were
1846 * clean. Someone wrote them back by hand with
1847 * ll_rw_block/submit_bh. A rare case.
1851 if (!buffer_uptodate(bh)) {
1855 bh = bh->b_this_page;
1856 } while (bh != head);
1858 SetPageUptodate(page);
1859 end_page_writeback(page);
1861 * The page and buffer_heads can be released at any time from
1864 wbc->pages_skipped++; /* We didn't write this page */
1870 * ENOSPC, or some other error. We may already have added some
1871 * blocks to the file, so we need to write these out to avoid
1872 * exposing stale data.
1873 * The page is currently locked and not marked for writeback
1876 /* Recovery: lock and submit the mapped buffers */
1878 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1880 mark_buffer_async_write(bh);
1883 * The buffer may have been set dirty during
1884 * attachment to a dirty page.
1886 clear_buffer_dirty(bh);
1888 } while ((bh = bh->b_this_page) != head);
1890 BUG_ON(PageWriteback(page));
1891 set_page_writeback(page);
1894 struct buffer_head *next = bh->b_this_page;
1895 if (buffer_async_write(bh)) {
1896 clear_buffer_dirty(bh);
1897 submit_bh(WRITE, bh);
1901 } while (bh != head);
1905 static int __block_prepare_write(struct inode *inode, struct page *page,
1906 unsigned from, unsigned to, get_block_t *get_block)
1908 unsigned block_start, block_end;
1911 unsigned blocksize, bbits;
1912 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1914 BUG_ON(!PageLocked(page));
1915 BUG_ON(from > PAGE_CACHE_SIZE);
1916 BUG_ON(to > PAGE_CACHE_SIZE);
1919 blocksize = 1 << inode->i_blkbits;
1920 if (!page_has_buffers(page))
1921 create_empty_buffers(page, blocksize, 0);
1922 head = page_buffers(page);
1924 bbits = inode->i_blkbits;
1925 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1927 for(bh = head, block_start = 0; bh != head || !block_start;
1928 block++, block_start=block_end, bh = bh->b_this_page) {
1929 block_end = block_start + blocksize;
1930 if (block_end <= from || block_start >= to) {
1931 if (PageUptodate(page)) {
1932 if (!buffer_uptodate(bh))
1933 set_buffer_uptodate(bh);
1938 clear_buffer_new(bh);
1939 if (!buffer_mapped(bh)) {
1940 WARN_ON(bh->b_size != blocksize);
1941 err = get_block(inode, block, bh, 1);
1944 if (buffer_new(bh)) {
1945 unmap_underlying_metadata(bh->b_bdev,
1947 if (PageUptodate(page)) {
1948 set_buffer_uptodate(bh);
1951 if (block_end > to || block_start < from) {
1954 kaddr = kmap_atomic(page, KM_USER0);
1958 if (block_start < from)
1959 memset(kaddr+block_start,
1960 0, from-block_start);
1961 flush_dcache_page(page);
1962 kunmap_atomic(kaddr, KM_USER0);
1967 if (PageUptodate(page)) {
1968 if (!buffer_uptodate(bh))
1969 set_buffer_uptodate(bh);
1972 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1973 (block_start < from || block_end > to)) {
1974 ll_rw_block(READ, 1, &bh);
1979 * If we issued read requests - let them complete.
1981 while(wait_bh > wait) {
1982 wait_on_buffer(*--wait_bh);
1983 if (!buffer_uptodate(*wait_bh))
1990 clear_buffer_new(bh);
1991 } while ((bh = bh->b_this_page) != head);
1996 * Zero out any newly allocated blocks to avoid exposing stale
1997 * data. If BH_New is set, we know that the block was newly
1998 * allocated in the above loop.
2003 block_end = block_start+blocksize;
2004 if (block_end <= from)
2006 if (block_start >= to)
2008 if (buffer_new(bh)) {
2011 clear_buffer_new(bh);
2012 kaddr = kmap_atomic(page, KM_USER0);
2013 memset(kaddr+block_start, 0, bh->b_size);
2014 kunmap_atomic(kaddr, KM_USER0);
2015 set_buffer_uptodate(bh);
2016 mark_buffer_dirty(bh);
2019 block_start = block_end;
2020 bh = bh->b_this_page;
2021 } while (bh != head);
2025 static int __block_commit_write(struct inode *inode, struct page *page,
2026 unsigned from, unsigned to)
2028 unsigned block_start, block_end;
2031 struct buffer_head *bh, *head;
2033 blocksize = 1 << inode->i_blkbits;
2035 for(bh = head = page_buffers(page), block_start = 0;
2036 bh != head || !block_start;
2037 block_start=block_end, bh = bh->b_this_page) {
2038 block_end = block_start + blocksize;
2039 if (block_end <= from || block_start >= to) {
2040 if (!buffer_uptodate(bh))
2043 set_buffer_uptodate(bh);
2044 mark_buffer_dirty(bh);
2049 * If this is a partial write which happened to make all buffers
2050 * uptodate then we can optimize away a bogus readpage() for
2051 * the next read(). Here we 'discover' whether the page went
2052 * uptodate as a result of this (potentially partial) write.
2055 SetPageUptodate(page);
2060 * Generic "read page" function for block devices that have the normal
2061 * get_block functionality. This is most of the block device filesystems.
2062 * Reads the page asynchronously --- the unlock_buffer() and
2063 * set/clear_buffer_uptodate() functions propagate buffer state into the
2064 * page struct once IO has completed.
2066 int block_read_full_page(struct page *page, get_block_t *get_block)
2068 struct inode *inode = page->mapping->host;
2069 sector_t iblock, lblock;
2070 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2071 unsigned int blocksize;
2073 int fully_mapped = 1;
2075 BUG_ON(!PageLocked(page));
2076 blocksize = 1 << inode->i_blkbits;
2077 if (!page_has_buffers(page))
2078 create_empty_buffers(page, blocksize, 0);
2079 head = page_buffers(page);
2081 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2082 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2088 if (buffer_uptodate(bh))
2091 if (!buffer_mapped(bh)) {
2095 if (iblock < lblock) {
2096 WARN_ON(bh->b_size != blocksize);
2097 err = get_block(inode, iblock, bh, 0);
2101 if (!buffer_mapped(bh)) {
2102 void *kaddr = kmap_atomic(page, KM_USER0);
2103 memset(kaddr + i * blocksize, 0, blocksize);
2104 flush_dcache_page(page);
2105 kunmap_atomic(kaddr, KM_USER0);
2107 set_buffer_uptodate(bh);
2111 * get_block() might have updated the buffer
2114 if (buffer_uptodate(bh))
2118 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2121 SetPageMappedToDisk(page);
2125 * All buffers are uptodate - we can set the page uptodate
2126 * as well. But not if get_block() returned an error.
2128 if (!PageError(page))
2129 SetPageUptodate(page);
2134 /* Stage two: lock the buffers */
2135 for (i = 0; i < nr; i++) {
2138 mark_buffer_async_read(bh);
2142 * Stage 3: start the IO. Check for uptodateness
2143 * inside the buffer lock in case another process reading
2144 * the underlying blockdev brought it uptodate (the sct fix).
2146 for (i = 0; i < nr; i++) {
2148 if (buffer_uptodate(bh))
2149 end_buffer_async_read(bh, 1);
2151 submit_bh(READ, bh);
2156 /* utility function for filesystems that need to do work on expanding
2157 * truncates. Uses prepare/commit_write to allow the filesystem to
2158 * deal with the hole.
2160 static int __generic_cont_expand(struct inode *inode, loff_t size,
2161 pgoff_t index, unsigned int offset)
2163 struct address_space *mapping = inode->i_mapping;
2165 unsigned long limit;
2169 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2170 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2171 send_sig(SIGXFSZ, current, 0);
2174 if (size > inode->i_sb->s_maxbytes)
2178 page = grab_cache_page(mapping, index);
2181 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2184 * ->prepare_write() may have instantiated a few blocks
2185 * outside i_size. Trim these off again.
2188 page_cache_release(page);
2189 vmtruncate(inode, inode->i_size);
2193 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2196 page_cache_release(page);
2203 int generic_cont_expand(struct inode *inode, loff_t size)
2206 unsigned int offset;
2208 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2210 /* ugh. in prepare/commit_write, if from==to==start of block, we
2211 ** skip the prepare. make sure we never send an offset for the start
2214 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2215 /* caller must handle this extra byte. */
2218 index = size >> PAGE_CACHE_SHIFT;
2220 return __generic_cont_expand(inode, size, index, offset);
2223 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2225 loff_t pos = size - 1;
2226 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2227 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2229 /* prepare/commit_write can handle even if from==to==start of block. */
2230 return __generic_cont_expand(inode, size, index, offset);
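/*
 * Illustrative sketch (not part of the original file): growing a file from a
 * ->setattr()-style path with generic_cont_expand_simple(), as filesystems
 * that cannot represent holes do.  example_grow_file() is a hypothetical name.
 */
static int example_grow_file(struct inode *inode, loff_t new_size)
{
        int err = 0;

        if (new_size > inode->i_size)
                err = generic_cont_expand_simple(inode, new_size);
        return err;
}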
2234 * For moronic filesystems that do not allow holes in files.
2235 * We may have to extend the file.
2238 int cont_prepare_write(struct page *page, unsigned offset,
2239 unsigned to, get_block_t *get_block, loff_t *bytes)
2241 struct address_space *mapping = page->mapping;
2242 struct inode *inode = mapping->host;
2243 struct page *new_page;
2247 unsigned blocksize = 1 << inode->i_blkbits;
2250 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2252 new_page = grab_cache_page(mapping, pgpos);
2255 /* we might sleep */
2256 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2257 unlock_page(new_page);
2258 page_cache_release(new_page);
2261 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2262 if (zerofrom & (blocksize-1)) {
2263 *bytes |= (blocksize-1);
2266 status = __block_prepare_write(inode, new_page, zerofrom,
2267 PAGE_CACHE_SIZE, get_block);
2270 kaddr = kmap_atomic(new_page, KM_USER0);
2271 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2272 flush_dcache_page(new_page);
2273 kunmap_atomic(kaddr, KM_USER0);
2274 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2275 unlock_page(new_page);
2276 page_cache_release(new_page);
2279 if (page->index < pgpos) {
2280 /* completely inside the area */
2283 /* page covers the boundary, find the boundary offset */
2284 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2286 /* if we will expand the thing, the last block will be filled */
2287 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2288 *bytes |= (blocksize-1);
2292 /* starting below the boundary? Nothing to zero out */
2293 if (offset <= zerofrom)
2296 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2299 if (zerofrom < offset) {
2300 kaddr = kmap_atomic(page, KM_USER0);
2301 memset(kaddr+zerofrom, 0, offset-zerofrom);
2302 flush_dcache_page(page);
2303 kunmap_atomic(kaddr, KM_USER0);
2304 __block_commit_write(inode, page, zerofrom, offset);
2308 ClearPageUptodate(page);
2312 ClearPageUptodate(new_page);
2313 unlock_page(new_page);
2314 page_cache_release(new_page);
2319 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2320 get_block_t *get_block)
2322 struct inode *inode = page->mapping->host;
2323 int err = __block_prepare_write(inode, page, from, to, get_block);
2325 ClearPageUptodate(page);
2329 int block_commit_write(struct page *page, unsigned from, unsigned to)
2331 struct inode *inode = page->mapping->host;
2332 __block_commit_write(inode,page,from,to);
2336 int generic_commit_write(struct file *file, struct page *page,
2337 unsigned from, unsigned to)
2339 struct inode *inode = page->mapping->host;
2340 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2341 __block_commit_write(inode,page,from,to);
2343 * No need to use i_size_read() here, the i_size
2344 * cannot change under us because we hold i_mutex.
2346 if (pos > inode->i_size) {
2347 i_size_write(inode, pos);
2348 mark_inode_dirty(inode);
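/*
 * Illustrative sketch (not part of buffer.c): the common pairing for a simple
 * buffer_head based filesystem is block_prepare_write() for ->prepare_write
 * and generic_commit_write() for ->commit_write.  The myfs_* names are
 * hypothetical and myfs_get_block is assumed to be the filesystem's
 * get_block_t routine; ext2-style filesystems follow this shape.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
};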
2355 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2356 * immediately, while under the page lock. So it needs a special end_io
2357 * handler which does not touch the bh after unlocking it.
2359 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2360 * a race there is benign: unlock_buffer() only uses the bh's address for
2361 * hashing after unlocking the buffer, so it doesn't actually touch the bh itself. */
2364 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2367 set_buffer_uptodate(bh);
2369 /* This happens, due to failed READA attempts. */
2370 clear_buffer_uptodate(bh);
2376 * On entry, the page is fully not uptodate.
2377 * On exit the page is fully uptodate in the areas outside (from,to)
2379 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2380 get_block_t *get_block)
2382 struct inode *inode = page->mapping->host;
2383 const unsigned blkbits = inode->i_blkbits;
2384 const unsigned blocksize = 1 << blkbits;
2385 struct buffer_head map_bh;
2386 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2387 unsigned block_in_page;
2388 unsigned block_start;
2389 sector_t block_in_file;
2394 int is_mapped_to_disk = 1;
2397 if (PageMappedToDisk(page))
2400 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2401 map_bh.b_page = page;
2404 * We loop across all blocks in the page, whether or not they are
2405 * part of the affected region. This is so we can discover if the
2406 * page is fully mapped-to-disk.
2408 for (block_start = 0, block_in_page = 0;
2409 block_start < PAGE_CACHE_SIZE;
2410 block_in_page++, block_start += blocksize) {
2411 unsigned block_end = block_start + blocksize;
2416 if (block_start >= to)
2418 map_bh.b_size = blocksize;
2419 ret = get_block(inode, block_in_file + block_in_page,
2423 if (!buffer_mapped(&map_bh))
2424 is_mapped_to_disk = 0;
2425 if (buffer_new(&map_bh))
2426 unmap_underlying_metadata(map_bh.b_bdev,
2428 if (PageUptodate(page))
2430 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2431 kaddr = kmap_atomic(page, KM_USER0);
2432 if (block_start < from) {
2433 memset(kaddr+block_start, 0, from-block_start);
2436 if (block_end > to) {
2437 memset(kaddr + to, 0, block_end - to);
2440 flush_dcache_page(page);
2441 kunmap_atomic(kaddr, KM_USER0);
2444 if (buffer_uptodate(&map_bh))
2445 continue; /* reiserfs does this */
2446 if (block_start < from || block_end > to) {
2447 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2453 bh->b_state = map_bh.b_state;
2454 atomic_set(&bh->b_count, 0);
2455 bh->b_this_page = NULL;
2457 bh->b_blocknr = map_bh.b_blocknr;
2458 bh->b_size = blocksize;
2459 bh->b_data = (char *)(long)block_start;
2460 bh->b_bdev = map_bh.b_bdev;
2461 bh->b_private = NULL;
2462 read_bh[nr_reads++] = bh;
2467 struct buffer_head *bh;
2470 * The page is locked, so these buffers are protected from
2471 * any VM or truncate activity. Hence we don't need to care
2472 * for the buffer_head refcounts.
2474 for (i = 0; i < nr_reads; i++) {
2477 bh->b_end_io = end_buffer_read_nobh;
2478 submit_bh(READ, bh);
2480 for (i = 0; i < nr_reads; i++) {
2483 if (!buffer_uptodate(bh))
2485 free_buffer_head(bh);
2492 if (is_mapped_to_disk)
2493 SetPageMappedToDisk(page);
2494 SetPageUptodate(page);
2497 * Setting the page dirty here isn't necessary for the prepare_write
2498 * function - commit_write will do that. But if/when this function is
2499 * used within the pagefault handler to ensure that all mmapped pages
2500 * have backing space in the filesystem, we will need to dirty the page
2501 * if its contents were altered.
2504 set_page_dirty(page);
2509 for (i = 0; i < nr_reads; i++) {
2511 free_buffer_head(read_bh[i]);
2515 * Error recovery is pretty slack. Clear the page and mark it dirty
2516 * so we'll later zero out any blocks which _were_ allocated.
2518 kaddr = kmap_atomic(page, KM_USER0);
2519 memset(kaddr, 0, PAGE_CACHE_SIZE);
2520 kunmap_atomic(kaddr, KM_USER0);
2521 SetPageUptodate(page);
2522 set_page_dirty(page);
2525 EXPORT_SYMBOL(nobh_prepare_write);
2527 int nobh_commit_write(struct file *file, struct page *page,
2528 unsigned from, unsigned to)
2530 struct inode *inode = page->mapping->host;
2531 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2533 set_page_dirty(page);
2534 if (pos > inode->i_size) {
2535 i_size_write(inode, pos);
2536 mark_inode_dirty(inode);
2540 EXPORT_SYMBOL(nobh_commit_write);
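/*
 * Illustrative sketch (not part of buffer.c): a filesystem that wants to
 * avoid attaching buffer_heads on the write path can plug the nobh variants
 * straight into its aops.  myfs_get_block is a hypothetical get_block_t; the
 * pattern mirrors what ext2 offers with its "nobh" mount option.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
					unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}
/* ... with .prepare_write = myfs_nobh_prepare_write and
 *          .commit_write  = nobh_commit_write in the aops ... */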
2543 * nobh_writepage() - based on block_write_full_page() except
2544 * that it tries to operate without attaching bufferheads to the page. */
2547 int nobh_writepage(struct page *page, get_block_t *get_block,
2548 struct writeback_control *wbc)
2550 struct inode * const inode = page->mapping->host;
2551 loff_t i_size = i_size_read(inode);
2552 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2557 /* Is the page fully inside i_size? */
2558 if (page->index < end_index)
2561 /* Is the page fully outside i_size? (truncate in progress) */
2562 offset = i_size & (PAGE_CACHE_SIZE-1);
2563 if (page->index >= end_index+1 || !offset) {
2565 * The page may have dirty, unmapped buffers. For example,
2566 * they may have been added in ext3_writepage(). Make them
2567 * freeable here, so the page does not leak.
2570 /* Not really sure about this - do we need this ? */
2571 if (page->mapping->a_ops->invalidatepage)
2572 page->mapping->a_ops->invalidatepage(page, offset);
2575 return 0; /* don't care */
2579 * The page straddles i_size. It must be zeroed out on each and every
2580 * writepage invocation because it may be mmapped. "A file is mapped
2581 * in multiples of the page size. For a file that is not a multiple of
2582 * the page size, the remaining memory is zeroed when mapped, and
2583 * writes to that region are not written out to the file."
2585 kaddr = kmap_atomic(page, KM_USER0);
2586 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2587 flush_dcache_page(page);
2588 kunmap_atomic(kaddr, KM_USER0);
2590 ret = mpage_writepage(page, get_block, wbc);
2592 ret = __block_write_full_page(inode, page, get_block, wbc);
2595 EXPORT_SYMBOL(nobh_writepage);
2598 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2600 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2602 struct inode *inode = mapping->host;
2603 unsigned blocksize = 1 << inode->i_blkbits;
2604 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2605 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2608 struct address_space_operations *a_ops = mapping->a_ops;
2612 if ((offset & (blocksize - 1)) == 0)
2616 page = grab_cache_page(mapping, index);
2620 to = (offset + blocksize) & ~(blocksize - 1);
2621 ret = a_ops->prepare_write(NULL, page, offset, to);
2623 kaddr = kmap_atomic(page, KM_USER0);
2624 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2625 flush_dcache_page(page);
2626 kunmap_atomic(kaddr, KM_USER0);
2627 set_page_dirty(page);
2630 page_cache_release(page);
2634 EXPORT_SYMBOL(nobh_truncate_page);
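/*
 * Illustrative sketch (not part of buffer.c): the remaining pieces of a
 * "nobh" write path.  myfs_get_block is a hypothetical get_block_t, and
 * nobh_truncate_page() is only appropriate when ->prepare_write uses
 * nobh_prepare_write(), as the comment above notes.
 */
static int myfs_nobh_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static void myfs_nobh_truncate(struct inode *inode)
{
	/* zero the partial block at the new EOF before freeing blocks */
	nobh_truncate_page(inode->i_mapping, inode->i_size);
	/* ... filesystem-specific block freeing would follow ... */
}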
2636 int block_truncate_page(struct address_space *mapping,
2637 loff_t from, get_block_t *get_block)
2639 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2640 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2643 unsigned length, pos;
2644 struct inode *inode = mapping->host;
2646 struct buffer_head *bh;
2650 blocksize = 1 << inode->i_blkbits;
2651 length = offset & (blocksize - 1);
2653 /* Block boundary? Nothing to do */
2657 length = blocksize - length;
2658 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2660 page = grab_cache_page(mapping, index);
2665 if (!page_has_buffers(page))
2666 create_empty_buffers(page, blocksize, 0);
2668 /* Find the buffer that contains "offset" */
2669 bh = page_buffers(page);
2671 while (offset >= pos) {
2672 bh = bh->b_this_page;
2678 if (!buffer_mapped(bh)) {
2679 WARN_ON(bh->b_size != blocksize);
2680 err = get_block(inode, iblock, bh, 0);
2683 /* unmapped? It's a hole - nothing to do */
2684 if (!buffer_mapped(bh))
2688 /* Ok, it's mapped. Make sure it's up-to-date */
2689 if (PageUptodate(page))
2690 set_buffer_uptodate(bh);
2692 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2694 ll_rw_block(READ, 1, &bh);
2696 /* Uhhuh. Read error. Complain and punt. */
2697 if (!buffer_uptodate(bh))
2701 kaddr = kmap_atomic(page, KM_USER0);
2702 memset(kaddr + offset, 0, length);
2703 flush_dcache_page(page);
2704 kunmap_atomic(kaddr, KM_USER0);
2706 mark_buffer_dirty(bh);
2711 page_cache_release(page);
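/*
 * Illustrative sketch (not part of buffer.c): a buffer_head based filesystem
 * typically calls block_truncate_page() from its ->truncate operation to
 * zero the tail of the last remaining block, so later partial-block reads
 * and mmaps do not see stale data.  myfs_get_block is a hypothetical
 * get_block_t.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... then free the now-unused on-disk blocks ... */
}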
2717 * The generic ->writepage function for buffer-backed address_spaces
2719 int block_write_full_page(struct page *page, get_block_t *get_block,
2720 struct writeback_control *wbc)
2722 struct inode * const inode = page->mapping->host;
2723 loff_t i_size = i_size_read(inode);
2724 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2728 /* Is the page fully inside i_size? */
2729 if (page->index < end_index)
2730 return __block_write_full_page(inode, page, get_block, wbc);
2732 /* Is the page fully outside i_size? (truncate in progress) */
2733 offset = i_size & (PAGE_CACHE_SIZE-1);
2734 if (page->index >= end_index+1 || !offset) {
2736 * The page may have dirty, unmapped buffers. For example,
2737 * they may have been added in ext3_writepage(). Make them
2738 * freeable here, so the page does not leak.
2740 do_invalidatepage(page, 0);
2742 return 0; /* don't care */
2746 * The page straddles i_size. It must be zeroed out on each and every
2747 * writepage invocation because it may be mmapped. "A file is mapped
2748 * in multiples of the page size. For a file that is not a multiple of
2749 * the page size, the remaining memory is zeroed when mapped, and
2750 * writes to that region are not written out to the file."
2752 kaddr = kmap_atomic(page, KM_USER0);
2753 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2754 flush_dcache_page(page);
2755 kunmap_atomic(kaddr, KM_USER0);
2756 return __block_write_full_page(inode, page, get_block, wbc);
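/*
 * Illustrative sketch (not part of buffer.c): the usual ->writepage for a
 * buffer_head backed filesystem is just a wrapper that supplies its
 * get_block routine.  myfs_get_block is a hypothetical get_block_t.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}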
2759 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2760 get_block_t *get_block)
2762 struct buffer_head tmp;
2763 struct inode *inode = mapping->host;
2766 tmp.b_size = 1 << inode->i_blkbits;
2767 get_block(inode, block, &tmp, 0);
2768 return tmp.b_blocknr;
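/*
 * Illustrative sketch (not part of buffer.c): ->bmap (used by the FIBMAP
 * ioctl and swap-file setup) is usually a thin wrapper around
 * generic_block_bmap().  myfs_get_block is a hypothetical get_block_t.
 * get_block is called with create == 0 here, so a hole is reported as
 * block 0.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}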
2771 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2773 struct buffer_head *bh = bio->bi_private;
2778 if (err == -EOPNOTSUPP) {
2779 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2780 set_bit(BH_Eopnotsupp, &bh->b_state);
2783 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2788 int submit_bh(int rw, struct buffer_head * bh)
2793 BUG_ON(!buffer_locked(bh));
2794 BUG_ON(!buffer_mapped(bh));
2795 BUG_ON(!bh->b_end_io);
2797 if (buffer_ordered(bh) && (rw == WRITE))
2801 * Only clear out a write error when rewriting, should this
2802 * include WRITE_SYNC as well?
2804 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2805 clear_buffer_write_io_error(bh);
2808 * from here on down, it's all bio -- do the initial mapping,
2809 * submit_bio -> generic_make_request may further map this bio around
2811 bio = bio_alloc(GFP_NOIO, 1);
2813 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2814 bio->bi_bdev = bh->b_bdev;
2815 bio->bi_io_vec[0].bv_page = bh->b_page;
2816 bio->bi_io_vec[0].bv_len = bh->b_size;
2817 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2821 bio->bi_size = bh->b_size;
2823 bio->bi_end_io = end_bio_bh_io_sync;
2824 bio->bi_private = bh;
2827 submit_bio(rw, bio);
2829 if (bio_flagged(bio, BIO_EOPNOTSUPP))
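/*
 * Illustrative sketch (not part of buffer.c): synchronously reading one
 * buffer with submit_bh(), roughly the tail of what __bread() does.  "bh" is
 * assumed to be mapped (b_bdev/b_blocknr/b_size set) and unlocked on entry,
 * with the caller holding a reference.
 */
static int example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* balances the put in end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}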
2837 * ll_rw_block: low-level access to block devices (DEPRECATED)
2838 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2839 * @nr: number of &struct buffer_heads in the array
2840 * @bhs: array of pointers to &struct buffer_head
2842 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2843 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2844 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2845 * in the buffers is sent to disk. The fourth option, %READA, is described in the documentation
2846 * for generic_make_request() which ll_rw_block() calls.
2848 * This function drops any buffer that it cannot get a lock on (with the
2849 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2850 * clean when doing a write request, and any buffer that appears to be
2851 * up-to-date when doing a read request. Further, it marks as clean any buffers that
2852 * are processed for writing (the buffer cache won't assume that they are
2853 * actually clean until the buffer gets unlocked).
2855 * ll_rw_block sets b_end_io to a simple completion handler that marks
2856 * the buffer up-to-date (if appropriate), unlocks the buffer, and wakes any waiters.
2859 * All of the buffers must be for the same device, and must also be a
2860 * multiple of the current approved size for the device.
2862 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2866 for (i = 0; i < nr; i++) {
2867 struct buffer_head *bh = bhs[i];
2871 else if (test_set_buffer_locked(bh))
2874 if (rw == WRITE || rw == SWRITE) {
2875 if (test_clear_buffer_dirty(bh)) {
2876 bh->b_end_io = end_buffer_write_sync;
2878 submit_bh(WRITE, bh);
2882 if (!buffer_uptodate(bh)) {
2883 bh->b_end_io = end_buffer_read_sync;
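/*
 * Illustrative sketch (not part of buffer.c): using ll_rw_block() to kick off
 * reads for a batch of buffers and then waiting only for the one needed
 * immediately - a common readahead pattern in ext3-like metadata code.  The
 * bhs[] array is assumed to hold valid, mapped buffer_heads (e.g. from
 * sb_getblk()) on which the caller holds references.
 */
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
	ll_rw_block(READ, nr, bhs);	/* locked or uptodate buffers are skipped */
	wait_on_buffer(bhs[0]);		/* only the first one is needed right now */
	return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}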
2894 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2895 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer. */
2898 int sync_dirty_buffer(struct buffer_head *bh)
2902 WARN_ON(atomic_read(&bh->b_count) < 1);
2904 if (test_clear_buffer_dirty(bh)) {
2906 bh->b_end_io = end_buffer_write_sync;
2907 ret = submit_bh(WRITE, bh);
2909 if (buffer_eopnotsupp(bh)) {
2910 clear_buffer_eopnotsupp(bh);
2913 if (!ret && !buffer_uptodate(bh))
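/*
 * Illustrative sketch (not part of buffer.c): the typical data-integrity
 * pattern for a single metadata block - modify it, dirty it, then force it
 * out and check the result.  "bh" is assumed to come from sb_bread() or
 * similar, so the caller already holds the reference required above.
 */
static int example_write_block(struct buffer_head *bh)
{
	/* ... modify bh->b_data ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* submits the write and waits for it */
}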
2922 * try_to_free_buffers() checks if all the buffers on this particular page
2923 * are unused, and releases them if so.
2925 * Exclusion against try_to_free_buffers may be obtained by either
2926 * locking the page or by holding its mapping's private_lock.
2928 * If the page is dirty but all the buffers are clean then we need to
2929 * be sure to mark the page clean as well. This is because the page
2930 * may be against a block device, and a later reattachment of buffers
2931 * to a dirty page will set *all* buffers dirty. Which would corrupt
2932 * filesystem data on the same device.
2934 * The same applies to regular filesystem pages: if all the buffers are
2935 * clean then we set the page clean and proceed. To do that, we require
2936 * total exclusion from __set_page_dirty_buffers(). That is obtained with the mapping's private_lock.
2939 * try_to_free_buffers() is non-blocking.
2941 static inline int buffer_busy(struct buffer_head *bh)
2943 return atomic_read(&bh->b_count) |
2944 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2948 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2950 struct buffer_head *head = page_buffers(page);
2951 struct buffer_head *bh;
2955 if (buffer_write_io_error(bh) && page->mapping)
2956 set_bit(AS_EIO, &page->mapping->flags);
2957 if (buffer_busy(bh))
2959 bh = bh->b_this_page;
2960 } while (bh != head);
2963 struct buffer_head *next = bh->b_this_page;
2965 if (!list_empty(&bh->b_assoc_buffers))
2966 __remove_assoc_queue(bh);
2968 } while (bh != head);
2969 *buffers_to_free = head;
2970 __clear_page_buffers(page);
2976 int try_to_free_buffers(struct page *page)
2978 struct address_space * const mapping = page->mapping;
2979 struct buffer_head *buffers_to_free = NULL;
2982 BUG_ON(!PageLocked(page));
2983 if (PageWriteback(page))
2986 if (mapping == NULL) { /* can this still happen? */
2987 ret = drop_buffers(page, &buffers_to_free);
2991 spin_lock(&mapping->private_lock);
2992 ret = drop_buffers(page, &buffers_to_free);
2995 * If the filesystem writes its buffers by hand (eg ext3)
2996 * then we can have clean buffers against a dirty page. We
2997 * clean the page here; otherwise later reattachment of buffers
2998 * could encounter a non-uptodate page, which is unresolvable.
2999 * This only applies in the rare case where try_to_free_buffers
3000 * succeeds but the page is not freed.
3002 clear_page_dirty(page);
3004 spin_unlock(&mapping->private_lock);
3006 if (buffers_to_free) {
3007 struct buffer_head *bh = buffers_to_free;
3010 struct buffer_head *next = bh->b_this_page;
3011 free_buffer_head(bh);
3013 } while (bh != buffers_to_free);
3017 EXPORT_SYMBOL(try_to_free_buffers);
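/*
 * Illustrative sketch (not part of buffer.c): a filesystem's ->releasepage
 * usually ends up here after checking its own per-page state; filesystems
 * with no such state can point ->releasepage at try_to_free_buffers()
 * directly.  myfs_page_is_busy() is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (myfs_page_is_busy(page))	/* fs-private reason to keep the buffers */
		return 0;
	return try_to_free_buffers(page);
}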
3019 void block_sync_page(struct page *page)
3021 struct address_space *mapping;
3024 mapping = page_mapping(page);
3026 blk_run_backing_dev(mapping->backing_dev_info, page);
3030 * There are no bdflush tunables left. But distributions are
3031 * still running obsolete flush daemons, so we terminate them here.
3033 * Use of bdflush() is deprecated and will be removed in a future kernel.
3034 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3036 asmlinkage long sys_bdflush(int func, long data)
3038 static int msg_count;
3040 if (!capable(CAP_SYS_ADMIN))
3043 if (msg_count < 5) {
3046 "warning: process `%s' used the obsolete bdflush"
3047 " system call\n", current->comm);
3048 printk(KERN_INFO "Fix your initscripts?\n");
3057 * Buffer-head allocation
3059 static kmem_cache_t *bh_cachep;
3062 * Once the number of bh's in the machine exceeds this level, we start
3063 * stripping them in writeback.
3065 static int max_buffer_heads;
3067 int buffer_heads_over_limit;
3069 struct bh_accounting {
3070 int nr; /* Number of live bh's */
3071 int ratelimit; /* Limit cacheline bouncing */
3074 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3076 static void recalc_bh_state(void)
3081 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3083 __get_cpu_var(bh_accounting).ratelimit = 0;
3084 for_each_online_cpu(i)
3085 tot += per_cpu(bh_accounting, i).nr;
3086 buffer_heads_over_limit = (tot > max_buffer_heads);
3089 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3091 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3093 get_cpu_var(bh_accounting).nr++;
3095 put_cpu_var(bh_accounting);
3099 EXPORT_SYMBOL(alloc_buffer_head);
3101 void free_buffer_head(struct buffer_head *bh)
3103 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3104 kmem_cache_free(bh_cachep, bh);
3105 get_cpu_var(bh_accounting).nr--;
3107 put_cpu_var(bh_accounting);
3109 EXPORT_SYMBOL(free_buffer_head);
3112 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3114 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3115 SLAB_CTOR_CONSTRUCTOR) {
3116 struct buffer_head * bh = (struct buffer_head *)data;
3118 memset(bh, 0, sizeof(*bh));
3119 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3123 #ifdef CONFIG_HOTPLUG_CPU
3124 static void buffer_exit_cpu(int cpu)
3127 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3129 for (i = 0; i < BH_LRU_SIZE; i++) {
3133 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3134 per_cpu(bh_accounting, cpu).nr = 0;
3135 put_cpu_var(bh_accounting);
3138 static int buffer_cpu_notify(struct notifier_block *self,
3139 unsigned long action, void *hcpu)
3141 if (action == CPU_DEAD)
3142 buffer_exit_cpu((unsigned long)hcpu);
3145 #endif /* CONFIG_HOTPLUG_CPU */
3147 void __init buffer_init(void)
3151 bh_cachep = kmem_cache_create("buffer_head",
3152 sizeof(struct buffer_head), 0,
3153 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3159 * Limit the bh occupancy to 10% of ZONE_NORMAL
3161 nrpages = (nr_free_buffer_pages() * 10) / 100;
3162 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3163 hotcpu_notifier(buffer_cpu_notify, 0);
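/*
 * Worked example (illustrative only; the sizeof() value is an assumption,
 * not taken from this kernel): with PAGE_SIZE == 4096,
 * nr_free_buffer_pages() == 100000 and sizeof(struct buffer_head) == 64,
 * nrpages = 100000 * 10 / 100 = 10000 and
 * max_buffer_heads = 10000 * (4096 / 64) = 640000.  Once roughly 640k buffer
 * heads exist (about 40MB, i.e. 10% of those pages), recalc_bh_state() sets
 * buffer_heads_over_limit and writeback starts stripping buffers from clean
 * pages.
 */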
3166 EXPORT_SYMBOL(__bforget);
3167 EXPORT_SYMBOL(__brelse);
3168 EXPORT_SYMBOL(__wait_on_buffer);
3169 EXPORT_SYMBOL(block_commit_write);
3170 EXPORT_SYMBOL(block_prepare_write);
3171 EXPORT_SYMBOL(block_read_full_page);
3172 EXPORT_SYMBOL(block_sync_page);
3173 EXPORT_SYMBOL(block_truncate_page);
3174 EXPORT_SYMBOL(block_write_full_page);
3175 EXPORT_SYMBOL(cont_prepare_write);
3176 EXPORT_SYMBOL(end_buffer_async_write);
3177 EXPORT_SYMBOL(end_buffer_read_sync);
3178 EXPORT_SYMBOL(end_buffer_write_sync);
3179 EXPORT_SYMBOL(file_fsync);
3180 EXPORT_SYMBOL(fsync_bdev);
3181 EXPORT_SYMBOL(generic_block_bmap);
3182 EXPORT_SYMBOL(generic_commit_write);
3183 EXPORT_SYMBOL(generic_cont_expand);
3184 EXPORT_SYMBOL(generic_cont_expand_simple);
3185 EXPORT_SYMBOL(init_buffer);
3186 EXPORT_SYMBOL(invalidate_bdev);
3187 EXPORT_SYMBOL(ll_rw_block);
3188 EXPORT_SYMBOL(mark_buffer_dirty);
3189 EXPORT_SYMBOL(submit_bh);
3190 EXPORT_SYMBOL(sync_dirty_buffer);
3191 EXPORT_SYMBOL(unlock_buffer);