4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/smp_lock.h>
28 #include <linux/capability.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/task_io_accounting_ops.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 smp_mb__before_clear_bit();
82 clear_buffer_locked(bh);
83 smp_mb__after_clear_bit();
84 wake_up_bit(&bh->b_state, BH_Lock);
88 * Block until a buffer comes unlocked. This doesn't stop it
89 * from becoming locked again - you have to lock it yourself
90 * if you want to preserve its state.
92 void __wait_on_buffer(struct buffer_head * bh)
94 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
98 __clear_page_buffers(struct page *page)
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
105 static void buffer_io_error(struct buffer_head *bh)
107 char b[BDEVNAME_SIZE];
109 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 bdevname(bh->b_bdev, b),
111 (unsigned long long)bh->b_blocknr);
115 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
116 * unlock the buffer. This is what ll_rw_block uses too.
118 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
121 set_buffer_uptodate(bh);
123 /* This happens, due to failed READA attempts. */
124 clear_buffer_uptodate(bh);
130 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
132 char b[BDEVNAME_SIZE];
135 set_buffer_uptodate(bh);
137 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
139 printk(KERN_WARNING "lost page write due to "
141 bdevname(bh->b_bdev, b));
143 set_buffer_write_io_error(bh);
144 clear_buffer_uptodate(bh);
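/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * the completion handlers above support the common synchronous-write pattern -
 * lock the buffer, submit it, then wait for the end_io handler to unlock it.
 * The function name is hypothetical; sync_dirty_buffer() elsewhere in this
 * file does essentially this.
 */
static int example_write_and_wait(struct buffer_head *bh)
{
	int ret = 0;

	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}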
151 * Write out and wait upon all the dirty data associated with a block
152 * device via its mapping. Does not take the superblock lock.
154 int sync_blockdev(struct block_device *bdev)
159 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
162 EXPORT_SYMBOL(sync_blockdev);
164 EXPORT_SYMBOL(fsync_super);
167 * Write out and wait upon all dirty data associated with this
168 * device. Filesystem data as well as the underlying block
169 * device. Takes the superblock lock.
171 int fsync_bdev(struct block_device *bdev)
173 struct super_block *sb = get_super(bdev);
175 int res = fsync_super(sb);
179 return sync_blockdev(bdev);
183 * freeze_bdev -- lock a filesystem and force it into a consistent state
184 * @bdev: blockdevice to lock
186 * This takes the block device bd_mount_sem to make sure no new mounts
187 * happen on bdev until thaw_bdev() is called.
188 * If a superblock is found on this device, we take the s_umount semaphore
189 * on it to make sure nobody unmounts until the snapshot creation is done.
191 struct super_block *freeze_bdev(struct block_device *bdev)
193 struct super_block *sb;
195 down(&bdev->bd_mount_sem);
196 sb = get_super(bdev);
197 if (sb && !(sb->s_flags & MS_RDONLY)) {
198 sb->s_frozen = SB_FREEZE_WRITE;
203 sb->s_frozen = SB_FREEZE_TRANS;
206 sync_blockdev(sb->s_bdev);
208 if (sb->s_op->write_super_lockfs)
209 sb->s_op->write_super_lockfs(sb);
213 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
215 EXPORT_SYMBOL(freeze_bdev);
218 * thaw_bdev -- unlock filesystem
219 * @bdev: blockdevice to unlock
220 * @sb: associated superblock
222 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
224 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
227 BUG_ON(sb->s_bdev != bdev);
229 if (sb->s_op->unlockfs)
230 sb->s_op->unlockfs(sb);
231 sb->s_frozen = SB_UNFROZEN;
233 wake_up(&sb->s_wait_unfrozen);
237 up(&bdev->bd_mount_sem);
239 EXPORT_SYMBOL(thaw_bdev);
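/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * the expected calling pattern for a snapshot-style user of the two helpers
 * above.  The callback and function name are hypothetical.
 */
static void example_snapshot_bdev(struct block_device *bdev,
				  void (*take_snapshot)(struct block_device *))
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* block new mounts, sync, freeze writes */
	take_snapshot(bdev);		/* the device is now in a consistent state */
	thaw_bdev(bdev, sb);		/* release s_umount and bd_mount_sem */
}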
242 * Various filesystems appear to want __find_get_block to be non-blocking.
243 * But it's the page lock which protects the buffers. To get around this,
244 * we get exclusion from try_to_free_buffers with the blockdev mapping's
247 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
248 * may be quite high. This code could TryLock the page, and if that
249 * succeeds, there is no need to take private_lock. (But if
250 * private_lock is contended then so is mapping->tree_lock).
252 static struct buffer_head *
253 __find_get_block_slow(struct block_device *bdev, sector_t block)
255 struct inode *bd_inode = bdev->bd_inode;
256 struct address_space *bd_mapping = bd_inode->i_mapping;
257 struct buffer_head *ret = NULL;
259 struct buffer_head *bh;
260 struct buffer_head *head;
264 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
265 page = find_get_page(bd_mapping, index);
269 spin_lock(&bd_mapping->private_lock);
270 if (!page_has_buffers(page))
272 head = page_buffers(page);
275 if (bh->b_blocknr == block) {
280 if (!buffer_mapped(bh))
282 bh = bh->b_this_page;
283 } while (bh != head);
285 /* we might be here because some of the buffers on this page are
286 * not mapped. This is due to various races between
287 * file io on the block device and getblk. It gets dealt with
288 * elsewhere, don't buffer_error if we had some unmapped buffers
291 printk("__find_get_block_slow() failed. "
292 "block=%llu, b_blocknr=%llu\n",
293 (unsigned long long)block,
294 (unsigned long long)bh->b_blocknr);
295 printk("b_state=0x%08lx, b_size=%zu\n",
296 bh->b_state, bh->b_size);
297 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
300 spin_unlock(&bd_mapping->private_lock);
301 page_cache_release(page);
306 /* If invalidate_buffers() will trash dirty buffers, it means some kind
307 of fs corruption is going on. Trashing dirty data always implies losing
308 information that was supposed to be just stored on the physical layer
311 Thus invalidate_buffers in general usage is not allowed to trash
312 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
313 be preserved. These buffers are simply skipped.
315 We also skip buffers which are still in use. For example this can
316 happen if a userspace program is reading the block device.
318 NOTE: if the user removes a removable-media disk while there is still
319 dirty data that has not been synced to disk (due to a bug in the device
320 driver or to a user error), then by not destroying the dirty buffers we
321 could also corrupt the next media inserted; a parameter is therefore
322 necessary to handle this case as safely as possible (trying not to
323 corrupt the newly inserted disk with data belonging to the old, now
324 corrupted, disk). For the ramdisk, on the other hand, the natural way
325 to release the ramdisk memory is to destroy its dirty buffers.
327 These are the two special cases. Normal usage implies that the device
328 driver issues a sync on the device (without waiting for I/O completion)
329 followed by an invalidate_buffers call that doesn't trash dirty buffers.
331 For handling cache coherency with the blkdev pagecache the 'update' case
332 has been introduced. It is needed to re-read from disk any pinned
333 buffer. NOTE: re-reading from disk is destructive so we can do it only
334 when we assume nobody is changing the buffercache under our I/O and when
335 we think the disk contains more recent information than the buffercache.
336 The update == 1 pass marks the buffers we need to update, the update == 2
337 pass does the actual I/O. */
338 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
340 struct address_space *mapping = bdev->bd_inode->i_mapping;
342 if (mapping->nrpages == 0)
345 invalidate_bh_lrus();
347 * FIXME: what about destroy_dirty_buffers?
348 * We really want to use invalidate_inode_pages2() for
349 * that, but not until that's cleaned up.
351 invalidate_inode_pages(mapping);
355 * Kick pdflush then try to free up some ZONE_NORMAL memory.
357 static void free_more_memory(void)
362 wakeup_pdflush(1024);
365 for_each_online_pgdat(pgdat) {
366 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
368 try_to_free_pages(zones, GFP_NOFS);
373 * I/O completion handler for block_read_full_page() - pages
374 * which come unlocked at the end of I/O.
376 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
379 struct buffer_head *first;
380 struct buffer_head *tmp;
382 int page_uptodate = 1;
384 BUG_ON(!buffer_async_read(bh));
388 set_buffer_uptodate(bh);
390 clear_buffer_uptodate(bh);
391 if (printk_ratelimit())
397 * Be _very_ careful from here on. Bad things can happen if
398 * two buffer heads end IO at almost the same time and both
399 * decide that the page is now completely done.
401 first = page_buffers(page);
402 local_irq_save(flags);
403 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
404 clear_buffer_async_read(bh);
408 if (!buffer_uptodate(tmp))
410 if (buffer_async_read(tmp)) {
411 BUG_ON(!buffer_locked(tmp));
414 tmp = tmp->b_this_page;
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
420 * If none of the buffers had errors and they are all
421 * uptodate then we can set the page uptodate.
423 if (page_uptodate && !PageError(page))
424 SetPageUptodate(page);
429 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
430 local_irq_restore(flags);
435 * Completion handler for block_write_full_page() - pages which are unlocked
436 * during I/O, and which have PageWriteback cleared upon I/O completion.
438 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
440 char b[BDEVNAME_SIZE];
442 struct buffer_head *first;
443 struct buffer_head *tmp;
446 BUG_ON(!buffer_async_write(bh));
450 set_buffer_uptodate(bh);
452 if (printk_ratelimit()) {
454 printk(KERN_WARNING "lost page write due to "
456 bdevname(bh->b_bdev, b));
458 set_bit(AS_EIO, &page->mapping->flags);
459 set_buffer_write_io_error(bh);
460 clear_buffer_uptodate(bh);
464 first = page_buffers(page);
465 local_irq_save(flags);
466 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
468 clear_buffer_async_write(bh);
470 tmp = bh->b_this_page;
472 if (buffer_async_write(tmp)) {
473 BUG_ON(!buffer_locked(tmp));
476 tmp = tmp->b_this_page;
478 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
479 local_irq_restore(flags);
480 end_page_writeback(page);
484 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
485 local_irq_restore(flags);
490 * If a page's buffers are under async read-in (end_buffer_async_read
491 * completion) then there is a possibility that another thread of
492 * control could lock one of the buffers after it has completed
493 * but while some of the other buffers have not completed. This
494 * locked buffer would confuse end_buffer_async_read() into not unlocking
495 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
496 * that this buffer is not under async I/O.
498 * The page comes unlocked when it has no locked buffer_async buffers
501 * PageLocked prevents anyone from starting new async I/O against any of
504 * PageWriteback is used to prevent simultaneous writeout of the same
507 * PageLocked prevents anyone from starting writeback of a page which is
508 * under read I/O (PageWriteback is only ever set against a locked page).
510 static void mark_buffer_async_read(struct buffer_head *bh)
512 bh->b_end_io = end_buffer_async_read;
513 set_buffer_async_read(bh);
516 void mark_buffer_async_write(struct buffer_head *bh)
518 bh->b_end_io = end_buffer_async_write;
519 set_buffer_async_write(bh);
521 EXPORT_SYMBOL(mark_buffer_async_write);
525 * fs/buffer.c contains helper functions for buffer-backed address space's
526 * fsync functions. A common requirement for buffer-based filesystems is
527 * that certain data from the backing blockdev needs to be written out for
528 * a successful fsync(). For example, ext2 indirect blocks need to be
529 * written back and waited upon before fsync() returns.
531 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
532 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
533 * management of a list of dependent buffers at ->i_mapping->private_list.
535 * Locking is a little subtle: try_to_free_buffers() will remove buffers
536 * from their controlling inode's queue when they are being freed. But
537 * try_to_free_buffers() will be operating against the *blockdev* mapping
538 * at the time, not against the S_ISREG file which depends on those buffers.
539 * So the locking for private_list is via the private_lock in the address_space
540 * which backs the buffers. Which is different from the address_space
541 * against which the buffers are listed. So for a particular address_space,
542 * mapping->private_lock does *not* protect mapping->private_list! In fact,
543 * mapping->private_list will always be protected by the backing blockdev's
546 * Which introduces a requirement: all buffers on an address_space's
547 * ->private_list must be from the same address_space: the blockdev's.
549 * address_spaces which do not place buffers at ->private_list via these
550 * utility functions are free to use private_lock and private_list for
551 * whatever they want. The only requirement is that list_empty(private_list)
552 * be true at clear_inode() time.
554 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
555 * filesystems should do that. invalidate_inode_buffers() should just go
556 * BUG_ON(!list_empty).
558 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
559 * take an address_space, not an inode. And it should be called
560 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
563 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
564 * list if it is already on a list. Because if the buffer is on a list,
565 * it *must* already be on the right one. If not, the filesystem is being
566 * silly. This will save a ton of locking. But first we have to ensure
567 * that buffers are taken *off* the old inode's list when they are freed
568 * (presumably in truncate). That requires careful auditing of all
569 * filesystems (do it inside bforget()). It could also be done by bringing
574 * The buffer's backing address_space's private_lock must be held
576 static inline void __remove_assoc_queue(struct buffer_head *bh)
578 list_del_init(&bh->b_assoc_buffers);
579 WARN_ON(!bh->b_assoc_map);
580 if (buffer_write_io_error(bh))
581 set_bit(AS_EIO, &bh->b_assoc_map->flags);
582 bh->b_assoc_map = NULL;
585 int inode_has_buffers(struct inode *inode)
587 return !list_empty(&inode->i_data.private_list);
591 * osync is designed to support O_SYNC io. It waits synchronously for
592 * all already-submitted IO to complete, but does not queue any new
593 * writes to the disk.
595 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
596 * you dirty the buffers, and then use osync_inode_buffers to wait for
597 * completion. Any other dirty buffers which are not yet queued for
598 * write will not be flushed to disk by the osync.
600 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
602 struct buffer_head *bh;
608 list_for_each_prev(p, list) {
610 if (buffer_locked(bh)) {
614 if (!buffer_uptodate(bh))
626 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
628 * @mapping: the mapping which wants those buffers written
630 * Starts I/O against the buffers at mapping->private_list, and waits upon
633 * Basically, this is a convenience function for fsync().
634 * @mapping is a file or directory which needs those buffers to be written for
635 * a successful fsync().
637 int sync_mapping_buffers(struct address_space *mapping)
639 struct address_space *buffer_mapping = mapping->assoc_mapping;
641 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
644 return fsync_buffers_list(&buffer_mapping->private_lock,
645 &mapping->private_list);
647 EXPORT_SYMBOL(sync_mapping_buffers);
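/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * roughly how an ext2-style ->fsync() uses sync_mapping_buffers() to write
 * and wait on the metadata buffers that were queued on ->private_list with
 * mark_buffer_dirty_inode().  The function name is hypothetical, and
 * write_inode_now() stands in for the filesystem's own inode-sync routine.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err, ret;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	err = write_inode_now(inode, 1);	/* sync the inode, wait for I/O */
	if (ret == 0)
		ret = err;
	return ret;
}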
650 * Called when we've recently written block `bblock', and it is known that
651 * `bblock' was for a buffer_boundary() buffer. This means that the block at
652 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
653 * dirty, schedule it for IO. So that indirects merge nicely with their data.
655 void write_boundary_block(struct block_device *bdev,
656 sector_t bblock, unsigned blocksize)
658 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
660 if (buffer_dirty(bh))
661 ll_rw_block(WRITE, 1, &bh);
666 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
668 struct address_space *mapping = inode->i_mapping;
669 struct address_space *buffer_mapping = bh->b_page->mapping;
671 mark_buffer_dirty(bh);
672 if (!mapping->assoc_mapping) {
673 mapping->assoc_mapping = buffer_mapping;
675 BUG_ON(mapping->assoc_mapping != buffer_mapping);
677 if (list_empty(&bh->b_assoc_buffers)) {
678 spin_lock(&buffer_mapping->private_lock);
679 list_move_tail(&bh->b_assoc_buffers,
680 &mapping->private_list);
681 bh->b_assoc_map = mapping;
682 spin_unlock(&buffer_mapping->private_lock);
685 EXPORT_SYMBOL(mark_buffer_dirty_inode);
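/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * after modifying a metadata block (e.g. an indirect block) read via
 * sb_bread(), a filesystem ties it to the owning inode so that a later
 * fsync() of that inode finds it on ->private_list.  The function name is
 * hypothetical.
 */
static void example_dirty_indirect(struct inode *inode, sector_t block)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, block);

	if (!bh)
		return;
	/* ... update the block pointers in bh->b_data here ... */
	mark_buffer_dirty_inode(bh, inode);
	brelse(bh);
}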
688 * Add a page to the dirty page list.
690 * It is a sad fact of life that this function is called from several places
691 * deeply under spinlocking. It may not sleep.
693 * If the page has buffers, the uptodate buffers are set dirty, to preserve
694 * dirty-state coherency between the page and the buffers. If the page does
695 * not have buffers then when they are later attached they will all be set
698 * The buffers are dirtied before the page is dirtied. There's a small race
699 * window in which a writepage caller may see the page cleanness but not the
700 * buffer dirtiness. That's fine. If this code were to set the page dirty
701 * before the buffers, a concurrent writepage caller could clear the page dirty
702 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
703 * page on the dirty page list.
705 * We use private_lock to lock against try_to_free_buffers while using the
706 * page's buffer list. Also use this to protect against clean buffers being
707 * added to the page after it was set dirty.
709 * FIXME: may need to call ->reservepage here as well. That's rather up to the
710 * address_space though.
712 int __set_page_dirty_buffers(struct page *page)
714 struct address_space * const mapping = page_mapping(page);
716 if (unlikely(!mapping))
717 return !TestSetPageDirty(page);
719 spin_lock(&mapping->private_lock);
720 if (page_has_buffers(page)) {
721 struct buffer_head *head = page_buffers(page);
722 struct buffer_head *bh = head;
725 set_buffer_dirty(bh);
726 bh = bh->b_this_page;
727 } while (bh != head);
729 spin_unlock(&mapping->private_lock);
731 if (TestSetPageDirty(page))
734 write_lock_irq(&mapping->tree_lock);
735 if (page->mapping) { /* Race with truncate? */
736 if (mapping_cap_account_dirty(mapping)) {
737 __inc_zone_page_state(page, NR_FILE_DIRTY);
738 task_io_account_write(PAGE_CACHE_SIZE);
740 radix_tree_tag_set(&mapping->page_tree,
741 page_index(page), PAGECACHE_TAG_DIRTY);
743 write_unlock_irq(&mapping->tree_lock);
744 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
747 EXPORT_SYMBOL(__set_page_dirty_buffers);
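/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * set_page_dirty() falls back to this helper when a mapping provides no
 * ->set_page_dirty method, and a buffer-backed filesystem may also name it
 * explicitly in its address_space_operations.  The structure name is
 * hypothetical.
 */
static const struct address_space_operations example_dirty_aops = {
	/* ... ->readpage, ->writepage and friends elided ... */
	.set_page_dirty	= __set_page_dirty_buffers,
};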
750 * Write out and wait upon a list of buffers.
752 * We have conflicting pressures: we want to make sure that all
753 * initially dirty buffers get waited on, but that any subsequently
754 * dirtied buffers don't. After all, we don't want fsync to last
755 * forever if somebody is actively writing to the file.
757 * Do this in two main stages: first we copy dirty buffers to a
758 * temporary inode list, queueing the writes as we go. Then we clean
759 * up, waiting for those writes to complete.
761 * During this second stage, any subsequent updates to the file may end
762 * up refiling the buffer on the original inode's dirty list again, so
763 * there is a chance we will end up with a buffer queued for write but
764 * not yet completed on that list. So, as a final cleanup we go through
765 * the osync code to catch these locked, dirty buffers without requeuing
766 * any newly dirty buffers for write.
768 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
770 struct buffer_head *bh;
771 struct list_head tmp;
774 INIT_LIST_HEAD(&tmp);
777 while (!list_empty(list)) {
778 bh = BH_ENTRY(list->next);
779 __remove_assoc_queue(bh);
780 if (buffer_dirty(bh) || buffer_locked(bh)) {
781 list_add(&bh->b_assoc_buffers, &tmp);
782 if (buffer_dirty(bh)) {
786 * Ensure any pending I/O completes so that
787 * ll_rw_block() actually writes the current
788 * contents - it is a noop if I/O is still in
789 * flight on potentially older contents.
791 ll_rw_block(SWRITE, 1, &bh);
798 while (!list_empty(&tmp)) {
799 bh = BH_ENTRY(tmp.prev);
800 list_del_init(&bh->b_assoc_buffers);
804 if (!buffer_uptodate(bh))
811 err2 = osync_buffers_list(lock, list);
819 * Invalidate any and all dirty buffers on a given inode. We are
820 * probably unmounting the fs, but that doesn't mean we have already
821 * done a sync(). Just drop the buffers from the inode list.
823 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
824 * assumes that all the buffers are against the blockdev. Not true
827 void invalidate_inode_buffers(struct inode *inode)
829 if (inode_has_buffers(inode)) {
830 struct address_space *mapping = &inode->i_data;
831 struct list_head *list = &mapping->private_list;
832 struct address_space *buffer_mapping = mapping->assoc_mapping;
834 spin_lock(&buffer_mapping->private_lock);
835 while (!list_empty(list))
836 __remove_assoc_queue(BH_ENTRY(list->next));
837 spin_unlock(&buffer_mapping->private_lock);
842 * Remove any clean buffers from the inode's buffer list. This is called
843 * when we're trying to free the inode itself. Those buffers can pin it.
845 * Returns true if all buffers were removed.
847 int remove_inode_buffers(struct inode *inode)
851 if (inode_has_buffers(inode)) {
852 struct address_space *mapping = &inode->i_data;
853 struct list_head *list = &mapping->private_list;
854 struct address_space *buffer_mapping = mapping->assoc_mapping;
856 spin_lock(&buffer_mapping->private_lock);
857 while (!list_empty(list)) {
858 struct buffer_head *bh = BH_ENTRY(list->next);
859 if (buffer_dirty(bh)) {
863 __remove_assoc_queue(bh);
865 spin_unlock(&buffer_mapping->private_lock);
871 * Create the appropriate buffers when given a page for the data area and
872 * the size of each buffer. Use the bh->b_this_page linked list to
873 * follow the buffers created. Return NULL if unable to create more
876 * The retry flag is used to differentiate async IO (paging, swapping),
877 * which may not fail, from ordinary buffer allocations.
879 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
882 struct buffer_head *bh, *head;
888 while ((offset -= size) >= 0) {
889 bh = alloc_buffer_head(GFP_NOFS);
894 bh->b_this_page = head;
899 atomic_set(&bh->b_count, 0);
900 bh->b_private = NULL;
903 /* Link the buffer to its page */
904 set_bh_page(bh, page, offset);
906 init_buffer(bh, NULL, NULL);
910 * In case anything failed, we just free everything we got.
916 head = head->b_this_page;
917 free_buffer_head(bh);
922 * Return failure for non-async IO requests. Async IO requests
923 * are not allowed to fail, so we have to wait until buffer heads
924 * become available. But we don't want tasks sleeping with
925 * partially complete buffers, so all were released above.
930 /* We're _really_ low on memory. Now we just
931 * wait for old buffer heads to become free due to
932 * finishing IO. Since this is an async request and
933 * the reserve list is empty, we're sure there are
934 * async buffer heads in use.
939 EXPORT_SYMBOL_GPL(alloc_page_buffers);
942 link_dev_buffers(struct page *page, struct buffer_head *head)
944 struct buffer_head *bh, *tail;
949 bh = bh->b_this_page;
951 tail->b_this_page = head;
952 attach_page_buffers(page, head);
956 * Initialise the state of a blockdev page's buffers.
959 init_page_buffers(struct page *page, struct block_device *bdev,
960 sector_t block, int size)
962 struct buffer_head *head = page_buffers(page);
963 struct buffer_head *bh = head;
964 int uptodate = PageUptodate(page);
967 if (!buffer_mapped(bh)) {
968 init_buffer(bh, NULL, NULL);
970 bh->b_blocknr = block;
972 set_buffer_uptodate(bh);
973 set_buffer_mapped(bh);
976 bh = bh->b_this_page;
977 } while (bh != head);
981 * Create the page-cache page that contains the requested block.
983 * This is used purely for blockdev mappings.
986 grow_dev_page(struct block_device *bdev, sector_t block,
987 pgoff_t index, int size)
989 struct inode *inode = bdev->bd_inode;
991 struct buffer_head *bh;
993 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
997 BUG_ON(!PageLocked(page));
999 if (page_has_buffers(page)) {
1000 bh = page_buffers(page);
1001 if (bh->b_size == size) {
1002 init_page_buffers(page, bdev, block, size);
1005 if (!try_to_free_buffers(page))
1010 * Allocate some buffers for this page
1012 bh = alloc_page_buffers(page, size, 0);
1017 * Link the page to the buffers and initialise them. Take the
1018 * lock to be atomic wrt __find_get_block(), which does not
1019 * run under the page lock.
1021 spin_lock(&inode->i_mapping->private_lock);
1022 link_dev_buffers(page, bh);
1023 init_page_buffers(page, bdev, block, size);
1024 spin_unlock(&inode->i_mapping->private_lock);
1030 page_cache_release(page);
1035 * Create buffers for the specified block device block's page. If
1036 * that page was dirty, the buffers are set dirty also.
1038 * Except that's a bug. Attaching dirty buffers to a dirty
1039 * blockdev's page can result in filesystem corruption, because
1040 * some of those buffers may be aliases of filesystem data.
1041 * grow_dev_page() will go BUG() if this happens.
1044 grow_buffers(struct block_device *bdev, sector_t block, int size)
1053 } while ((size << sizebits) < PAGE_SIZE);
1055 index = block >> sizebits;
1058 * Check for a block which wants to lie outside our maximum possible
1059 * pagecache index. (this comparison is done using sector_t types).
1061 if (unlikely(index != block >> sizebits)) {
1062 char b[BDEVNAME_SIZE];
1064 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1066 __FUNCTION__, (unsigned long long)block,
1070 block = index << sizebits;
1071 /* Create a page with the proper size buffers.. */
1072 page = grow_dev_page(bdev, block, index, size);
1076 page_cache_release(page);
1080 static struct buffer_head *
1081 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1083 /* Size must be a multiple of the hard sector size */
1084 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1085 (size < 512 || size > PAGE_SIZE))) {
1086 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1088 printk(KERN_ERR "hardsect size: %d\n",
1089 bdev_hardsect_size(bdev));
1096 struct buffer_head * bh;
1099 bh = __find_get_block(bdev, block, size);
1103 ret = grow_buffers(bdev, block, size);
1112 * The relationship between dirty buffers and dirty pages:
1114 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1115 * the page is tagged dirty in its radix tree.
1117 * At all times, the dirtiness of the buffers represents the dirtiness of
1118 * subsections of the page. If the page has buffers, the page dirty bit is
1119 * merely a hint about the true dirty state.
1121 * When a page is set dirty in its entirety, all its buffers are marked dirty
1122 * (if the page has buffers).
1124 * When a buffer is marked dirty, its page is dirtied, but the page's other
1127 * Also. When blockdev buffers are explicitly read with bread(), they
1128 * individually become uptodate. But their backing page remains not
1129 * uptodate - even if all of its buffers are uptodate. A subsequent
1130 * block_read_full_page() against that page will discover all the uptodate
1131 * buffers, will set the page uptodate and will perform no I/O.
1135 * mark_buffer_dirty - mark a buffer_head as needing writeout
1136 * @bh: the buffer_head to mark dirty
1138 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1139 * backing page dirty, then tag the page as dirty in its address_space's radix
1140 * tree and then attach the address_space's inode to its superblock's dirty
1143 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1144 * mapping->tree_lock and the global inode_lock.
1146 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1148 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1149 __set_page_dirty_nobuffers(bh->b_page);
1153 * Decrement a buffer_head's reference count. If all buffers against a page
1154 * have zero reference count, are clean and unlocked, and if the page is clean
1155 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1156 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1157 * a page but it ends up not being freed, and buffers may later be reattached).
1159 void __brelse(struct buffer_head * buf)
1161 if (atomic_read(&buf->b_count)) {
1165 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1170 * bforget() is like brelse(), except it discards any
1171 * potentially dirty data.
1173 void __bforget(struct buffer_head *bh)
1175 clear_buffer_dirty(bh);
1176 if (!list_empty(&bh->b_assoc_buffers)) {
1177 struct address_space *buffer_mapping = bh->b_page->mapping;
1179 spin_lock(&buffer_mapping->private_lock);
1180 list_del_init(&bh->b_assoc_buffers);
1181 bh->b_assoc_map = NULL;
1182 spin_unlock(&buffer_mapping->private_lock);
1187 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1190 if (buffer_uptodate(bh)) {
1195 bh->b_end_io = end_buffer_read_sync;
1196 submit_bh(READ, bh);
1198 if (buffer_uptodate(bh))
1206 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1207 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1208 * refcount elevated by one when they're in an LRU. A buffer can only appear
1209 * once in a particular CPU's LRU. A single buffer can be present in multiple
1210 * CPU's LRUs at the same time.
1212 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1213 * sb_find_get_block().
1215 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1216 * a local interrupt disable for that.
1219 #define BH_LRU_SIZE 8
1222 struct buffer_head *bhs[BH_LRU_SIZE];
1225 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1228 #define bh_lru_lock() local_irq_disable()
1229 #define bh_lru_unlock() local_irq_enable()
1231 #define bh_lru_lock() preempt_disable()
1232 #define bh_lru_unlock() preempt_enable()
1235 static inline void check_irqs_on(void)
1237 #ifdef irqs_disabled
1238 BUG_ON(irqs_disabled());
1243 * The LRU management algorithm is dopey-but-simple. Sorry.
1245 static void bh_lru_install(struct buffer_head *bh)
1247 struct buffer_head *evictee = NULL;
1252 lru = &__get_cpu_var(bh_lrus);
1253 if (lru->bhs[0] != bh) {
1254 struct buffer_head *bhs[BH_LRU_SIZE];
1260 for (in = 0; in < BH_LRU_SIZE; in++) {
1261 struct buffer_head *bh2 = lru->bhs[in];
1266 if (out >= BH_LRU_SIZE) {
1267 BUG_ON(evictee != NULL);
1274 while (out < BH_LRU_SIZE)
1276 memcpy(lru->bhs, bhs, sizeof(bhs));
1285 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1287 static struct buffer_head *
1288 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1290 struct buffer_head *ret = NULL;
1296 lru = &__get_cpu_var(bh_lrus);
1297 for (i = 0; i < BH_LRU_SIZE; i++) {
1298 struct buffer_head *bh = lru->bhs[i];
1300 if (bh && bh->b_bdev == bdev &&
1301 bh->b_blocknr == block && bh->b_size == size) {
1304 lru->bhs[i] = lru->bhs[i - 1];
1319 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1320 * it in the LRU and mark it as accessed. If it is not present then return
1323 struct buffer_head *
1324 __find_get_block(struct block_device *bdev, sector_t block, int size)
1326 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1329 bh = __find_get_block_slow(bdev, block);
1337 EXPORT_SYMBOL(__find_get_block);
1340 * __getblk will locate (and, if necessary, create) the buffer_head
1341 * which corresponds to the passed block_device, block and size. The
1342 * returned buffer has its reference count incremented.
1344 * __getblk() cannot fail - it just keeps trying. If you pass it an
1345 * illegal block number, __getblk() will happily return a buffer_head
1346 * which represents the non-existent block. Very weird.
1348 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1349 * attempt is failing. FIXME, perhaps?
1351 struct buffer_head *
1352 __getblk(struct block_device *bdev, sector_t block, int size)
1354 struct buffer_head *bh = __find_get_block(bdev, block, size);
1358 bh = __getblk_slow(bdev, block, size);
1361 EXPORT_SYMBOL(__getblk);
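/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * the classic metadata overwrite pattern built on __getblk().  Because the
 * whole block is rewritten, no read is needed: the buffer is filled, marked
 * uptodate and dirty, and released, leaving writeback for later.  The
 * function name is hypothetical.
 */
static void example_overwrite_block(struct block_device *bdev, sector_t block,
				    int size, const void *data)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memcpy(bh->b_data, data, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}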
1364 * Do async read-ahead on a buffer..
1366 void __breadahead(struct block_device *bdev, sector_t block, int size)
1368 struct buffer_head *bh = __getblk(bdev, block, size);
1370 ll_rw_block(READA, 1, &bh);
1374 EXPORT_SYMBOL(__breadahead);
1377 * __bread() - reads a specified block and returns the bh
1378 * @bdev: the block_device to read from
1379 * @block: number of block
1380 * @size: size (in bytes) to read
1382 * Reads a specified block, and returns buffer head that contains it.
1383 * It returns NULL if the block was unreadable.
1385 struct buffer_head *
1386 __bread(struct block_device *bdev, sector_t block, int size)
1388 struct buffer_head *bh = __getblk(bdev, block, size);
1390 if (likely(bh) && !buffer_uptodate(bh))
1391 bh = __bread_slow(bh);
1394 EXPORT_SYMBOL(__bread);
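/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * typical read-side usage of __bread() - most filesystems reach it through
 * the sb_bread() wrapper.  The function name is hypothetical.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      int size, void *out)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	memcpy(out, bh->b_data, size);
	brelse(bh);
	return 0;
}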
1397 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1398 * This doesn't race because it runs in each cpu either in irq
1399 * or with preempt disabled.
1401 static void invalidate_bh_lru(void *arg)
1403 struct bh_lru *b = &get_cpu_var(bh_lrus);
1406 for (i = 0; i < BH_LRU_SIZE; i++) {
1410 put_cpu_var(bh_lrus);
1413 static void invalidate_bh_lrus(void)
1415 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1418 void set_bh_page(struct buffer_head *bh,
1419 struct page *page, unsigned long offset)
1422 BUG_ON(offset >= PAGE_SIZE);
1423 if (PageHighMem(page))
1425 * This catches illegal uses and preserves the offset:
1427 bh->b_data = (char *)(0 + offset);
1429 bh->b_data = page_address(page) + offset;
1431 EXPORT_SYMBOL(set_bh_page);
1434 * Called when truncating a buffer on a page completely.
1436 static void discard_buffer(struct buffer_head * bh)
1439 clear_buffer_dirty(bh);
1441 clear_buffer_mapped(bh);
1442 clear_buffer_req(bh);
1443 clear_buffer_new(bh);
1444 clear_buffer_delay(bh);
1449 * block_invalidatepage - invalidate part or all of a buffer-backed page
1451 * @page: the page which is affected
1452 * @offset: the index of the truncation point
1454 * block_invalidatepage() is called when all or part of the page has become
1455 * invalidated by a truncate operation.
1457 * block_invalidatepage() does not have to release all buffers, but it must
1458 * ensure that no dirty buffer is left outside @offset and that no I/O
1459 * is underway against any of the blocks which are outside the truncation
1460 * point. Because the caller is about to free (and possibly reuse) those
1463 void block_invalidatepage(struct page *page, unsigned long offset)
1465 struct buffer_head *head, *bh, *next;
1466 unsigned int curr_off = 0;
1468 BUG_ON(!PageLocked(page));
1469 if (!page_has_buffers(page))
1472 head = page_buffers(page);
1475 unsigned int next_off = curr_off + bh->b_size;
1476 next = bh->b_this_page;
1479 * is this block fully invalidated?
1481 if (offset <= curr_off)
1483 curr_off = next_off;
1485 } while (bh != head);
1488 * We release buffers only if the entire page is being invalidated.
1489 * The get_block cached value has been unconditionally invalidated,
1490 * so real IO is not possible anymore.
1493 try_to_release_page(page, 0);
1497 EXPORT_SYMBOL(block_invalidatepage);
1500 * We attach and possibly dirty the buffers atomically wrt
1501 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1502 * is already excluded via the page lock.
1504 void create_empty_buffers(struct page *page,
1505 unsigned long blocksize, unsigned long b_state)
1507 struct buffer_head *bh, *head, *tail;
1509 head = alloc_page_buffers(page, blocksize, 1);
1512 bh->b_state |= b_state;
1514 bh = bh->b_this_page;
1516 tail->b_this_page = head;
1518 spin_lock(&page->mapping->private_lock);
1519 if (PageUptodate(page) || PageDirty(page)) {
1522 if (PageDirty(page))
1523 set_buffer_dirty(bh);
1524 if (PageUptodate(page))
1525 set_buffer_uptodate(bh);
1526 bh = bh->b_this_page;
1527 } while (bh != head);
1529 attach_page_buffers(page, head);
1530 spin_unlock(&page->mapping->private_lock);
1532 EXPORT_SYMBOL(create_empty_buffers);
1535 * We are taking a block for data and we don't want any output from any
1536 * buffer-cache aliases from the moment this function returns until
1537 * the moment when something explicitly marks the buffer
1538 * dirty (hopefully that will not happen until we free that block ;-)
1539 * We don't even need to mark it not-uptodate - nobody can expect
1540 * anything from a newly allocated buffer anyway. We used to use
1541 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1542 * don't want to mark the alias unmapped, for example - it would confuse
1543 * anyone who might pick it with bread() afterwards...
1545 * Also.. Note that bforget() doesn't lock the buffer. So there can
1546 * be writeout I/O going on against recently-freed buffers. We don't
1547 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1548 * only if we really need to. That happens here.
1550 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1552 struct buffer_head *old_bh;
1556 old_bh = __find_get_block_slow(bdev, block);
1558 clear_buffer_dirty(old_bh);
1559 wait_on_buffer(old_bh);
1560 clear_buffer_req(old_bh);
1564 EXPORT_SYMBOL(unmap_underlying_metadata);
1567 * NOTE! All mapped/uptodate combinations are valid:
1569 * Mapped Uptodate Meaning
1571 * No No "unknown" - must do get_block()
1572 * No Yes "hole" - zero-filled
1573 * Yes No "allocated" - allocated on disk, not read in
1574 * Yes Yes "valid" - allocated and up-to-date in memory.
1576 * "Dirty" is valid only with the last case (mapped+uptodate).
1580 * While block_write_full_page is writing back the dirty buffers under
1581 * the page lock, whoever dirtied the buffers may decide to clean them
1582 * again at any time. We handle that by only looking at the buffer
1583 * state inside lock_buffer().
1585 * If block_write_full_page() is called for regular writeback
1586 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1587 * locked buffer. This can only happen if someone has written the buffer
1588 * directly, with submit_bh(). At the address_space level PageWriteback
1589 * prevents this contention from occurring.
1591 static int __block_write_full_page(struct inode *inode, struct page *page,
1592 get_block_t *get_block, struct writeback_control *wbc)
1596 sector_t last_block;
1597 struct buffer_head *bh, *head;
1598 const unsigned blocksize = 1 << inode->i_blkbits;
1599 int nr_underway = 0;
1601 BUG_ON(!PageLocked(page));
1603 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1605 if (!page_has_buffers(page)) {
1606 create_empty_buffers(page, blocksize,
1607 (1 << BH_Dirty)|(1 << BH_Uptodate));
1611 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1612 * here, and the (potentially unmapped) buffers may become dirty at
1613 * any time. If a buffer becomes dirty here after we've inspected it
1614 * then we just miss that fact, and the page stays dirty.
1616 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1617 * handle that here by just cleaning them.
1620 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1621 head = page_buffers(page);
1625 * Get all the dirty buffers mapped to disk addresses and
1626 * handle any aliases from the underlying blockdev's mapping.
1629 if (block > last_block) {
1631 * mapped buffers outside i_size will occur, because
1632 * this page can be outside i_size when there is a
1633 * truncate in progress.
1636 * The buffer was zeroed by block_write_full_page()
1638 clear_buffer_dirty(bh);
1639 set_buffer_uptodate(bh);
1640 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1641 WARN_ON(bh->b_size != blocksize);
1642 err = get_block(inode, block, bh, 1);
1645 if (buffer_new(bh)) {
1646 /* blockdev mappings never come here */
1647 clear_buffer_new(bh);
1648 unmap_underlying_metadata(bh->b_bdev,
1652 bh = bh->b_this_page;
1654 } while (bh != head);
1657 if (!buffer_mapped(bh))
1660 * If it's a fully non-blocking write attempt and we cannot
1661 * lock the buffer then redirty the page. Note that this can
1662 * potentially cause a busy-wait loop from pdflush and kswapd
1663 * activity, but those code paths have their own higher-level
1666 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1668 } else if (test_set_buffer_locked(bh)) {
1669 redirty_page_for_writepage(wbc, page);
1672 if (test_clear_buffer_dirty(bh)) {
1673 mark_buffer_async_write(bh);
1677 } while ((bh = bh->b_this_page) != head);
1680 * The page and its buffers are protected by PageWriteback(), so we can
1681 * drop the bh refcounts early.
1683 BUG_ON(PageWriteback(page));
1684 set_page_writeback(page);
1687 struct buffer_head *next = bh->b_this_page;
1688 if (buffer_async_write(bh)) {
1689 submit_bh(WRITE, bh);
1693 } while (bh != head);
1698 if (nr_underway == 0) {
1700 * The page was marked dirty, but the buffers were
1701 * clean. Someone wrote them back by hand with
1702 * ll_rw_block/submit_bh. A rare case.
1706 if (!buffer_uptodate(bh)) {
1710 bh = bh->b_this_page;
1711 } while (bh != head);
1713 SetPageUptodate(page);
1714 end_page_writeback(page);
1716 * The page and buffer_heads can be released at any time from
1719 wbc->pages_skipped++; /* We didn't write this page */
1725 * ENOSPC, or some other error. We may already have added some
1726 * blocks to the file, so we need to write these out to avoid
1727 * exposing stale data.
1728 * The page is currently locked and not marked for writeback
1731 /* Recovery: lock and submit the mapped buffers */
1733 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1735 mark_buffer_async_write(bh);
1738 * The buffer may have been set dirty during
1739 * attachment to a dirty page.
1741 clear_buffer_dirty(bh);
1743 } while ((bh = bh->b_this_page) != head);
1745 BUG_ON(PageWriteback(page));
1746 set_page_writeback(page);
1749 struct buffer_head *next = bh->b_this_page;
1750 if (buffer_async_write(bh)) {
1751 clear_buffer_dirty(bh);
1752 submit_bh(WRITE, bh);
1756 } while (bh != head);
1760 static int __block_prepare_write(struct inode *inode, struct page *page,
1761 unsigned from, unsigned to, get_block_t *get_block)
1763 unsigned block_start, block_end;
1766 unsigned blocksize, bbits;
1767 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1769 BUG_ON(!PageLocked(page));
1770 BUG_ON(from > PAGE_CACHE_SIZE);
1771 BUG_ON(to > PAGE_CACHE_SIZE);
1774 blocksize = 1 << inode->i_blkbits;
1775 if (!page_has_buffers(page))
1776 create_empty_buffers(page, blocksize, 0);
1777 head = page_buffers(page);
1779 bbits = inode->i_blkbits;
1780 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1782 for(bh = head, block_start = 0; bh != head || !block_start;
1783 block++, block_start=block_end, bh = bh->b_this_page) {
1784 block_end = block_start + blocksize;
1785 if (block_end <= from || block_start >= to) {
1786 if (PageUptodate(page)) {
1787 if (!buffer_uptodate(bh))
1788 set_buffer_uptodate(bh);
1793 clear_buffer_new(bh);
1794 if (!buffer_mapped(bh)) {
1795 WARN_ON(bh->b_size != blocksize);
1796 err = get_block(inode, block, bh, 1);
1799 if (buffer_new(bh)) {
1800 unmap_underlying_metadata(bh->b_bdev,
1802 if (PageUptodate(page)) {
1803 set_buffer_uptodate(bh);
1806 if (block_end > to || block_start < from) {
1809 kaddr = kmap_atomic(page, KM_USER0);
1813 if (block_start < from)
1814 memset(kaddr+block_start,
1815 0, from-block_start);
1816 flush_dcache_page(page);
1817 kunmap_atomic(kaddr, KM_USER0);
1822 if (PageUptodate(page)) {
1823 if (!buffer_uptodate(bh))
1824 set_buffer_uptodate(bh);
1827 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1828 (block_start < from || block_end > to)) {
1829 ll_rw_block(READ, 1, &bh);
1834 * If we issued read requests - let them complete.
1836 while(wait_bh > wait) {
1837 wait_on_buffer(*--wait_bh);
1838 if (!buffer_uptodate(*wait_bh))
1845 clear_buffer_new(bh);
1846 } while ((bh = bh->b_this_page) != head);
1851 * Zero out any newly allocated blocks to avoid exposing stale
1852 * data. If BH_New is set, we know that the block was newly
1853 * allocated in the above loop.
1858 block_end = block_start+blocksize;
1859 if (block_end <= from)
1861 if (block_start >= to)
1863 if (buffer_new(bh)) {
1866 clear_buffer_new(bh);
1867 kaddr = kmap_atomic(page, KM_USER0);
1868 memset(kaddr+block_start, 0, bh->b_size);
1869 flush_dcache_page(page);
1870 kunmap_atomic(kaddr, KM_USER0);
1871 set_buffer_uptodate(bh);
1872 mark_buffer_dirty(bh);
1875 block_start = block_end;
1876 bh = bh->b_this_page;
1877 } while (bh != head);
1881 static int __block_commit_write(struct inode *inode, struct page *page,
1882 unsigned from, unsigned to)
1884 unsigned block_start, block_end;
1887 struct buffer_head *bh, *head;
1889 blocksize = 1 << inode->i_blkbits;
1891 for(bh = head = page_buffers(page), block_start = 0;
1892 bh != head || !block_start;
1893 block_start=block_end, bh = bh->b_this_page) {
1894 block_end = block_start + blocksize;
1895 if (block_end <= from || block_start >= to) {
1896 if (!buffer_uptodate(bh))
1899 set_buffer_uptodate(bh);
1900 mark_buffer_dirty(bh);
1905 * If this is a partial write which happened to make all buffers
1906 * uptodate then we can optimize away a bogus readpage() for
1907 * the next read(). Here we 'discover' whether the page went
1908 * uptodate as a result of this (potentially partial) write.
1911 SetPageUptodate(page);
1916 * Generic "read page" function for block devices that have the normal
1917 * get_block functionality. This covers most of the block device filesystems.
1918 * Reads the page asynchronously --- the unlock_buffer() and
1919 * set/clear_buffer_uptodate() functions propagate buffer state into the
1920 * page struct once IO has completed.
1922 int block_read_full_page(struct page *page, get_block_t *get_block)
1924 struct inode *inode = page->mapping->host;
1925 sector_t iblock, lblock;
1926 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1927 unsigned int blocksize;
1929 int fully_mapped = 1;
1931 BUG_ON(!PageLocked(page));
1932 blocksize = 1 << inode->i_blkbits;
1933 if (!page_has_buffers(page))
1934 create_empty_buffers(page, blocksize, 0);
1935 head = page_buffers(page);
1937 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1938 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1944 if (buffer_uptodate(bh))
1947 if (!buffer_mapped(bh)) {
1951 if (iblock < lblock) {
1952 WARN_ON(bh->b_size != blocksize);
1953 err = get_block(inode, iblock, bh, 0);
1957 if (!buffer_mapped(bh)) {
1958 void *kaddr = kmap_atomic(page, KM_USER0);
1959 memset(kaddr + i * blocksize, 0, blocksize);
1960 flush_dcache_page(page);
1961 kunmap_atomic(kaddr, KM_USER0);
1963 set_buffer_uptodate(bh);
1967 * get_block() might have updated the buffer
1970 if (buffer_uptodate(bh))
1974 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1977 SetPageMappedToDisk(page);
1981 * All buffers are uptodate - we can set the page uptodate
1982 * as well. But not if get_block() returned an error.
1984 if (!PageError(page))
1985 SetPageUptodate(page);
1990 /* Stage two: lock the buffers */
1991 for (i = 0; i < nr; i++) {
1994 mark_buffer_async_read(bh);
1998 * Stage 3: start the IO. Check for uptodateness
1999 * inside the buffer lock in case another process reading
2000 * the underlying blockdev brought it uptodate (the sct fix).
2002 for (i = 0; i < nr; i++) {
2004 if (buffer_uptodate(bh))
2005 end_buffer_async_read(bh, 1);
2007 submit_bh(READ, bh);
2012 /* utility function for filesystems that need to do work on expanding
2013 * truncates. Uses prepare/commit_write to allow the filesystem to
2014 * deal with the hole.
2016 static int __generic_cont_expand(struct inode *inode, loff_t size,
2017 pgoff_t index, unsigned int offset)
2019 struct address_space *mapping = inode->i_mapping;
2021 unsigned long limit;
2025 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2026 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2027 send_sig(SIGXFSZ, current, 0);
2030 if (size > inode->i_sb->s_maxbytes)
2034 page = grab_cache_page(mapping, index);
2037 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2040 * ->prepare_write() may have instantiated a few blocks
2041 * outside i_size. Trim these off again.
2044 page_cache_release(page);
2045 vmtruncate(inode, inode->i_size);
2049 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2052 page_cache_release(page);
2059 int generic_cont_expand(struct inode *inode, loff_t size)
2062 unsigned int offset;
2064 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2066 /* ugh. in prepare/commit_write, if from==to==start of block, we
2067 ** skip the prepare. make sure we never send an offset for the start
2070 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2071 /* caller must handle this extra byte. */
2074 index = size >> PAGE_CACHE_SHIFT;
2076 return __generic_cont_expand(inode, size, index, offset);
2079 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2081 loff_t pos = size - 1;
2082 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2083 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2085 /* prepare/commit_write can handle even if from==to==start of block. */
2086 return __generic_cont_expand(inode, size, index, offset);
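/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * a filesystem that cannot represent holes may extend the file from its
 * ->setattr() (or before a write beyond EOF) using the helper above.  The
 * function name is hypothetical.
 */
static int example_expand_file(struct inode *inode, loff_t new_size)
{
	if (new_size <= inode->i_size)
		return 0;
	return generic_cont_expand_simple(inode, new_size);
}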
2090 * For moronic filesystems that do not allow holes in files.
2091 * We may have to extend the file.
2094 int cont_prepare_write(struct page *page, unsigned offset,
2095 unsigned to, get_block_t *get_block, loff_t *bytes)
2097 struct address_space *mapping = page->mapping;
2098 struct inode *inode = mapping->host;
2099 struct page *new_page;
2103 unsigned blocksize = 1 << inode->i_blkbits;
2106 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2108 new_page = grab_cache_page(mapping, pgpos);
2111 /* we might sleep */
2112 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2113 unlock_page(new_page);
2114 page_cache_release(new_page);
2117 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2118 if (zerofrom & (blocksize-1)) {
2119 *bytes |= (blocksize-1);
2122 status = __block_prepare_write(inode, new_page, zerofrom,
2123 PAGE_CACHE_SIZE, get_block);
2126 kaddr = kmap_atomic(new_page, KM_USER0);
2127 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2128 flush_dcache_page(new_page);
2129 kunmap_atomic(kaddr, KM_USER0);
2130 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2131 unlock_page(new_page);
2132 page_cache_release(new_page);
2135 if (page->index < pgpos) {
2136 /* completely inside the area */
2139 /* page covers the boundary, find the boundary offset */
2140 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2142 /* if we are expanding the file, the last block will be filled */
2143 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2144 *bytes |= (blocksize-1);
2148 /* starting below the boundary? Nothing to zero out */
2149 if (offset <= zerofrom)
2152 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2155 if (zerofrom < offset) {
2156 kaddr = kmap_atomic(page, KM_USER0);
2157 memset(kaddr+zerofrom, 0, offset-zerofrom);
2158 flush_dcache_page(page);
2159 kunmap_atomic(kaddr, KM_USER0);
2160 __block_commit_write(inode, page, zerofrom, offset);
2164 ClearPageUptodate(page);
2168 ClearPageUptodate(new_page);
2169 unlock_page(new_page);
2170 page_cache_release(new_page);
2175 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2176 get_block_t *get_block)
2178 struct inode *inode = page->mapping->host;
2179 int err = __block_prepare_write(inode, page, from, to, get_block);
2181 ClearPageUptodate(page);
2185 int block_commit_write(struct page *page, unsigned from, unsigned to)
2187 struct inode *inode = page->mapping->host;
2188 __block_commit_write(inode,page,from,to);
2192 int generic_commit_write(struct file *file, struct page *page,
2193 unsigned from, unsigned to)
2195 struct inode *inode = page->mapping->host;
2196 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2197 __block_commit_write(inode,page,from,to);
2199 * No need to use i_size_read() here, the i_size
2200 * cannot change under us because we hold i_mutex.
2202 if (pos > inode->i_size) {
2203 i_size_write(inode, pos);
2204 mark_inode_dirty(inode);
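/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * how a simple buffer-backed filesystem of this era typically wires the
 * helpers above into its address_space_operations.  example_get_block stands
 * in for the filesystem's own block-mapping routine; all example_* names are
 * hypothetical, and block_write_full_page() lives elsewhere in this file.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}

static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.writepage	= example_writepage,
	.prepare_write	= example_prepare_write,
	.commit_write	= generic_commit_write,
};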
2211 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2212 * immediately, while under the page lock. So it needs a special end_io
2213 * handler which does not touch the bh after unlocking it.
2215 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2216 * a race there is benign: unlock_buffer() only uses the bh's address for
2217 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2220 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2223 set_buffer_uptodate(bh);
2225 /* This happens, due to failed READA attempts. */
2226 clear_buffer_uptodate(bh);
2232 * On entry, the page is fully not uptodate.
2233 * On exit the page is fully uptodate in the areas outside (from,to)
2235 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2236 get_block_t *get_block)
2238 struct inode *inode = page->mapping->host;
2239 const unsigned blkbits = inode->i_blkbits;
2240 const unsigned blocksize = 1 << blkbits;
2241 struct buffer_head map_bh;
2242 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2243 unsigned block_in_page;
2244 unsigned block_start;
2245 sector_t block_in_file;
2250 int is_mapped_to_disk = 1;
2253 if (PageMappedToDisk(page))
2256 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2257 map_bh.b_page = page;
2260 * We loop across all blocks in the page, whether or not they are
2261 * part of the affected region. This is so we can discover if the
2262 * page is fully mapped-to-disk.
2264 for (block_start = 0, block_in_page = 0;
2265 block_start < PAGE_CACHE_SIZE;
2266 block_in_page++, block_start += blocksize) {
2267 unsigned block_end = block_start + blocksize;
2272 if (block_start >= to)
2274 map_bh.b_size = blocksize;
2275 ret = get_block(inode, block_in_file + block_in_page,
2279 if (!buffer_mapped(&map_bh))
2280 is_mapped_to_disk = 0;
2281 if (buffer_new(&map_bh))
2282 unmap_underlying_metadata(map_bh.b_bdev,
2284 if (PageUptodate(page))
2286 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2287 kaddr = kmap_atomic(page, KM_USER0);
2288 if (block_start < from) {
2289 memset(kaddr+block_start, 0, from-block_start);
2292 if (block_end > to) {
2293 memset(kaddr + to, 0, block_end - to);
2296 flush_dcache_page(page);
2297 kunmap_atomic(kaddr, KM_USER0);
2300 if (buffer_uptodate(&map_bh))
2301 continue; /* reiserfs does this */
2302 if (block_start < from || block_end > to) {
2303 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2309 bh->b_state = map_bh.b_state;
2310 atomic_set(&bh->b_count, 0);
2311 bh->b_this_page = NULL;
2313 bh->b_blocknr = map_bh.b_blocknr;
2314 bh->b_size = blocksize;
2315 bh->b_data = (char *)(long)block_start;
2316 bh->b_bdev = map_bh.b_bdev;
2317 bh->b_private = NULL;
2318 read_bh[nr_reads++] = bh;
2323 struct buffer_head *bh;
2326 * The page is locked, so these buffers are protected from
2327 * any VM or truncate activity. Hence we don't need to care
2328 * for the buffer_head refcounts.
2330 for (i = 0; i < nr_reads; i++) {
2333 bh->b_end_io = end_buffer_read_nobh;
2334 submit_bh(READ, bh);
2336 for (i = 0; i < nr_reads; i++) {
2339 if (!buffer_uptodate(bh))
2341 free_buffer_head(bh);
2348 if (is_mapped_to_disk)
2349 SetPageMappedToDisk(page);
2350 SetPageUptodate(page);
2353 * Setting the page dirty here isn't necessary for the prepare_write
2354 * function - commit_write will do that. But if/when this function is
2355 * used within the pagefault handler to ensure that all mmapped pages
2356 * have backing space in the filesystem, we will need to dirty the page
2357 * if its contents were altered.
2360 set_page_dirty(page);
2365 for (i = 0; i < nr_reads; i++) {
2367 free_buffer_head(read_bh[i]);
2371 * Error recovery is pretty slack. Clear the page and mark it dirty
2372 * so we'll later zero out any blocks which _were_ allocated.
2374 kaddr = kmap_atomic(page, KM_USER0);
2375 memset(kaddr, 0, PAGE_CACHE_SIZE);
2376 flush_dcache_page(page);
2377 kunmap_atomic(kaddr, KM_USER0);
2378 SetPageUptodate(page);
2379 set_page_dirty(page);
2382 EXPORT_SYMBOL(nobh_prepare_write);
2384 int nobh_commit_write(struct file *file, struct page *page,
2385 unsigned from, unsigned to)
2387 struct inode *inode = page->mapping->host;
2388 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2390 set_page_dirty(page);
2391 if (pos > inode->i_size) {
2392 i_size_write(inode, pos);
2393 mark_inode_dirty(inode);
2397 EXPORT_SYMBOL(nobh_commit_write);
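/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might wire the nobh helpers into its address_space_operations.  The
 * "examplefs_*" names are hypothetical; examplefs_get_block() simply maps
 * file block N to device block N, which only makes sense for a contiguous,
 * preallocated backing store.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	/* Pretend the file is laid out contiguously from device block 0. */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int examplefs_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, examplefs_get_block);
}

/*
 * nobh_commit_write() already has the ->commit_write signature, so the
 * address_space_operations wiring would simply be:
 *
 *	.prepare_write	= examplefs_prepare_write,
 *	.commit_write	= nobh_commit_write,
 */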
2400 * nobh_writepage() - based on block_write_full_page() except
2401 * that it tries to operate without attaching bufferheads to the page.
2404 int nobh_writepage(struct page *page, get_block_t *get_block,
2405 struct writeback_control *wbc)
2407 struct inode * const inode = page->mapping->host;
2408 loff_t i_size = i_size_read(inode);
2409 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2414 /* Is the page fully inside i_size? */
2415 if (page->index < end_index)
2418 /* Is the page fully outside i_size? (truncate in progress) */
2419 offset = i_size & (PAGE_CACHE_SIZE-1);
2420 if (page->index >= end_index+1 || !offset) {
2422 * The page may have dirty, unmapped buffers. For example,
2423 * they may have been added in ext3_writepage(). Make them
2424 * freeable here, so the page does not leak.
2427 /* Not really sure about this - do we need this? */
2428 if (page->mapping->a_ops->invalidatepage)
2429 page->mapping->a_ops->invalidatepage(page, offset);
2432 return 0; /* don't care */
2436 * The page straddles i_size. It must be zeroed out on each and every
2437 * writepage invocation because it may be mmapped. "A file is mapped
2438 * in multiples of the page size. For a file that is not a multiple of
2439 * the page size, the remaining memory is zeroed when mapped, and
2440 * writes to that region are not written out to the file."
2442 kaddr = kmap_atomic(page, KM_USER0);
2443 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2444 flush_dcache_page(page);
2445 kunmap_atomic(kaddr, KM_USER0);
2447 ret = mpage_writepage(page, get_block, wbc);
2449 ret = __block_write_full_page(inode, page, get_block, wbc);
2452 EXPORT_SYMBOL(nobh_writepage);
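/*
 * Continuing the hypothetical examplefs sketch above, ->writepage can be
 * serviced by nobh_writepage() with the same get_block callback:
 *
 *	static int examplefs_writepage(struct page *page,
 *				       struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, examplefs_get_block, wbc);
 *	}
 */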
2455 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2457 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2459 struct inode *inode = mapping->host;
2460 unsigned blocksize = 1 << inode->i_blkbits;
2461 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2462 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2465 const struct address_space_operations *a_ops = mapping->a_ops;
2469 if ((offset & (blocksize - 1)) == 0)
2473 page = grab_cache_page(mapping, index);
2477 to = (offset + blocksize) & ~(blocksize - 1);
2478 ret = a_ops->prepare_write(NULL, page, offset, to);
2480 kaddr = kmap_atomic(page, KM_USER0);
2481 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2482 flush_dcache_page(page);
2483 kunmap_atomic(kaddr, KM_USER0);
2484 set_page_dirty(page);
2487 page_cache_release(page);
2491 EXPORT_SYMBOL(nobh_truncate_page);
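/*
 * Illustrative sketch: a filesystem on the nobh path would typically call
 * nobh_truncate_page() from its ->truncate method to zero the partial
 * block beyond the new EOF.  "examplefs_truncate" is hypothetical and the
 * filesystem-specific block freeing is omitted.
 */
static void examplefs_truncate(struct inode *inode)
{
	nobh_truncate_page(inode->i_mapping, inode->i_size);
	/* Filesystem-specific freeing of blocks past i_size would follow. */
}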
2493 int block_truncate_page(struct address_space *mapping,
2494 loff_t from, get_block_t *get_block)
2496 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2497 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2500 unsigned length, pos;
2501 struct inode *inode = mapping->host;
2503 struct buffer_head *bh;
2507 blocksize = 1 << inode->i_blkbits;
2508 length = offset & (blocksize - 1);
2510 /* Block boundary? Nothing to do */
2514 length = blocksize - length;
2515 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2517 page = grab_cache_page(mapping, index);
2522 if (!page_has_buffers(page))
2523 create_empty_buffers(page, blocksize, 0);
2525 /* Find the buffer that contains "offset" */
2526 bh = page_buffers(page);
2528 while (offset >= pos) {
2529 bh = bh->b_this_page;
2535 if (!buffer_mapped(bh)) {
2536 WARN_ON(bh->b_size != blocksize);
2537 err = get_block(inode, iblock, bh, 0);
2540 /* unmapped? It's a hole - nothing to do */
2541 if (!buffer_mapped(bh))
2545 /* Ok, it's mapped. Make sure it's up-to-date */
2546 if (PageUptodate(page))
2547 set_buffer_uptodate(bh);
2549 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2551 ll_rw_block(READ, 1, &bh);
2553 /* Uhhuh. Read error. Complain and punt. */
2554 if (!buffer_uptodate(bh))
2558 kaddr = kmap_atomic(page, KM_USER0);
2559 memset(kaddr + offset, 0, length);
2560 flush_dcache_page(page);
2561 kunmap_atomic(kaddr, KM_USER0);
2563 mark_buffer_dirty(bh);
2568 page_cache_release(page);
2574 * The generic ->writepage function for buffer-backed address_spaces
2576 int block_write_full_page(struct page *page, get_block_t *get_block,
2577 struct writeback_control *wbc)
2579 struct inode * const inode = page->mapping->host;
2580 loff_t i_size = i_size_read(inode);
2581 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2585 /* Is the page fully inside i_size? */
2586 if (page->index < end_index)
2587 return __block_write_full_page(inode, page, get_block, wbc);
2589 /* Is the page fully outside i_size? (truncate in progress) */
2590 offset = i_size & (PAGE_CACHE_SIZE-1);
2591 if (page->index >= end_index+1 || !offset) {
2593 * The page may have dirty, unmapped buffers. For example,
2594 * they may have been added in ext3_writepage(). Make them
2595 * freeable here, so the page does not leak.
2597 do_invalidatepage(page, 0);
2599 return 0; /* don't care */
2603 * The page straddles i_size. It must be zeroed out on each and every
2604 * writepage invocation because it may be mmapped. "A file is mapped
2605 * in multiples of the page size. For a file that is not a multiple of
2606 * the page size, the remaining memory is zeroed when mapped, and
2607 * writes to that region are not written out to the file."
2609 kaddr = kmap_atomic(page, KM_USER0);
2610 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2611 flush_dcache_page(page);
2612 kunmap_atomic(kaddr, KM_USER0);
2613 return __block_write_full_page(inode, page, get_block, wbc);
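/*
 * Illustrative sketch (hypothetical examplefs again; a filesystem would
 * pick either the nobh wiring above or this classic buffer_head based
 * wiring, not both):
 *
 *	static int examplefs_writepage(struct page *page,
 *				       struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, examplefs_get_block, wbc);
 *	}
 *
 *	static int examplefs_prepare_write(struct file *file, struct page *page,
 *					   unsigned from, unsigned to)
 *	{
 *		return block_prepare_write(page, from, to, examplefs_get_block);
 *	}
 *
 *	.prepare_write	= examplefs_prepare_write,
 *	.commit_write	= generic_commit_write,
 *	.writepage	= examplefs_writepage,
 *
 * and ->truncate would zero the partial last block with
 * block_truncate_page(inode->i_mapping, inode->i_size, examplefs_get_block).
 */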
2616 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2617 get_block_t *get_block)
2619 struct buffer_head tmp;
2620 struct inode *inode = mapping->host;
2623 tmp.b_size = 1 << inode->i_blkbits;
2624 get_block(inode, block, &tmp, 0);
2625 return tmp.b_blocknr;
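/*
 * Illustrative sketch: generic_block_bmap() is normally exposed through
 * ->bmap, using the filesystem's get_block (examplefs_get_block is the
 * hypothetical helper sketched earlier):
 *
 *	static sector_t examplefs_bmap(struct address_space *mapping,
 *				       sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, examplefs_get_block);
 *	}
 */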
2628 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2630 struct buffer_head *bh = bio->bi_private;
2635 if (err == -EOPNOTSUPP) {
2636 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2637 set_bit(BH_Eopnotsupp, &bh->b_state);
2640 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2645 int submit_bh(int rw, struct buffer_head * bh)
2650 BUG_ON(!buffer_locked(bh));
2651 BUG_ON(!buffer_mapped(bh));
2652 BUG_ON(!bh->b_end_io);
2654 if (buffer_ordered(bh) && (rw == WRITE))
2658 * Only clear out a write error when rewriting. Should this
2659 * include WRITE_SYNC as well?
2661 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2662 clear_buffer_write_io_error(bh);
2665 * from here on down, it's all bio -- do the initial mapping,
2666 * submit_bio -> generic_make_request may further map this bio around
2668 bio = bio_alloc(GFP_NOIO, 1);
2670 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2671 bio->bi_bdev = bh->b_bdev;
2672 bio->bi_io_vec[0].bv_page = bh->b_page;
2673 bio->bi_io_vec[0].bv_len = bh->b_size;
2674 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2678 bio->bi_size = bh->b_size;
2680 bio->bi_end_io = end_bio_bh_io_sync;
2681 bio->bi_private = bh;
2684 submit_bio(rw, bio);
2686 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2694 * ll_rw_block: low-level access to block devices (DEPRECATED)
2695 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2696 * @nr: number of &struct buffer_heads in the array
2697 * @bhs: array of pointers to &struct buffer_head
2699 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2700 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2701 * %SWRITE is like %WRITE, except that it makes sure the *current* data in
2702 * the buffers is sent to disk. The fourth option, %READA, is described in
2703 * the documentation for generic_make_request(), which ll_rw_block() calls.
2705 * This function drops any buffer that it cannot get a lock on (with the
2706 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2707 * clean when doing a write request, and any buffer that appears to be
2708 * up-to-date when doing a read request. Further, it marks as clean buffers that
2709 * are processed for writing (the buffer cache won't assume that they are
2710 * actually clean until the buffer gets unlocked).
2712 * ll_rw_block() sets b_end_io to a simple completion handler that marks
2713 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2716 * All of the buffers must be for the same device, and their size must be a
2717 * multiple of the current approved block size for the device.
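 *
 * A typical caller (illustrative, not from the original file) kicks off
 * reads on a batch of buffers and then waits for them, re-checking
 * uptodate afterwards because ll_rw_block() may have skipped buffers it
 * could not lock:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */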
2719 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2723 for (i = 0; i < nr; i++) {
2724 struct buffer_head *bh = bhs[i];
2728 else if (test_set_buffer_locked(bh))
2731 if (rw == WRITE || rw == SWRITE) {
2732 if (test_clear_buffer_dirty(bh)) {
2733 bh->b_end_io = end_buffer_write_sync;
2735 submit_bh(WRITE, bh);
2739 if (!buffer_uptodate(bh)) {
2740 bh->b_end_io = end_buffer_read_sync;
2751 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2752 * and then start new I/O and then wait upon it. The caller must have a ref on
2755 int sync_dirty_buffer(struct buffer_head *bh)
2759 WARN_ON(atomic_read(&bh->b_count) < 1);
2761 if (test_clear_buffer_dirty(bh)) {
2763 bh->b_end_io = end_buffer_write_sync;
2764 ret = submit_bh(WRITE, bh);
2766 if (buffer_eopnotsupp(bh)) {
2767 clear_buffer_eopnotsupp(bh);
2770 if (!ret && !buffer_uptodate(bh))
2779 * try_to_free_buffers() checks if all the buffers on this particular page
2780 * are unused, and releases them if so.
2782 * Exclusion against try_to_free_buffers may be obtained by either
2783 * locking the page or by holding its mapping's private_lock.
2785 * If the page is dirty but all the buffers are clean then we need to
2786 * be sure to mark the page clean as well. This is because the page
2787 * may be against a block device, and a later reattachment of buffers
2788 * to a dirty page will set *all* buffers dirty, which would corrupt
2789 * filesystem data on the same device.
2791 * The same applies to regular filesystem pages: if all the buffers are
2792 * clean then we set the page clean and proceed. To do that, we require
2793 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2796 * try_to_free_buffers() is non-blocking.
2798 static inline int buffer_busy(struct buffer_head *bh)
2800 return atomic_read(&bh->b_count) |
2801 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2805 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2807 struct buffer_head *head = page_buffers(page);
2808 struct buffer_head *bh;
2812 if (buffer_write_io_error(bh) && page->mapping)
2813 set_bit(AS_EIO, &page->mapping->flags);
2814 if (buffer_busy(bh))
2816 bh = bh->b_this_page;
2817 } while (bh != head);
2820 struct buffer_head *next = bh->b_this_page;
2822 if (!list_empty(&bh->b_assoc_buffers))
2823 __remove_assoc_queue(bh);
2825 } while (bh != head);
2826 *buffers_to_free = head;
2827 __clear_page_buffers(page);
2833 int try_to_free_buffers(struct page *page)
2835 struct address_space * const mapping = page->mapping;
2836 struct buffer_head *buffers_to_free = NULL;
2839 BUG_ON(!PageLocked(page));
2840 if (PageWriteback(page))
2843 if (mapping == NULL) { /* can this still happen? */
2844 ret = drop_buffers(page, &buffers_to_free);
2848 spin_lock(&mapping->private_lock);
2849 ret = drop_buffers(page, &buffers_to_free);
2852 * If the filesystem writes its buffers by hand (eg ext3)
2853 * then we can have clean buffers against a dirty page. We
2854 * clean the page here; otherwise the VM will never notice
2855 * that the filesystem did any IO at all.
2857 * Also, during truncate, discard_buffer will have marked all
2858 * the page's buffers clean. We discover that here and clean
2861 * private_lock must be held over this entire operation in order
2862 * to synchronise against __set_page_dirty_buffers and prevent the
2863 * dirty bit from being lost.
2866 cancel_dirty_page(page, PAGE_CACHE_SIZE);
2867 spin_unlock(&mapping->private_lock);
2869 if (buffers_to_free) {
2870 struct buffer_head *bh = buffers_to_free;
2873 struct buffer_head *next = bh->b_this_page;
2874 free_buffer_head(bh);
2876 } while (bh != buffers_to_free);
2880 EXPORT_SYMBOL(try_to_free_buffers);
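/*
 * Illustrative sketch: filesystems usually reach try_to_free_buffers()
 * via ->releasepage.  For a hypothetical filesystem with no private page
 * state, a minimal implementation could be:
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 *
 * Filesystems that journal or otherwise track their buffers must do their
 * own checks before calling in.
 */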
2882 void block_sync_page(struct page *page)
2884 struct address_space *mapping;
2887 mapping = page_mapping(page);
2889 blk_run_backing_dev(mapping->backing_dev_info, page);
2893 * There are no bdflush tunables left. But distributions are
2894 * still running obsolete flush daemons, so we terminate them here.
2896 * Use of bdflush() is deprecated and will be removed in a future kernel.
2897 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2899 asmlinkage long sys_bdflush(int func, long data)
2901 static int msg_count;
2903 if (!capable(CAP_SYS_ADMIN))
2906 if (msg_count < 5) {
2909 "warning: process `%s' used the obsolete bdflush"
2910 " system call\n", current->comm);
2911 printk(KERN_INFO "Fix your initscripts?\n");
2920 * Buffer-head allocation
2922 static struct kmem_cache *bh_cachep;
2925 * Once the number of bh's in the machine exceeds this level, we start
2926 * stripping them in writeback.
2928 static int max_buffer_heads;
2930 int buffer_heads_over_limit;
2932 struct bh_accounting {
2933 int nr; /* Number of live bh's */
2934 int ratelimit; /* Limit cacheline bouncing */
2937 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2939 static void recalc_bh_state(void)
2944 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2946 __get_cpu_var(bh_accounting).ratelimit = 0;
2947 for_each_online_cpu(i)
2948 tot += per_cpu(bh_accounting, i).nr;
2949 buffer_heads_over_limit = (tot > max_buffer_heads);
2952 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2954 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2956 get_cpu_var(bh_accounting).nr++;
2958 put_cpu_var(bh_accounting);
2962 EXPORT_SYMBOL(alloc_buffer_head);
2964 void free_buffer_head(struct buffer_head *bh)
2966 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2967 kmem_cache_free(bh_cachep, bh);
2968 get_cpu_var(bh_accounting).nr--;
2970 put_cpu_var(bh_accounting);
2972 EXPORT_SYMBOL(free_buffer_head);
2975 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
2977 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2978 SLAB_CTOR_CONSTRUCTOR) {
2979 struct buffer_head * bh = (struct buffer_head *)data;
2981 memset(bh, 0, sizeof(*bh));
2982 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2986 static void buffer_exit_cpu(int cpu)
2989 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2991 for (i = 0; i < BH_LRU_SIZE; i++) {
2995 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2996 per_cpu(bh_accounting, cpu).nr = 0;
2997 put_cpu_var(bh_accounting);
3000 static int buffer_cpu_notify(struct notifier_block *self,
3001 unsigned long action, void *hcpu)
3003 if (action == CPU_DEAD)
3004 buffer_exit_cpu((unsigned long)hcpu);
3008 void __init buffer_init(void)
3012 bh_cachep = kmem_cache_create("buffer_head",
3013 sizeof(struct buffer_head), 0,
3014 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3020 * Limit the bh occupancy to 10% of ZONE_NORMAL
3022 nrpages = (nr_free_buffer_pages() * 10) / 100;
3023 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
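	/*
	 * Worked example (rough orders of magnitude only): with 4KB pages
	 * and a buffer_head of roughly 50-60 bytes, PAGE_SIZE/sizeof(struct
	 * buffer_head) is about 70, so a machine whose ZONE_NORMAL provides
	 * ~100,000 buffer-capable pages gets nrpages = 10,000 and
	 * max_buffer_heads on the order of 700,000.
	 */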
3024 hotcpu_notifier(buffer_cpu_notify, 0);
3027 EXPORT_SYMBOL(__bforget);
3028 EXPORT_SYMBOL(__brelse);
3029 EXPORT_SYMBOL(__wait_on_buffer);
3030 EXPORT_SYMBOL(block_commit_write);
3031 EXPORT_SYMBOL(block_prepare_write);
3032 EXPORT_SYMBOL(block_read_full_page);
3033 EXPORT_SYMBOL(block_sync_page);
3034 EXPORT_SYMBOL(block_truncate_page);
3035 EXPORT_SYMBOL(block_write_full_page);
3036 EXPORT_SYMBOL(cont_prepare_write);
3037 EXPORT_SYMBOL(end_buffer_read_sync);
3038 EXPORT_SYMBOL(end_buffer_write_sync);
3039 EXPORT_SYMBOL(file_fsync);
3040 EXPORT_SYMBOL(fsync_bdev);
3041 EXPORT_SYMBOL(generic_block_bmap);
3042 EXPORT_SYMBOL(generic_commit_write);
3043 EXPORT_SYMBOL(generic_cont_expand);
3044 EXPORT_SYMBOL(generic_cont_expand_simple);
3045 EXPORT_SYMBOL(init_buffer);
3046 EXPORT_SYMBOL(invalidate_bdev);
3047 EXPORT_SYMBOL(ll_rw_block);
3048 EXPORT_SYMBOL(mark_buffer_dirty);
3049 EXPORT_SYMBOL(submit_bh);
3050 EXPORT_SYMBOL(sync_dirty_buffer);
3051 EXPORT_SYMBOL(unlock_buffer);