linux 2.6.16.38 w/ vs2.0.3-rc1
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
45
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
48
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50
51 inline void
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 {
54         bh->b_end_io = handler;
55         bh->b_private = private;
56 }
57
58 static int sync_buffer(void *word)
59 {
60         struct block_device *bd;
61         struct buffer_head *bh
62                 = container_of(word, struct buffer_head, b_state);
63
64         smp_mb();
65         bd = bh->b_bdev;
66         if (bd)
67                 blk_run_address_space(bd->bd_inode->i_mapping);
68         io_schedule();
69         return 0;
70 }
71
72 void fastcall __lock_buffer(struct buffer_head *bh)
73 {
74         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75                                                         TASK_UNINTERRUPTIBLE);
76 }
77 EXPORT_SYMBOL(__lock_buffer);
78
79 void fastcall unlock_buffer(struct buffer_head *bh)
80 {
81         clear_buffer_locked(bh);
82         smp_mb__after_clear_bit();
83         wake_up_bit(&bh->b_state, BH_Lock);
84 }
85
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
95
96 static void
97 __clear_page_buffers(struct page *page)
98 {
99         ClearPagePrivate(page);
100         set_page_private(page, 0);
101         page_cache_release(page);
102 }
103
104 static void buffer_io_error(struct buffer_head *bh)
105 {
106         char b[BDEVNAME_SIZE];
107
108         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109                         bdevname(bh->b_bdev, b),
110                         (unsigned long long)bh->b_blocknr);
111 }
112
113 /*
114  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
115  * unlock the buffer. This is what ll_rw_block uses too.
116  */
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118 {
119         if (uptodate) {
120                 set_buffer_uptodate(bh);
121         } else {
122                 /* This happens due to failed READA attempts. */
123                 clear_buffer_uptodate(bh);
124         }
125         unlock_buffer(bh);
126         put_bh(bh);
127 }
128
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 {
131         char b[BDEVNAME_SIZE];
132
133         if (uptodate) {
134                 set_buffer_uptodate(bh);
135         } else {
136                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137                         buffer_io_error(bh);
138                         printk(KERN_WARNING "lost page write due to "
139                                         "I/O error on %s\n",
140                                        bdevname(bh->b_bdev, b));
141                 }
142                 set_buffer_write_io_error(bh);
143                 clear_buffer_uptodate(bh);
144         }
145         unlock_buffer(bh);
146         put_bh(bh);
147 }
148
149 /*
150  * Write out and wait upon all the dirty data associated with a block
151  * device via its mapping.  Does not take the superblock lock.
152  */
153 int sync_blockdev(struct block_device *bdev)
154 {
155         int ret = 0;
156
157         if (bdev)
158                 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159         return ret;
160 }
161 EXPORT_SYMBOL(sync_blockdev);
162
163 /*
164  * Write out and wait upon all dirty data associated with this
165  * superblock.  Filesystem data as well as the underlying block
166  * device.  Takes the superblock lock.
167  */
168 int fsync_super(struct super_block *sb)
169 {
170         sync_inodes_sb(sb, 0);
171         DQUOT_SYNC(sb);
172         lock_super(sb);
173         if (sb->s_dirt && sb->s_op->write_super)
174                 sb->s_op->write_super(sb);
175         unlock_super(sb);
176         if (sb->s_op->sync_fs)
177                 sb->s_op->sync_fs(sb, 1);
178         sync_blockdev(sb->s_bdev);
179         sync_inodes_sb(sb, 1);
180
181         return sync_blockdev(sb->s_bdev);
182 }
183
184 /*
185  * Write out and wait upon all dirty data associated with this
186  * device.   Filesystem data as well as the underlying block
187  * device.  Takes the superblock lock.
188  */
189 int fsync_bdev(struct block_device *bdev)
190 {
191         struct super_block *sb = get_super(bdev);
192         if (sb) {
193                 int res = fsync_super(sb);
194                 drop_super(sb);
195                 return res;
196         }
197         return sync_blockdev(bdev);
198 }
199
200 /**
201  * freeze_bdev  --  lock a filesystem and force it into a consistent state
202  * @bdev:       blockdevice to lock
203  *
204  * This takes the block device bd_mount_sem to make sure no new mounts
205  * happen on bdev until thaw_bdev() is called.
206  * If a superblock is found on this device, we take the s_umount semaphore
207  * on it to make sure nobody unmounts until the snapshot creation is done.
208  */
209 struct super_block *freeze_bdev(struct block_device *bdev)
210 {
211         struct super_block *sb;
212
213         down(&bdev->bd_mount_sem);
214         sb = get_super(bdev);
215         if (sb && !(sb->s_flags & MS_RDONLY)) {
216                 sb->s_frozen = SB_FREEZE_WRITE;
217                 smp_wmb();
218
219                 sync_inodes_sb(sb, 0);
220                 DQUOT_SYNC(sb);
221
222                 lock_super(sb);
223                 if (sb->s_dirt && sb->s_op->write_super)
224                         sb->s_op->write_super(sb);
225                 unlock_super(sb);
226
227                 if (sb->s_op->sync_fs)
228                         sb->s_op->sync_fs(sb, 1);
229
230                 sync_blockdev(sb->s_bdev);
231                 sync_inodes_sb(sb, 1);
232
233                 sb->s_frozen = SB_FREEZE_TRANS;
234                 smp_wmb();
235
236                 sync_blockdev(sb->s_bdev);
237
238                 if (sb->s_op->write_super_lockfs)
239                         sb->s_op->write_super_lockfs(sb);
240         }
241
242         sync_blockdev(bdev);
243         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
244 }
245 EXPORT_SYMBOL(freeze_bdev);
246
247 /**
248  * thaw_bdev  -- unlock filesystem
249  * @bdev:       blockdevice to unlock
250  * @sb:         associated superblock
251  *
252  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
253  */
254 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
255 {
256         if (sb) {
257                 BUG_ON(sb->s_bdev != bdev);
258
259                 if (sb->s_op->unlockfs)
260                         sb->s_op->unlockfs(sb);
261                 sb->s_frozen = SB_UNFROZEN;
262                 smp_wmb();
263                 wake_up(&sb->s_wait_unfrozen);
264                 drop_super(sb);
265         }
266
267         up(&bdev->bd_mount_sem);
268 }
269 EXPORT_SYMBOL(thaw_bdev);
270
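/*
 * Rough usage sketch (not taken from this file): a snapshot or
 * volume-manager driver brackets snapshot creation with the pair of
 * calls above.  take_snapshot() below is a hypothetical helper and
 * error handling is omitted:
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);		writes blocked, fs consistent
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);		writes resume again
 */
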
271 /*
272  * sync everything.  Start out by waking pdflush, because that writes back
273  * all queues in parallel.
274  */
275 static void do_sync(unsigned long wait)
276 {
277         wakeup_pdflush(0);
278         sync_inodes(0);         /* All mappings, inodes and their blockdevs */
279         DQUOT_SYNC(NULL);
280         sync_supers();          /* Write the superblocks */
281         sync_filesystems(0);    /* Start syncing the filesystems */
282         sync_filesystems(wait); /* Waitingly sync the filesystems */
283         sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
284         if (!wait)
285                 printk("Emergency Sync complete\n");
286         if (unlikely(laptop_mode))
287                 laptop_sync_completion();
288 }
289
290 asmlinkage long sys_sync(void)
291 {
292         do_sync(1);
293         return 0;
294 }
295
296 void emergency_sync(void)
297 {
298         pdflush_operation(do_sync, 0);
299 }
300
301 /*
302  * Generic function to fsync a file.
303  *
304  * filp may be NULL if called via the msync of a vma.
305  */
306  
307 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
308 {
309         struct inode * inode = dentry->d_inode;
310         struct super_block * sb;
311         int ret, err;
312
313         /* sync the inode to buffers */
314         ret = write_inode_now(inode, 0);
315
316         /* sync the superblock to buffers */
317         sb = inode->i_sb;
318         lock_super(sb);
319         if (sb->s_op->write_super)
320                 sb->s_op->write_super(sb);
321         unlock_super(sb);
322
323         /* .. finally sync the buffers to disk */
324         err = sync_blockdev(sb->s_bdev);
325         if (!ret)
326                 ret = err;
327         return ret;
328 }
329
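/*
 * Simple filesystems can plug this straight into their file_operations,
 * e.g. (illustrative only):
 *
 *	struct file_operations foo_file_operations = {
 *		...
 *		.fsync		= file_fsync,
 *	};
 */
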
330 static long do_fsync(unsigned int fd, int datasync)
331 {
332         struct file * file;
333         struct address_space *mapping;
334         int ret, err;
335
336         ret = -EBADF;
337         file = fget(fd);
338         if (!file)
339                 goto out;
340
341         ret = -EINVAL;
342         if (!file->f_op || !file->f_op->fsync) {
343                 /* Why?  We can still call filemap_fdatawrite */
344                 goto out_putf;
345         }
346
347         mapping = file->f_mapping;
348
349         current->flags |= PF_SYNCWRITE;
350         ret = filemap_fdatawrite(mapping);
351
352         /*
353          * We need to protect against concurrent writers,
354          * which could cause livelocks in fsync_buffers_list
355          */
356         mutex_lock(&mapping->host->i_mutex);
357         err = file->f_op->fsync(file, file->f_dentry, datasync);
358         if (!ret)
359                 ret = err;
360         mutex_unlock(&mapping->host->i_mutex);
361         err = filemap_fdatawait(mapping);
362         if (!ret)
363                 ret = err;
364         current->flags &= ~PF_SYNCWRITE;
365
366 out_putf:
367         fput(file);
368 out:
369         return ret;
370 }
371
372 asmlinkage long sys_fsync(unsigned int fd)
373 {
374         return do_fsync(fd, 0);
375 }
376
377 asmlinkage long sys_fdatasync(unsigned int fd)
378 {
379         return do_fsync(fd, 1);
380 }
381
382 /*
383  * Various filesystems appear to want __find_get_block to be non-blocking.
384  * But it's the page lock which protects the buffers.  To get around this,
385  * we get exclusion from try_to_free_buffers with the blockdev mapping's
386  * private_lock.
387  *
388  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
389  * may be quite high.  This code could TryLock the page, and if that
390  * succeeds, there is no need to take private_lock. (But if
391  * private_lock is contended then so is mapping->tree_lock).
392  */
393 static struct buffer_head *
394 __find_get_block_slow(struct block_device *bdev, sector_t block)
395 {
396         struct inode *bd_inode = bdev->bd_inode;
397         struct address_space *bd_mapping = bd_inode->i_mapping;
398         struct buffer_head *ret = NULL;
399         pgoff_t index;
400         struct buffer_head *bh;
401         struct buffer_head *head;
402         struct page *page;
403         int all_mapped = 1;
404
405         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
406         page = find_get_page(bd_mapping, index);
407         if (!page)
408                 goto out;
409
410         spin_lock(&bd_mapping->private_lock);
411         if (!page_has_buffers(page))
412                 goto out_unlock;
413         head = page_buffers(page);
414         bh = head;
415         do {
416                 if (bh->b_blocknr == block) {
417                         ret = bh;
418                         get_bh(bh);
419                         goto out_unlock;
420                 }
421                 if (!buffer_mapped(bh))
422                         all_mapped = 0;
423                 bh = bh->b_this_page;
424         } while (bh != head);
425
426         /* we might be here because some of the buffers on this page are
427          * not mapped.  This is due to various races between
428          * file io on the block device and getblk.  It gets dealt with
429          * elsewhere, don't buffer_error if we had some unmapped buffers
430          */
431         if (all_mapped) {
432                 printk("__find_get_block_slow() failed. "
433                         "block=%llu, b_blocknr=%llu\n",
434                         (unsigned long long)block, (unsigned long long)bh->b_blocknr);
435                 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
436                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
437         }
438 out_unlock:
439         spin_unlock(&bd_mapping->private_lock);
440         page_cache_release(page);
441 out:
442         return ret;
443 }
444
445 /* If invalidate_buffers() will trash dirty buffers, it means some kind
446    of fs corruption is going on. Trashing dirty data always implies losing
447    information that the user assumed had been safely stored on the
448    physical layer.
449
450    Thus invalidate_buffers in general usage is not allowed to trash
451    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
452    be preserved.  These buffers are simply skipped.
453
454    We also skip buffers which are still in use.  For example this can
455    happen if a userspace program is reading the block device.
456
457    NOTE: if the user removed a removable-media disk while there was still
458    dirty data not yet synced to disk (due to a bug in the device driver or
459    to an error by the user), then by not destroying the dirty buffers we
460    could also corrupt the next media inserted.  A parameter is therefore
461    needed to handle this case in the safest way possible (trying not to
462    corrupt the newly inserted disk with data belonging to the old, now
463    corrupted one). For a ramdisk, on the other hand, the natural way to
464    release the ramdisk memory is precisely to destroy its dirty buffers.
465
466    These are two special cases. Normal usage implies that the device driver
467    issues a sync on the device (without waiting for I/O completion) and
468    then an invalidate_buffers call that doesn't trash dirty buffers.
469
470    For handling cache coherency with the blkdev pagecache, the 'update' case
471    has been introduced. It is needed to re-read from disk any pinned
472    buffer. NOTE: re-reading from disk is destructive, so we can do it only
473    when we assume nobody is changing the buffercache under our I/O and when
474    we think the disk contains more recent information than the buffercache.
475    The update == 1 pass marks the buffers we need to update, the update == 2
476    pass does the actual I/O. */
477 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
478 {
479         invalidate_bh_lrus();
480         /*
481          * FIXME: what about destroy_dirty_buffers?
482          * We really want to use invalidate_inode_pages2() for
483          * that, but not until that's cleaned up.
484          */
485         invalidate_inode_pages(bdev->bd_inode->i_mapping);
486 }
487
488 /*
489  * Kick pdflush then try to free up some ZONE_NORMAL memory.
490  */
491 static void free_more_memory(void)
492 {
493         struct zone **zones;
494         pg_data_t *pgdat;
495
496         wakeup_pdflush(1024);
497         yield();
498
499         for_each_pgdat(pgdat) {
500                 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
501                 if (*zones)
502                         try_to_free_pages(zones, GFP_NOFS);
503         }
504 }
505
506 /*
507  * I/O completion handler for block_read_full_page() - pages
508  * which come unlocked at the end of I/O.
509  */
510 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
511 {
512         unsigned long flags;
513         struct buffer_head *first;
514         struct buffer_head *tmp;
515         struct page *page;
516         int page_uptodate = 1;
517
518         BUG_ON(!buffer_async_read(bh));
519
520         page = bh->b_page;
521         if (uptodate) {
522                 set_buffer_uptodate(bh);
523         } else {
524                 clear_buffer_uptodate(bh);
525                 if (printk_ratelimit())
526                         buffer_io_error(bh);
527                 SetPageError(page);
528         }
529
530         /*
531          * Be _very_ careful from here on. Bad things can happen if
532          * two buffer heads end IO at almost the same time and both
533          * decide that the page is now completely done.
534          */
535         first = page_buffers(page);
536         local_irq_save(flags);
537         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
538         clear_buffer_async_read(bh);
539         unlock_buffer(bh);
540         tmp = bh;
541         do {
542                 if (!buffer_uptodate(tmp))
543                         page_uptodate = 0;
544                 if (buffer_async_read(tmp)) {
545                         BUG_ON(!buffer_locked(tmp));
546                         goto still_busy;
547                 }
548                 tmp = tmp->b_this_page;
549         } while (tmp != bh);
550         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
551         local_irq_restore(flags);
552
553         /*
554          * If none of the buffers had errors and they are all
555          * uptodate then we can set the page uptodate.
556          */
557         if (page_uptodate && !PageError(page))
558                 SetPageUptodate(page);
559         unlock_page(page);
560         return;
561
562 still_busy:
563         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
564         local_irq_restore(flags);
565         return;
566 }
567
568 /*
569  * Completion handler for block_write_full_page() - pages which are unlocked
570  * during I/O, and which have PageWriteback cleared upon I/O completion.
571  */
572 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
573 {
574         char b[BDEVNAME_SIZE];
575         unsigned long flags;
576         struct buffer_head *first;
577         struct buffer_head *tmp;
578         struct page *page;
579
580         BUG_ON(!buffer_async_write(bh));
581
582         page = bh->b_page;
583         if (uptodate) {
584                 set_buffer_uptodate(bh);
585         } else {
586                 if (printk_ratelimit()) {
587                         buffer_io_error(bh);
588                         printk(KERN_WARNING "lost page write due to "
589                                         "I/O error on %s\n",
590                                bdevname(bh->b_bdev, b));
591                 }
592                 set_bit(AS_EIO, &page->mapping->flags);
593                 clear_buffer_uptodate(bh);
594                 SetPageError(page);
595         }
596
597         first = page_buffers(page);
598         local_irq_save(flags);
599         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
600
601         clear_buffer_async_write(bh);
602         unlock_buffer(bh);
603         tmp = bh->b_this_page;
604         while (tmp != bh) {
605                 if (buffer_async_write(tmp)) {
606                         BUG_ON(!buffer_locked(tmp));
607                         goto still_busy;
608                 }
609                 tmp = tmp->b_this_page;
610         }
611         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
612         local_irq_restore(flags);
613         end_page_writeback(page);
614         return;
615
616 still_busy:
617         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
618         local_irq_restore(flags);
619         return;
620 }
621
622 /*
623  * If a page's buffers are under async read-in (end_buffer_async_read
624  * completion) then there is a possibility that another thread of
625  * control could lock one of the buffers after it has completed
626  * but while some of the other buffers have not completed.  This
627  * locked buffer would confuse end_buffer_async_read() into not unlocking
628  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
629  * that this buffer is not under async I/O.
630  *
631  * The page comes unlocked when it has no locked buffer_async buffers
632  * left.
633  *
634  * PageLocked prevents anyone from starting new async I/O reads against
635  * any of the buffers.
636  *
637  * PageWriteback is used to prevent simultaneous writeout of the same
638  * page.
639  *
640  * PageLocked prevents anyone from starting writeback of a page which is
641  * under read I/O (PageWriteback is only ever set against a locked page).
642  */
643 static void mark_buffer_async_read(struct buffer_head *bh)
644 {
645         bh->b_end_io = end_buffer_async_read;
646         set_buffer_async_read(bh);
647 }
648
649 void mark_buffer_async_write(struct buffer_head *bh)
650 {
651         bh->b_end_io = end_buffer_async_write;
652         set_buffer_async_write(bh);
653 }
654 EXPORT_SYMBOL(mark_buffer_async_write);
655
656
657 /*
658  * fs/buffer.c contains helper functions for buffer-backed address space's
659  * fsync functions.  A common requirement for buffer-based filesystems is
660  * that certain data from the backing blockdev needs to be written out for
661  * a successful fsync().  For example, ext2 indirect blocks need to be
662  * written back and waited upon before fsync() returns.
663  *
664  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
665  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
666  * management of a list of dependent buffers at ->i_mapping->private_list.
667  *
668  * Locking is a little subtle: try_to_free_buffers() will remove buffers
669  * from their controlling inode's queue when they are being freed.  But
670  * try_to_free_buffers() will be operating against the *blockdev* mapping
671  * at the time, not against the S_ISREG file which depends on those buffers.
672  * So the locking for private_list is via the private_lock in the address_space
673  * which backs the buffers.  Which is different from the address_space 
674  * against which the buffers are listed.  So for a particular address_space,
675  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
676  * mapping->private_list will always be protected by the backing blockdev's
677  * ->private_lock.
678  *
679  * Which introduces a requirement: all buffers on an address_space's
680  * ->private_list must be from the same address_space: the blockdev's.
681  *
682  * address_spaces which do not place buffers at ->private_list via these
683  * utility functions are free to use private_lock and private_list for
684  * whatever they want.  The only requirement is that list_empty(private_list)
685  * be true at clear_inode() time.
686  *
687  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
688  * filesystems should do that.  invalidate_inode_buffers() should just go
689  * BUG_ON(!list_empty).
690  *
691  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
692  * take an address_space, not an inode.  And it should be called
693  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
694  * queued up.
695  *
696  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
697  * list if it is already on a list.  Because if the buffer is on a list,
698  * it *must* already be on the right one.  If not, the filesystem is being
699  * silly.  This will save a ton of locking.  But first we have to ensure
700  * that buffers are taken *off* the old inode's list when they are freed
701  * (presumably in truncate).  That requires careful auditing of all
702  * filesystems (do it inside bforget()).  It could also be done by bringing
703  * b_inode back.
704  */
705
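/*
 * Illustrative sketch only (not lifted from any particular filesystem):
 * an ext2-like filesystem marks its indirect blocks with
 * mark_buffer_dirty_inode() as it updates them, and its ->fsync method
 * later writes and waits on them via sync_mapping_buffers():
 *
 *	mark_buffer_dirty_inode(bh, inode);		metadata update path
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);	->fsync path
 */
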
706 /*
707  * The buffer's backing address_space's private_lock must be held
708  */
709 static inline void __remove_assoc_queue(struct buffer_head *bh)
710 {
711         list_del_init(&bh->b_assoc_buffers);
712 }
713
714 int inode_has_buffers(struct inode *inode)
715 {
716         return !list_empty(&inode->i_data.private_list);
717 }
718
719 /*
720  * osync is designed to support O_SYNC io.  It waits synchronously for
721  * all already-submitted IO to complete, but does not queue any new
722  * writes to the disk.
723  *
724  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
725  * you dirty the buffers, and then use osync_inode_buffers to wait for
726  * completion.  Any other dirty buffers which are not yet queued for
727  * write will not be flushed to disk by the osync.
728  */
729 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
730 {
731         struct buffer_head *bh;
732         struct list_head *p;
733         int err = 0;
734
735         spin_lock(lock);
736 repeat:
737         list_for_each_prev(p, list) {
738                 bh = BH_ENTRY(p);
739                 if (buffer_locked(bh)) {
740                         get_bh(bh);
741                         spin_unlock(lock);
742                         wait_on_buffer(bh);
743                         if (!buffer_uptodate(bh))
744                                 err = -EIO;
745                         brelse(bh);
746                         spin_lock(lock);
747                         goto repeat;
748                 }
749         }
750         spin_unlock(lock);
751         return err;
752 }
753
754 /**
755  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
756  *                        buffers
757  * @mapping: the mapping which wants those buffers written
758  *
759  * Starts I/O against the buffers at mapping->private_list, and waits upon
760  * that I/O.
761  *
762  * Basically, this is a convenience function for fsync().
763  * @mapping is a file or directory which needs those buffers to be written for
764  * a successful fsync().
765  */
766 int sync_mapping_buffers(struct address_space *mapping)
767 {
768         struct address_space *buffer_mapping = mapping->assoc_mapping;
769
770         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
771                 return 0;
772
773         return fsync_buffers_list(&buffer_mapping->private_lock,
774                                         &mapping->private_list);
775 }
776 EXPORT_SYMBOL(sync_mapping_buffers);
777
778 /*
779  * Called when we've recently written block `bblock', and it is known that
780  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
781  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
782  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
783  */
784 void write_boundary_block(struct block_device *bdev,
785                         sector_t bblock, unsigned blocksize)
786 {
787         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
788         if (bh) {
789                 if (buffer_dirty(bh))
790                         ll_rw_block(WRITE, 1, &bh);
791                 put_bh(bh);
792         }
793 }
794
795 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
796 {
797         struct address_space *mapping = inode->i_mapping;
798         struct address_space *buffer_mapping = bh->b_page->mapping;
799
800         mark_buffer_dirty(bh);
801         if (!mapping->assoc_mapping) {
802                 mapping->assoc_mapping = buffer_mapping;
803         } else {
804                 if (mapping->assoc_mapping != buffer_mapping)
805                         BUG();
806         }
807         if (list_empty(&bh->b_assoc_buffers)) {
808                 spin_lock(&buffer_mapping->private_lock);
809                 list_move_tail(&bh->b_assoc_buffers,
810                                 &mapping->private_list);
811                 spin_unlock(&buffer_mapping->private_lock);
812         }
813 }
814 EXPORT_SYMBOL(mark_buffer_dirty_inode);
815
816 /*
817  * Add a page to the dirty page list.
818  *
819  * It is a sad fact of life that this function is called from several places
820  * deeply under spinlocking.  It may not sleep.
821  *
822  * If the page has buffers, the uptodate buffers are set dirty, to preserve
823  * dirty-state coherency between the page and the buffers.  If the page does
824  * not have buffers then when they are later attached they will all be set
825  * dirty.
826  *
827  * The buffers are dirtied before the page is dirtied.  There's a small race
828  * window in which a writepage caller may see the page cleanness but not the
829  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
830  * before the buffers, a concurrent writepage caller could clear the page dirty
831  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832  * page on the dirty page list.
833  *
834  * We use private_lock to lock against try_to_free_buffers while using the
835  * page's buffer list.  Also use this to protect against clean buffers being
836  * added to the page after it was set dirty.
837  *
838  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
839  * address_space though.
840  */
841 int __set_page_dirty_buffers(struct page *page)
842 {
843         struct address_space * const mapping = page->mapping;
844
845         spin_lock(&mapping->private_lock);
846         if (page_has_buffers(page)) {
847                 struct buffer_head *head = page_buffers(page);
848                 struct buffer_head *bh = head;
849
850                 do {
851                         set_buffer_dirty(bh);
852                         bh = bh->b_this_page;
853                 } while (bh != head);
854         }
855         spin_unlock(&mapping->private_lock);
856
857         if (!TestSetPageDirty(page)) {
858                 write_lock_irq(&mapping->tree_lock);
859                 if (page->mapping) {    /* Race with truncate? */
860                         if (mapping_cap_account_dirty(mapping))
861                                 inc_page_state(nr_dirty);
862                         radix_tree_tag_set(&mapping->page_tree,
863                                                 page_index(page),
864                                                 PAGECACHE_TAG_DIRTY);
865                 }
866                 write_unlock_irq(&mapping->tree_lock);
867                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
868         }
869         
870         return 0;
871 }
872 EXPORT_SYMBOL(__set_page_dirty_buffers);
873
874 /*
875  * Write out and wait upon a list of buffers.
876  *
877  * We have conflicting pressures: we want to make sure that all
878  * initially dirty buffers get waited on, but that any subsequently
879  * dirtied buffers don't.  After all, we don't want fsync to last
880  * forever if somebody is actively writing to the file.
881  *
882  * Do this in two main stages: first we copy dirty buffers to a
883  * temporary inode list, queueing the writes as we go.  Then we clean
884  * up, waiting for those writes to complete.
885  * 
886  * During this second stage, any subsequent updates to the file may end
887  * up refiling the buffer on the original inode's dirty list again, so
888  * there is a chance we will end up with a buffer queued for write but
889  * not yet completed on that list.  So, as a final cleanup we go through
890  * the osync code to catch these locked, dirty buffers without requeuing
891  * any newly dirty buffers for write.
892  */
893 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
894 {
895         struct buffer_head *bh;
896         struct list_head tmp;
897         int err = 0, err2;
898
899         INIT_LIST_HEAD(&tmp);
900
901         spin_lock(lock);
902         while (!list_empty(list)) {
903                 bh = BH_ENTRY(list->next);
904                 list_del_init(&bh->b_assoc_buffers);
905                 if (buffer_dirty(bh) || buffer_locked(bh)) {
906                         list_add(&bh->b_assoc_buffers, &tmp);
907                         if (buffer_dirty(bh)) {
908                                 get_bh(bh);
909                                 spin_unlock(lock);
910                                 /*
911                                  * Ensure any pending I/O completes so that
912                                  * ll_rw_block() actually writes the current
913                                  * contents - it is a noop if I/O is still in
914                                  * flight on potentially older contents.
915                                  */
916                                 ll_rw_block(SWRITE, 1, &bh);
917                                 brelse(bh);
918                                 spin_lock(lock);
919                         }
920                 }
921         }
922
923         while (!list_empty(&tmp)) {
924                 bh = BH_ENTRY(tmp.prev);
925                 __remove_assoc_queue(bh);
926                 get_bh(bh);
927                 spin_unlock(lock);
928                 wait_on_buffer(bh);
929                 if (!buffer_uptodate(bh))
930                         err = -EIO;
931                 brelse(bh);
932                 spin_lock(lock);
933         }
934         
935         spin_unlock(lock);
936         err2 = osync_buffers_list(lock, list);
937         if (err)
938                 return err;
939         else
940                 return err2;
941 }
942
943 /*
944  * Invalidate any and all dirty buffers on a given inode.  We are
945  * probably unmounting the fs, but that doesn't mean we have already
946  * done a sync().  Just drop the buffers from the inode list.
947  *
948  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
949  * assumes that all the buffers are against the blockdev.  Not true
950  * for reiserfs.
951  */
952 void invalidate_inode_buffers(struct inode *inode)
953 {
954         if (inode_has_buffers(inode)) {
955                 struct address_space *mapping = &inode->i_data;
956                 struct list_head *list = &mapping->private_list;
957                 struct address_space *buffer_mapping = mapping->assoc_mapping;
958
959                 spin_lock(&buffer_mapping->private_lock);
960                 while (!list_empty(list))
961                         __remove_assoc_queue(BH_ENTRY(list->next));
962                 spin_unlock(&buffer_mapping->private_lock);
963         }
964 }
965
966 /*
967  * Remove any clean buffers from the inode's buffer list.  This is called
968  * when we're trying to free the inode itself.  Those buffers can pin it.
969  *
970  * Returns true if all buffers were removed.
971  */
972 int remove_inode_buffers(struct inode *inode)
973 {
974         int ret = 1;
975
976         if (inode_has_buffers(inode)) {
977                 struct address_space *mapping = &inode->i_data;
978                 struct list_head *list = &mapping->private_list;
979                 struct address_space *buffer_mapping = mapping->assoc_mapping;
980
981                 spin_lock(&buffer_mapping->private_lock);
982                 while (!list_empty(list)) {
983                         struct buffer_head *bh = BH_ENTRY(list->next);
984                         if (buffer_dirty(bh)) {
985                                 ret = 0;
986                                 break;
987                         }
988                         __remove_assoc_queue(bh);
989                 }
990                 spin_unlock(&buffer_mapping->private_lock);
991         }
992         return ret;
993 }
994
995 /*
996  * Create the appropriate buffers when given a page for a data area and
997  * the size of each buffer.. Use the bh->b_this_page linked list to
998  * follow the buffers created.  Return NULL if unable to create more
999  * buffers.
1000  *
1001  * The retry flag is used to differentiate async IO (paging, swapping),
1002  * which may not fail, from ordinary buffer allocations.
1003  */
1004 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1005                 int retry)
1006 {
1007         struct buffer_head *bh, *head;
1008         long offset;
1009
1010 try_again:
1011         head = NULL;
1012         offset = PAGE_SIZE;
1013         while ((offset -= size) >= 0) {
1014                 bh = alloc_buffer_head(GFP_NOFS);
1015                 if (!bh)
1016                         goto no_grow;
1017
1018                 bh->b_bdev = NULL;
1019                 bh->b_this_page = head;
1020                 bh->b_blocknr = -1;
1021                 head = bh;
1022
1023                 bh->b_state = 0;
1024                 atomic_set(&bh->b_count, 0);
1025                 bh->b_private = NULL;
1026                 bh->b_size = size;
1027
1028                 /* Link the buffer to its page */
1029                 set_bh_page(bh, page, offset);
1030
1031                 init_buffer(bh, NULL, NULL);
1032         }
1033         return head;
1034 /*
1035  * In case anything failed, we just free everything we got.
1036  */
1037 no_grow:
1038         if (head) {
1039                 do {
1040                         bh = head;
1041                         head = head->b_this_page;
1042                         free_buffer_head(bh);
1043                 } while (head);
1044         }
1045
1046         /*
1047          * Return failure for non-async IO requests.  Async IO requests
1048          * are not allowed to fail, so we have to wait until buffer heads
1049          * become available.  But we don't want tasks sleeping with 
1050          * partially complete buffers, so all were released above.
1051          */
1052         if (!retry)
1053                 return NULL;
1054
1055         /* We're _really_ low on memory. Now we just
1056          * wait for old buffer heads to become free due to
1057          * finishing IO.  Since this is an async request and
1058          * the reserve list is empty, we're sure there are 
1059          * async buffer heads in use.
1060          */
1061         free_more_memory();
1062         goto try_again;
1063 }
1064 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1065
1066 static inline void
1067 link_dev_buffers(struct page *page, struct buffer_head *head)
1068 {
1069         struct buffer_head *bh, *tail;
1070
1071         bh = head;
1072         do {
1073                 tail = bh;
1074                 bh = bh->b_this_page;
1075         } while (bh);
1076         tail->b_this_page = head;
1077         attach_page_buffers(page, head);
1078 }
1079
1080 /*
1081  * Initialise the state of a blockdev page's buffers.
1082  */ 
1083 static void
1084 init_page_buffers(struct page *page, struct block_device *bdev,
1085                         sector_t block, int size)
1086 {
1087         struct buffer_head *head = page_buffers(page);
1088         struct buffer_head *bh = head;
1089         int uptodate = PageUptodate(page);
1090
1091         do {
1092                 if (!buffer_mapped(bh)) {
1093                         init_buffer(bh, NULL, NULL);
1094                         bh->b_bdev = bdev;
1095                         bh->b_blocknr = block;
1096                         if (uptodate)
1097                                 set_buffer_uptodate(bh);
1098                         set_buffer_mapped(bh);
1099                 }
1100                 block++;
1101                 bh = bh->b_this_page;
1102         } while (bh != head);
1103 }
1104
1105 /*
1106  * Create the page-cache page that contains the requested block.
1107  *
1108  * This is used purely for blockdev mappings.
1109  */
1110 static struct page *
1111 grow_dev_page(struct block_device *bdev, sector_t block,
1112                 pgoff_t index, int size)
1113 {
1114         struct inode *inode = bdev->bd_inode;
1115         struct page *page;
1116         struct buffer_head *bh;
1117
1118         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1119         if (!page)
1120                 return NULL;
1121
1122         if (!PageLocked(page))
1123                 BUG();
1124
1125         if (page_has_buffers(page)) {
1126                 bh = page_buffers(page);
1127                 if (bh->b_size == size) {
1128                         init_page_buffers(page, bdev, block, size);
1129                         return page;
1130                 }
1131                 if (!try_to_free_buffers(page))
1132                         goto failed;
1133         }
1134
1135         /*
1136          * Allocate some buffers for this page
1137          */
1138         bh = alloc_page_buffers(page, size, 0);
1139         if (!bh)
1140                 goto failed;
1141
1142         /*
1143          * Link the page to the buffers and initialise them.  Take the
1144          * lock to be atomic wrt __find_get_block(), which does not
1145          * run under the page lock.
1146          */
1147         spin_lock(&inode->i_mapping->private_lock);
1148         link_dev_buffers(page, bh);
1149         init_page_buffers(page, bdev, block, size);
1150         spin_unlock(&inode->i_mapping->private_lock);
1151         return page;
1152
1153 failed:
1154         BUG();
1155         unlock_page(page);
1156         page_cache_release(page);
1157         return NULL;
1158 }
1159
1160 /*
1161  * Create buffers for the specified block device block's page.  If
1162  * that page was dirty, the buffers are set dirty also.
1163  *
1164  * Except that's a bug.  Attaching dirty buffers to a dirty
1165  * blockdev's page can result in filesystem corruption, because
1166  * some of those buffers may be aliases of filesystem data.
1167  * grow_dev_page() will go BUG() if this happens.
1168  */
1169 static int
1170 grow_buffers(struct block_device *bdev, sector_t block, int size)
1171 {
1172         struct page *page;
1173         pgoff_t index;
1174         int sizebits;
1175
1176         sizebits = -1;
1177         do {
1178                 sizebits++;
1179         } while ((size << sizebits) < PAGE_SIZE);
1180
1181         index = block >> sizebits;
1182
1183         /*
1184          * Check for a block which wants to lie outside our maximum possible
1185          * pagecache index.  (this comparison is done using sector_t types).
1186          */
1187         if (unlikely(index != block >> sizebits)) {
1188                 char b[BDEVNAME_SIZE];
1189
1190                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1191                         "device %s\n",
1192                         __FUNCTION__, (unsigned long long)block,
1193                         bdevname(bdev, b));
1194                 return -EIO;
1195         }
1196         block = index << sizebits;
1197         /* Create a page with the proper size buffers.. */
1198         page = grow_dev_page(bdev, block, index, size);
1199         if (!page)
1200                 return 0;
1201         unlock_page(page);
1202         page_cache_release(page);
1203         return 1;
1204 }
1205
1206 static struct buffer_head *
1207 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1208 {
1209         /* Size must be multiple of hard sectorsize */
1210         if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1211                         (size < 512 || size > PAGE_SIZE))) {
1212                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1213                                         size);
1214                 printk(KERN_ERR "hardsect size: %d\n",
1215                                         bdev_hardsect_size(bdev));
1216
1217                 dump_stack();
1218                 return NULL;
1219         }
1220
1221         for (;;) {
1222                 struct buffer_head * bh;
1223                 int ret;
1224
1225                 bh = __find_get_block(bdev, block, size);
1226                 if (bh)
1227                         return bh;
1228
1229                 ret = grow_buffers(bdev, block, size);
1230                 if (ret < 0)
1231                         return NULL;
1232                 if (ret == 0)
1233                         free_more_memory();
1234         }
1235 }
1236
1237 /*
1238  * The relationship between dirty buffers and dirty pages:
1239  *
1240  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1241  * the page is tagged dirty in its radix tree.
1242  *
1243  * At all times, the dirtiness of the buffers represents the dirtiness of
1244  * subsections of the page.  If the page has buffers, the page dirty bit is
1245  * merely a hint about the true dirty state.
1246  *
1247  * When a page is set dirty in its entirety, all its buffers are marked dirty
1248  * (if the page has buffers).
1249  *
1250  * When a buffer is marked dirty, its page is dirtied, but the page's other
1251  * buffers are not.
1252  *
1253  * Also.  When blockdev buffers are explicitly read with bread(), they
1254  * individually become uptodate.  But their backing page remains not
1255  * uptodate - even if all of its buffers are uptodate.  A subsequent
1256  * block_read_full_page() against that page will discover all the uptodate
1257  * buffers, will set the page uptodate and will perform no I/O.
1258  */
1259
1260 /**
1261  * mark_buffer_dirty - mark a buffer_head as needing writeout
1262  * @bh: the buffer_head to mark dirty
1263  *
1264  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1265  * backing page dirty, then tag the page as dirty in its address_space's radix
1266  * tree and then attach the address_space's inode to its superblock's dirty
1267  * inode list.
1268  *
1269  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1270  * mapping->tree_lock and the global inode_lock.
1271  */
1272 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1273 {
1274         if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1275                 __set_page_dirty_nobuffers(bh->b_page);
1276 }
1277
1278 /*
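/*
 * The usual pattern for modifying a metadata block, as a minimal sketch
 * (sb_bread() in buffer_head.h is a wrapper around __bread() below;
 * error handling omitted):
 *
 *	bh = sb_bread(sb, block);
 *	if (bh) {
 *		... modify bh->b_data ...
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */
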
1279  * Decrement a buffer_head's reference count.  If all buffers against a page
1280  * have zero reference count, are clean and unlocked, and if the page is clean
1281  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1282  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1283  * a page but it ends up not being freed, and buffers may later be reattached).
1284  */
1285 void __brelse(struct buffer_head * buf)
1286 {
1287         if (atomic_read(&buf->b_count)) {
1288                 put_bh(buf);
1289                 return;
1290         }
1291         printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1292         WARN_ON(1);
1293 }
1294
1295 /*
1296  * bforget() is like brelse(), except it discards any
1297  * potentially dirty data.
1298  */
1299 void __bforget(struct buffer_head *bh)
1300 {
1301         clear_buffer_dirty(bh);
1302         if (!list_empty(&bh->b_assoc_buffers)) {
1303                 struct address_space *buffer_mapping = bh->b_page->mapping;
1304
1305                 spin_lock(&buffer_mapping->private_lock);
1306                 list_del_init(&bh->b_assoc_buffers);
1307                 spin_unlock(&buffer_mapping->private_lock);
1308         }
1309         __brelse(bh);
1310 }
1311
1312 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1313 {
1314         lock_buffer(bh);
1315         if (buffer_uptodate(bh)) {
1316                 unlock_buffer(bh);
1317                 return bh;
1318         } else {
1319                 get_bh(bh);
1320                 bh->b_end_io = end_buffer_read_sync;
1321                 submit_bh(READ, bh);
1322                 wait_on_buffer(bh);
1323                 if (buffer_uptodate(bh))
1324                         return bh;
1325         }
1326         brelse(bh);
1327         return NULL;
1328 }
1329
1330 /*
1331  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1332  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1333  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1334  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1335  * CPU's LRUs at the same time.
1336  *
1337  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1338  * sb_find_get_block().
1339  *
1340  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1341  * a local interrupt disable for that.
1342  */
1343
1344 #define BH_LRU_SIZE     8
1345
1346 struct bh_lru {
1347         struct buffer_head *bhs[BH_LRU_SIZE];
1348 };
1349
1350 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1351
1352 #ifdef CONFIG_SMP
1353 #define bh_lru_lock()   local_irq_disable()
1354 #define bh_lru_unlock() local_irq_enable()
1355 #else
1356 #define bh_lru_lock()   preempt_disable()
1357 #define bh_lru_unlock() preempt_enable()
1358 #endif
1359
1360 static inline void check_irqs_on(void)
1361 {
1362 #ifdef irqs_disabled
1363         BUG_ON(irqs_disabled());
1364 #endif
1365 }
1366
1367 /*
1368  * The LRU management algorithm is dopey-but-simple.  Sorry.
1369  */
1370 static void bh_lru_install(struct buffer_head *bh)
1371 {
1372         struct buffer_head *evictee = NULL;
1373         struct bh_lru *lru;
1374
1375         check_irqs_on();
1376         bh_lru_lock();
1377         lru = &__get_cpu_var(bh_lrus);
1378         if (lru->bhs[0] != bh) {
1379                 struct buffer_head *bhs[BH_LRU_SIZE];
1380                 int in;
1381                 int out = 0;
1382
1383                 get_bh(bh);
1384                 bhs[out++] = bh;
1385                 for (in = 0; in < BH_LRU_SIZE; in++) {
1386                         struct buffer_head *bh2 = lru->bhs[in];
1387
1388                         if (bh2 == bh) {
1389                                 __brelse(bh2);
1390                         } else {
1391                                 if (out >= BH_LRU_SIZE) {
1392                                         BUG_ON(evictee != NULL);
1393                                         evictee = bh2;
1394                                 } else {
1395                                         bhs[out++] = bh2;
1396                                 }
1397                         }
1398                 }
1399                 while (out < BH_LRU_SIZE)
1400                         bhs[out++] = NULL;
1401                 memcpy(lru->bhs, bhs, sizeof(bhs));
1402         }
1403         bh_lru_unlock();
1404
1405         if (evictee)
1406                 __brelse(evictee);
1407 }
1408
1409 /*
1410  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1411  */
1412 static struct buffer_head *
1413 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1414 {
1415         struct buffer_head *ret = NULL;
1416         struct bh_lru *lru;
1417         int i;
1418
1419         check_irqs_on();
1420         bh_lru_lock();
1421         lru = &__get_cpu_var(bh_lrus);
1422         for (i = 0; i < BH_LRU_SIZE; i++) {
1423                 struct buffer_head *bh = lru->bhs[i];
1424
1425                 if (bh && bh->b_bdev == bdev &&
1426                                 bh->b_blocknr == block && bh->b_size == size) {
1427                         if (i) {
1428                                 while (i) {
1429                                         lru->bhs[i] = lru->bhs[i - 1];
1430                                         i--;
1431                                 }
1432                                 lru->bhs[0] = bh;
1433                         }
1434                         get_bh(bh);
1435                         ret = bh;
1436                         break;
1437                 }
1438         }
1439         bh_lru_unlock();
1440         return ret;
1441 }
1442
1443 /*
1444  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1445  * it in the LRU and mark it as accessed.  If it is not present then return
1446  * NULL
1447  */
1448 struct buffer_head *
1449 __find_get_block(struct block_device *bdev, sector_t block, int size)
1450 {
1451         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1452
1453         if (bh == NULL) {
1454                 bh = __find_get_block_slow(bdev, block);
1455                 if (bh)
1456                         bh_lru_install(bh);
1457         }
1458         if (bh)
1459                 touch_buffer(bh);
1460         return bh;
1461 }
1462 EXPORT_SYMBOL(__find_get_block);
1463
1464 /*
1465  * __getblk will locate (and, if necessary, create) the buffer_head
1466  * which corresponds to the passed block_device, block and size. The
1467  * returned buffer has its reference count incremented.
1468  *
1469  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1470  * illegal block number, __getblk() will happily return a buffer_head
1471  * which represents the non-existent block.  Very weird.
1472  *
1473  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1474  * attempt is failing.  FIXME, perhaps?
1475  */
1476 struct buffer_head *
1477 __getblk(struct block_device *bdev, sector_t block, int size)
1478 {
1479         struct buffer_head *bh = __find_get_block(bdev, block, size);
1480
1481         might_sleep();
1482         if (bh == NULL)
1483                 bh = __getblk_slow(bdev, block, size);
1484         return bh;
1485 }
1486 EXPORT_SYMBOL(__getblk);
1487
1488 /*
1489  * Do async read-ahead on a buffer..
1490  */
1491 void __breadahead(struct block_device *bdev, sector_t block, int size)
1492 {
1493         struct buffer_head *bh = __getblk(bdev, block, size);
1494         if (likely(bh)) {
1495                 ll_rw_block(READA, 1, &bh);
1496                 brelse(bh);
1497         }
1498 }
1499 EXPORT_SYMBOL(__breadahead);
1500
1501 /**
1502  *  __bread() - reads a specified block and returns the bh
1503  *  @bdev: the block_device to read from
1504  *  @block: number of block
1505  *  @size: size (in bytes) to read
1506  * 
1507  *  Reads a specified block, and returns buffer head that contains it.
1508  *  It returns NULL if the block was unreadable.
1509  */
1510 struct buffer_head *
1511 __bread(struct block_device *bdev, sector_t block, int size)
1512 {
1513         struct buffer_head *bh = __getblk(bdev, block, size);
1514
1515         if (likely(bh) && !buffer_uptodate(bh))
1516                 bh = __bread_slow(bh);
1517         return bh;
1518 }
1519 EXPORT_SYMBOL(__bread);
1520
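/*
 * Typical call sequence, sketched: unlike __getblk() above, __bread()
 * actually reads the block, so it can and does return NULL on I/O error:
 *
 *	bh = __bread(bdev, block, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	... examine bh->b_data ...
 *	brelse(bh);
 */
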
1521 /*
1522  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1523  * This doesn't race because it runs in each cpu either in irq
1524  * or with preempt disabled.
1525  */
1526 static void invalidate_bh_lru(void *arg)
1527 {
1528         struct bh_lru *b = &get_cpu_var(bh_lrus);
1529         int i;
1530
1531         for (i = 0; i < BH_LRU_SIZE; i++) {
1532                 brelse(b->bhs[i]);
1533                 b->bhs[i] = NULL;
1534         }
1535         put_cpu_var(bh_lrus);
1536 }
1537         
1538 static void invalidate_bh_lrus(void)
1539 {
1540         on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1541 }
1542
1543 void set_bh_page(struct buffer_head *bh,
1544                 struct page *page, unsigned long offset)
1545 {
1546         bh->b_page = page;
1547         if (offset >= PAGE_SIZE)
1548                 BUG();
1549         if (PageHighMem(page))
1550                 /*
1551                  * This catches illegal uses and preserves the offset:
1552                  */
1553                 bh->b_data = (char *)(0 + offset);
1554         else
1555                 bh->b_data = page_address(page) + offset;
1556 }
1557 EXPORT_SYMBOL(set_bh_page);
1558
1559 /*
1560  * Called when truncating a buffer on a page completely.
1561  */
1562 static void discard_buffer(struct buffer_head * bh)
1563 {
1564         lock_buffer(bh);
1565         clear_buffer_dirty(bh);
1566         bh->b_bdev = NULL;
1567         clear_buffer_mapped(bh);
1568         clear_buffer_req(bh);
1569         clear_buffer_new(bh);
1570         clear_buffer_delay(bh);
1571         unlock_buffer(bh);
1572 }
1573
1574 /**
1575  * try_to_release_page() - release old fs-specific metadata on a page
1576  *
1577  * @page: the page which the kernel is trying to free
1578  * @gfp_mask: memory allocation flags (and I/O mode)
1579  *
1580  * The address_space is asked to try to release any data against the page
1581  * (presumably at page->private).  If the release was successful, return `1'.
1582  * Otherwise return zero.
1583  *
1584  * The @gfp_mask argument specifies whether I/O may be performed to release
1585  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1586  *
1587  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1588  */
1589 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1590 {
1591         struct address_space * const mapping = page->mapping;
1592
1593         BUG_ON(!PageLocked(page));
1594         if (PageWriteback(page))
1595                 return 0;
1596         
1597         if (mapping && mapping->a_ops->releasepage)
1598                 return mapping->a_ops->releasepage(page, gfp_mask);
1599         return try_to_free_buffers(page);
1600 }
1601 EXPORT_SYMBOL(try_to_release_page);
1602
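/*
 * Illustrative sketch (not part of buffer.c): the usual shape of a
 * filesystem ->releasepage() that try_to_release_page() ends up calling.
 * The filesystem refuses while it still holds private state against the
 * page's buffers, then lets try_to_free_buffers() do the generic work.
 * example_fs_releasepage() and its pin check are hypothetical.
 */
#if 0
static int example_fs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (example_fs_page_is_pinned(page))	/* hypothetical private check */
		return 0;
	return try_to_free_buffers(page);
}
#endif
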
1603 /**
1604  * block_invalidatepage - invalidate part or all of a buffer-backed page
1605  *
1606  * @page: the page which is affected
1607  * @offset: the index of the truncation point
1608  *
1609  * block_invalidatepage() is called when all or part of the page has become
1610  * invalidated by a truncate operation.
1611  *
1612  * block_invalidatepage() does not have to release all buffers, but it must
1613  * ensure that no dirty buffer is left outside @offset and that no I/O
1614  * is underway against any of the blocks which are outside the truncation
1615  * point, because the caller is about to free (and possibly reuse) those
1616  * blocks on-disk.
1617  */
1618 int block_invalidatepage(struct page *page, unsigned long offset)
1619 {
1620         struct buffer_head *head, *bh, *next;
1621         unsigned int curr_off = 0;
1622         int ret = 1;
1623
1624         BUG_ON(!PageLocked(page));
1625         if (!page_has_buffers(page))
1626                 goto out;
1627
1628         head = page_buffers(page);
1629         bh = head;
1630         do {
1631                 unsigned int next_off = curr_off + bh->b_size;
1632                 next = bh->b_this_page;
1633
1634                 /*
1635                  * is this block fully invalidated?
1636                  */
1637                 if (offset <= curr_off)
1638                         discard_buffer(bh);
1639                 curr_off = next_off;
1640                 bh = next;
1641         } while (bh != head);
1642
1643         /*
1644          * We release buffers only if the entire page is being invalidated.
1645          * The get_block cached value has been unconditionally invalidated,
1646          * so real IO is not possible anymore.
1647          */
1648         if (offset == 0)
1649                 ret = try_to_release_page(page, 0);
1650 out:
1651         return ret;
1652 }
1653 EXPORT_SYMBOL(block_invalidatepage);
1654
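/*
 * Illustrative sketch (not part of buffer.c): a buffer-backed filesystem
 * with no private per-page state can point ->invalidatepage straight at
 * block_invalidatepage(), or leave the method NULL and rely on the
 * do_invalidatepage() fallback below.  "example_fs" is hypothetical.
 */
#if 0
static struct address_space_operations example_fs_aops_fragment = {
	.invalidatepage	= block_invalidatepage,
	.releasepage	= example_fs_releasepage,	/* sketched above */
};
#endif
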
1655 int do_invalidatepage(struct page *page, unsigned long offset)
1656 {
1657         int (*invalidatepage)(struct page *, unsigned long);
1658         invalidatepage = page->mapping->a_ops->invalidatepage;
1659         if (invalidatepage == NULL)
1660                 invalidatepage = block_invalidatepage;
1661         return (*invalidatepage)(page, offset);
1662 }
1663
1664 /*
1665  * We attach and possibly dirty the buffers atomically wrt
1666  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1667  * is already excluded via the page lock.
1668  */
1669 void create_empty_buffers(struct page *page,
1670                         unsigned long blocksize, unsigned long b_state)
1671 {
1672         struct buffer_head *bh, *head, *tail;
1673
1674         head = alloc_page_buffers(page, blocksize, 1);
1675         bh = head;
1676         do {
1677                 bh->b_state |= b_state;
1678                 tail = bh;
1679                 bh = bh->b_this_page;
1680         } while (bh);
1681         tail->b_this_page = head;
1682
1683         spin_lock(&page->mapping->private_lock);
1684         if (PageUptodate(page) || PageDirty(page)) {
1685                 bh = head;
1686                 do {
1687                         if (PageDirty(page))
1688                                 set_buffer_dirty(bh);
1689                         if (PageUptodate(page))
1690                                 set_buffer_uptodate(bh);
1691                         bh = bh->b_this_page;
1692                 } while (bh != head);
1693         }
1694         attach_page_buffers(page, head);
1695         spin_unlock(&page->mapping->private_lock);
1696 }
1697 EXPORT_SYMBOL(create_empty_buffers);
1698
1699 /*
1700  * We are taking a block for data and we don't want any output from any
1701  * buffer-cache aliases from the moment this function returns until
1702  * something explicitly marks the buffer dirty (hopefully that will not
1703  * happen until we free that block ;-)
1704  * We don't even need to mark it not-uptodate - nobody can expect
1705  * anything from a newly allocated buffer anyway. We used to use
1706  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1707  * don't want to mark the alias unmapped, for example - it would confuse
1708  * anyone who might pick it with bread() afterwards...
1709  *
1710  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1711  * be writeout I/O going on against recently-freed buffers.  We don't
1712  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1713  * only if we really need to.  That happens here.
1714  */
1715 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1716 {
1717         struct buffer_head *old_bh;
1718
1719         might_sleep();
1720
1721         old_bh = __find_get_block_slow(bdev, block);
1722         if (old_bh) {
1723                 clear_buffer_dirty(old_bh);
1724                 wait_on_buffer(old_bh);
1725                 clear_buffer_req(old_bh);
1726                 __brelse(old_bh);
1727         }
1728 }
1729 EXPORT_SYMBOL(unmap_underlying_metadata);
1730
1731 /*
1732  * NOTE! All mapped/uptodate combinations are valid:
1733  *
1734  *      Mapped  Uptodate        Meaning
1735  *
1736  *      No      No              "unknown" - must do get_block()
1737  *      No      Yes             "hole" - zero-filled
1738  *      Yes     No              "allocated" - allocated on disk, not read in
1739  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1740  *
1741  * "Dirty" is valid only with the last case (mapped+uptodate).
1742  */
1743
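/*
 * Illustrative sketch (not part of buffer.c): how a reader acts on the
 * four mapped/uptodate combinations above.  This mirrors the logic of
 * block_read_full_page() further down; bh, inode, iblock, get_block and
 * err are contextual names, not real declarations.
 */
#if 0
	if (buffer_uptodate(bh)) {
		/* "hole" or "valid": memory already holds the right data */
	} else if (buffer_mapped(bh)) {
		/* "allocated": on disk but not read in - issue a read */
		ll_rw_block(READ, 1, &bh);
	} else {
		/* "unknown": ask the filesystem to map the block first */
		err = get_block(inode, iblock, bh, 0);
	}
#endif
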
1744 /*
1745  * While block_write_full_page is writing back the dirty buffers under
1746  * the page lock, whoever dirtied the buffers may decide to clean them
1747  * again at any time.  We handle that by only looking at the buffer
1748  * state inside lock_buffer().
1749  *
1750  * If block_write_full_page() is called for regular writeback
1751  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1752  * locked buffer.   This only can happen if someone has written the buffer
1753  * directly, with submit_bh().  At the address_space level PageWriteback
1754  * prevents this contention from occurring.
1755  */
1756 static int __block_write_full_page(struct inode *inode, struct page *page,
1757                         get_block_t *get_block, struct writeback_control *wbc)
1758 {
1759         int err;
1760         sector_t block;
1761         sector_t last_block;
1762         struct buffer_head *bh, *head;
1763         int nr_underway = 0;
1764
1765         BUG_ON(!PageLocked(page));
1766
1767         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1768
1769         if (!page_has_buffers(page)) {
1770                 create_empty_buffers(page, 1 << inode->i_blkbits,
1771                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1772         }
1773
1774         /*
1775          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1776          * here, and the (potentially unmapped) buffers may become dirty at
1777          * any time.  If a buffer becomes dirty here after we've inspected it
1778          * then we just miss that fact, and the page stays dirty.
1779          *
1780          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1781          * handle that here by just cleaning them.
1782          */
1783
1784         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1785         head = page_buffers(page);
1786         bh = head;
1787
1788         /*
1789          * Get all the dirty buffers mapped to disk addresses and
1790          * handle any aliases from the underlying blockdev's mapping.
1791          */
1792         do {
1793                 if (block > last_block) {
1794                         /*
1795                          * mapped buffers outside i_size will occur, because
1796                          * this page can be outside i_size when there is a
1797                          * truncate in progress.
1798                          */
1799                         /*
1800                          * The buffer was zeroed by block_write_full_page()
1801                          */
1802                         clear_buffer_dirty(bh);
1803                         set_buffer_uptodate(bh);
1804                 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1805                         err = get_block(inode, block, bh, 1);
1806                         if (err)
1807                                 goto recover;
1808                         if (buffer_new(bh)) {
1809                                 /* blockdev mappings never come here */
1810                                 clear_buffer_new(bh);
1811                                 unmap_underlying_metadata(bh->b_bdev,
1812                                                         bh->b_blocknr);
1813                         }
1814                 }
1815                 bh = bh->b_this_page;
1816                 block++;
1817         } while (bh != head);
1818
1819         do {
1820                 if (!buffer_mapped(bh))
1821                         continue;
1822                 /*
1823                  * If it's a fully non-blocking write attempt and we cannot
1824                  * lock the buffer then redirty the page.  Note that this can
1825                  * potentially cause a busy-wait loop from pdflush and kswapd
1826                  * activity, but those code paths have their own higher-level
1827                  * throttling.
1828                  */
1829                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1830                         lock_buffer(bh);
1831                 } else if (test_set_buffer_locked(bh)) {
1832                         redirty_page_for_writepage(wbc, page);
1833                         continue;
1834                 }
1835                 if (test_clear_buffer_dirty(bh)) {
1836                         mark_buffer_async_write(bh);
1837                 } else {
1838                         unlock_buffer(bh);
1839                 }
1840         } while ((bh = bh->b_this_page) != head);
1841
1842         /*
1843          * The page and its buffers are protected by PageWriteback(), so we can
1844          * drop the bh refcounts early.
1845          */
1846         BUG_ON(PageWriteback(page));
1847         set_page_writeback(page);
1848
1849         do {
1850                 struct buffer_head *next = bh->b_this_page;
1851                 if (buffer_async_write(bh)) {
1852                         submit_bh(WRITE, bh);
1853                         nr_underway++;
1854                 }
1855                 bh = next;
1856         } while (bh != head);
1857         unlock_page(page);
1858
1859         err = 0;
1860 done:
1861         if (nr_underway == 0) {
1862                 /*
1863                  * The page was marked dirty, but the buffers were
1864                  * clean.  Someone wrote them back by hand with
1865                  * ll_rw_block/submit_bh.  A rare case.
1866                  */
1867                 int uptodate = 1;
1868                 do {
1869                         if (!buffer_uptodate(bh)) {
1870                                 uptodate = 0;
1871                                 break;
1872                         }
1873                         bh = bh->b_this_page;
1874                 } while (bh != head);
1875                 if (uptodate)
1876                         SetPageUptodate(page);
1877                 end_page_writeback(page);
1878                 /*
1879                  * The page and buffer_heads can be released at any time from
1880                  * here on.
1881                  */
1882                 wbc->pages_skipped++;   /* We didn't write this page */
1883         }
1884         return err;
1885
1886 recover:
1887         /*
1888          * ENOSPC, or some other error.  We may already have added some
1889          * blocks to the file, so we need to write these out to avoid
1890          * exposing stale data.
1891          * The page is currently locked and not marked for writeback
1892          */
1893         bh = head;
1894         /* Recovery: lock and submit the mapped buffers */
1895         do {
1896                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1897                         lock_buffer(bh);
1898                         mark_buffer_async_write(bh);
1899                 } else {
1900                         /*
1901                          * The buffer may have been set dirty during
1902                          * attachment to a dirty page.
1903                          */
1904                         clear_buffer_dirty(bh);
1905                 }
1906         } while ((bh = bh->b_this_page) != head);
1907         SetPageError(page);
1908         BUG_ON(PageWriteback(page));
1909         set_page_writeback(page);
1910         unlock_page(page);
1911         do {
1912                 struct buffer_head *next = bh->b_this_page;
1913                 if (buffer_async_write(bh)) {
1914                         clear_buffer_dirty(bh);
1915                         submit_bh(WRITE, bh);
1916                         nr_underway++;
1917                 }
1918                 bh = next;
1919         } while (bh != head);
1920         goto done;
1921 }
1922
1923 static int __block_prepare_write(struct inode *inode, struct page *page,
1924                 unsigned from, unsigned to, get_block_t *get_block)
1925 {
1926         unsigned block_start, block_end;
1927         sector_t block;
1928         int err = 0;
1929         unsigned blocksize, bbits;
1930         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1931
1932         BUG_ON(!PageLocked(page));
1933         BUG_ON(from > PAGE_CACHE_SIZE);
1934         BUG_ON(to > PAGE_CACHE_SIZE);
1935         BUG_ON(from > to);
1936
1937         blocksize = 1 << inode->i_blkbits;
1938         if (!page_has_buffers(page))
1939                 create_empty_buffers(page, blocksize, 0);
1940         head = page_buffers(page);
1941
1942         bbits = inode->i_blkbits;
1943         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1944
1945         for(bh = head, block_start = 0; bh != head || !block_start;
1946             block++, block_start=block_end, bh = bh->b_this_page) {
1947                 block_end = block_start + blocksize;
1948                 if (block_end <= from || block_start >= to) {
1949                         if (PageUptodate(page)) {
1950                                 if (!buffer_uptodate(bh))
1951                                         set_buffer_uptodate(bh);
1952                         }
1953                         continue;
1954                 }
1955                 if (buffer_new(bh))
1956                         clear_buffer_new(bh);
1957                 if (!buffer_mapped(bh)) {
1958                         err = get_block(inode, block, bh, 1);
1959                         if (err)
1960                                 break;
1961                         if (buffer_new(bh)) {
1962                                 unmap_underlying_metadata(bh->b_bdev,
1963                                                         bh->b_blocknr);
1964                                 if (PageUptodate(page)) {
1965                                         set_buffer_uptodate(bh);
1966                                         continue;
1967                                 }
1968                                 if (block_end > to || block_start < from) {
1969                                         void *kaddr;
1970
1971                                         kaddr = kmap_atomic(page, KM_USER0);
1972                                         if (block_end > to)
1973                                                 memset(kaddr+to, 0,
1974                                                         block_end-to);
1975                                         if (block_start < from)
1976                                                 memset(kaddr+block_start,
1977                                                         0, from-block_start);
1978                                         flush_dcache_page(page);
1979                                         kunmap_atomic(kaddr, KM_USER0);
1980                                 }
1981                                 continue;
1982                         }
1983                 }
1984                 if (PageUptodate(page)) {
1985                         if (!buffer_uptodate(bh))
1986                                 set_buffer_uptodate(bh);
1987                         continue; 
1988                 }
1989                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1990                      (block_start < from || block_end > to)) {
1991                         ll_rw_block(READ, 1, &bh);
1992                         *wait_bh++=bh;
1993                 }
1994         }
1995         /*
1996          * If we issued read requests - let them complete.
1997          */
1998         while(wait_bh > wait) {
1999                 wait_on_buffer(*--wait_bh);
2000                 if (!buffer_uptodate(*wait_bh))
2001                         err = -EIO;
2002         }
2003         if (!err) {
2004                 bh = head;
2005                 do {
2006                         if (buffer_new(bh))
2007                                 clear_buffer_new(bh);
2008                 } while ((bh = bh->b_this_page) != head);
2009                 return 0;
2010         }
2011         /* Error case: */
2012         /*
2013          * Zero out any newly allocated blocks to avoid exposing stale
2014          * data.  If BH_New is set, we know that the block was newly
2015          * allocated in the above loop.
2016          */
2017         bh = head;
2018         block_start = 0;
2019         do {
2020                 block_end = block_start+blocksize;
2021                 if (block_end <= from)
2022                         goto next_bh;
2023                 if (block_start >= to)
2024                         break;
2025                 if (buffer_new(bh)) {
2026                         void *kaddr;
2027
2028                         clear_buffer_new(bh);
2029                         kaddr = kmap_atomic(page, KM_USER0);
2030                         memset(kaddr+block_start, 0, bh->b_size);
2031                         kunmap_atomic(kaddr, KM_USER0);
2032                         set_buffer_uptodate(bh);
2033                         mark_buffer_dirty(bh);
2034                 }
2035 next_bh:
2036                 block_start = block_end;
2037                 bh = bh->b_this_page;
2038         } while (bh != head);
2039         return err;
2040 }
2041
2042 static int __block_commit_write(struct inode *inode, struct page *page,
2043                 unsigned from, unsigned to)
2044 {
2045         unsigned block_start, block_end;
2046         int partial = 0;
2047         unsigned blocksize;
2048         struct buffer_head *bh, *head;
2049
2050         blocksize = 1 << inode->i_blkbits;
2051
2052         for(bh = head = page_buffers(page), block_start = 0;
2053             bh != head || !block_start;
2054             block_start=block_end, bh = bh->b_this_page) {
2055                 block_end = block_start + blocksize;
2056                 if (block_end <= from || block_start >= to) {
2057                         if (!buffer_uptodate(bh))
2058                                 partial = 1;
2059                 } else {
2060                         set_buffer_uptodate(bh);
2061                         mark_buffer_dirty(bh);
2062                 }
2063         }
2064
2065         /*
2066          * If this is a partial write which happened to make all buffers
2067          * uptodate then we can optimize away a bogus readpage() for
2068          * the next read(). Here we 'discover' whether the page went
2069          * uptodate as a result of this (potentially partial) write.
2070          */
2071         if (!partial)
2072                 SetPageUptodate(page);
2073         return 0;
2074 }
2075
2076 /*
2077  * Generic "read page" function for block devices that have the normal
2078  * get_block functionality. This is most of the block device filesystems.
2079  * Reads the page asynchronously --- the unlock_buffer() and
2080  * set/clear_buffer_uptodate() functions propagate buffer state into the
2081  * page struct once IO has completed.
2082  */
2083 int block_read_full_page(struct page *page, get_block_t *get_block)
2084 {
2085         struct inode *inode = page->mapping->host;
2086         sector_t iblock, lblock;
2087         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2088         unsigned int blocksize;
2089         int nr, i;
2090         int fully_mapped = 1;
2091
2092         BUG_ON(!PageLocked(page));
2093         blocksize = 1 << inode->i_blkbits;
2094         if (!page_has_buffers(page))
2095                 create_empty_buffers(page, blocksize, 0);
2096         head = page_buffers(page);
2097
2098         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2099         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2100         bh = head;
2101         nr = 0;
2102         i = 0;
2103
2104         do {
2105                 if (buffer_uptodate(bh))
2106                         continue;
2107
2108                 if (!buffer_mapped(bh)) {
2109                         int err = 0;
2110
2111                         fully_mapped = 0;
2112                         if (iblock < lblock) {
2113                                 err = get_block(inode, iblock, bh, 0);
2114                                 if (err)
2115                                         SetPageError(page);
2116                         }
2117                         if (!buffer_mapped(bh)) {
2118                                 void *kaddr = kmap_atomic(page, KM_USER0);
2119                                 memset(kaddr + i * blocksize, 0, blocksize);
2120                                 flush_dcache_page(page);
2121                                 kunmap_atomic(kaddr, KM_USER0);
2122                                 if (!err)
2123                                         set_buffer_uptodate(bh);
2124                                 continue;
2125                         }
2126                         /*
2127                          * get_block() might have updated the buffer
2128                          * synchronously
2129                          */
2130                         if (buffer_uptodate(bh))
2131                                 continue;
2132                 }
2133                 arr[nr++] = bh;
2134         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2135
2136         if (fully_mapped)
2137                 SetPageMappedToDisk(page);
2138
2139         if (!nr) {
2140                 /*
2141                  * All buffers are uptodate - we can set the page uptodate
2142                  * as well. But not if get_block() returned an error.
2143                  */
2144                 if (!PageError(page))
2145                         SetPageUptodate(page);
2146                 unlock_page(page);
2147                 return 0;
2148         }
2149
2150         /* Stage two: lock the buffers */
2151         for (i = 0; i < nr; i++) {
2152                 bh = arr[i];
2153                 lock_buffer(bh);
2154                 mark_buffer_async_read(bh);
2155         }
2156
2157         /*
2158          * Stage 3: start the IO.  Check for uptodateness
2159          * inside the buffer lock in case another process reading
2160          * the underlying blockdev brought it uptodate (the sct fix).
2161          */
2162         for (i = 0; i < nr; i++) {
2163                 bh = arr[i];
2164                 if (buffer_uptodate(bh))
2165                         end_buffer_async_read(bh, 1);
2166                 else
2167                         submit_bh(READ, bh);
2168         }
2169         return 0;
2170 }
2171
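/*
 * Illustrative sketch (not part of buffer.c): the get_block_t callback
 * which block_read_full_page() and the other generic helpers in this file
 * expect from the filesystem.  It maps a logical block of the file to a
 * disk block by filling in bh_result (map_bh() sets BH_Mapped); a freshly
 * allocated block would additionally get BH_New.  "example_fs" and its
 * contiguous on-disk layout are purely hypothetical.
 */
#if 0
static int example_fs_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	sector_t last = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
						>> inode->i_blkbits;

	if (iblock >= last) {
		if (!create)
			return 0;	/* a hole: leave bh_result unmapped */
		return -ENOSPC;		/* a real fs would allocate here */
	}
	/* hypothetical layout: the file's blocks are contiguous on disk */
	map_bh(bh_result, inode->i_sb,
			example_fs_first_block(inode) + iblock);
	return 0;
}
#endif
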
2172 /* utility function for filesystems that need to do work on expanding
2173  * truncates.  Uses prepare/commit_write to allow the filesystem to
2174  * deal with the hole.  
2175  */
2176 static int __generic_cont_expand(struct inode *inode, loff_t size,
2177                                  pgoff_t index, unsigned int offset)
2178 {
2179         struct address_space *mapping = inode->i_mapping;
2180         struct page *page;
2181         unsigned long limit;
2182         int err;
2183
2184         err = -EFBIG;
2185         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2186         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2187                 send_sig(SIGXFSZ, current, 0);
2188                 goto out;
2189         }
2190         if (size > inode->i_sb->s_maxbytes)
2191                 goto out;
2192
2193         err = -ENOMEM;
2194         page = grab_cache_page(mapping, index);
2195         if (!page)
2196                 goto out;
2197         err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2198         if (err) {
2199                 /*
2200                  * ->prepare_write() may have instantiated a few blocks
2201                  * outside i_size.  Trim these off again.
2202                  */
2203                 unlock_page(page);
2204                 page_cache_release(page);
2205                 vmtruncate(inode, inode->i_size);
2206                 goto out;
2207         }
2208
2209         err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2210
2211         unlock_page(page);
2212         page_cache_release(page);
2213         if (err > 0)
2214                 err = 0;
2215 out:
2216         return err;
2217 }
2218
2219 int generic_cont_expand(struct inode *inode, loff_t size)
2220 {
2221         pgoff_t index;
2222         unsigned int offset;
2223
2224         offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2225
2226         /* ugh.  in prepare/commit_write, if from==to==start of block, we
2227         ** skip the prepare.  make sure we never send an offset for the start
2228         ** of a block
2229         */
2230         if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2231                 /* caller must handle this extra byte. */
2232                 offset++;
2233         }
2234         index = size >> PAGE_CACHE_SHIFT;
2235
2236         return __generic_cont_expand(inode, size, index, offset);
2237 }
2238
2239 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2240 {
2241         loff_t pos = size - 1;
2242         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2243         unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2244
2245         /* prepare/commit_write can handle even if from==to==start of block. */
2246         return __generic_cont_expand(inode, size, index, offset);
2247 }
2248
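/*
 * Illustrative sketch (not part of buffer.c): a filesystem that cannot
 * represent holes typically calls generic_cont_expand_simple() from its
 * setattr path when the size grows, so the gap up to the new EOF is
 * backed by zeroed, allocated blocks.  The fragment assumes the usual
 * "attr" argument and "error" local of a ->setattr() method.
 */
#if 0
	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		error = generic_cont_expand_simple(inode, attr->ia_size);
		if (error)
			return error;
	}
#endif
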
2249 /*
2250  * For moronic filesystems that do not allow holes in files.
2251  * We may have to extend the file.
2252  */
2253
2254 int cont_prepare_write(struct page *page, unsigned offset,
2255                 unsigned to, get_block_t *get_block, loff_t *bytes)
2256 {
2257         struct address_space *mapping = page->mapping;
2258         struct inode *inode = mapping->host;
2259         struct page *new_page;
2260         pgoff_t pgpos;
2261         long status;
2262         unsigned zerofrom;
2263         unsigned blocksize = 1 << inode->i_blkbits;
2264         void *kaddr;
2265
2266         while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2267                 status = -ENOMEM;
2268                 new_page = grab_cache_page(mapping, pgpos);
2269                 if (!new_page)
2270                         goto out;
2271                 /* we might sleep */
2272                 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2273                         unlock_page(new_page);
2274                         page_cache_release(new_page);
2275                         continue;
2276                 }
2277                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2278                 if (zerofrom & (blocksize-1)) {
2279                         *bytes |= (blocksize-1);
2280                         (*bytes)++;
2281                 }
2282                 status = __block_prepare_write(inode, new_page, zerofrom,
2283                                                 PAGE_CACHE_SIZE, get_block);
2284                 if (status)
2285                         goto out_unmap;
2286                 kaddr = kmap_atomic(new_page, KM_USER0);
2287                 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2288                 flush_dcache_page(new_page);
2289                 kunmap_atomic(kaddr, KM_USER0);
2290                 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2291                 unlock_page(new_page);
2292                 page_cache_release(new_page);
2293         }
2294
2295         if (page->index < pgpos) {
2296                 /* completely inside the area */
2297                 zerofrom = offset;
2298         } else {
2299                 /* page covers the boundary, find the boundary offset */
2300                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2301
2302                 /* if we expand the file, the last block will be filled */
2303                 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2304                         *bytes |= (blocksize-1);
2305                         (*bytes)++;
2306                 }
2307
2308                 /* starting below the boundary? Nothing to zero out */
2309                 if (offset <= zerofrom)
2310                         zerofrom = offset;
2311         }
2312         status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2313         if (status)
2314                 goto out1;
2315         if (zerofrom < offset) {
2316                 kaddr = kmap_atomic(page, KM_USER0);
2317                 memset(kaddr+zerofrom, 0, offset-zerofrom);
2318                 flush_dcache_page(page);
2319                 kunmap_atomic(kaddr, KM_USER0);
2320                 __block_commit_write(inode, page, zerofrom, offset);
2321         }
2322         return 0;
2323 out1:
2324         ClearPageUptodate(page);
2325         return status;
2326
2327 out_unmap:
2328         ClearPageUptodate(new_page);
2329         unlock_page(new_page);
2330         page_cache_release(new_page);
2331 out:
2332         return status;
2333 }
2334
2335 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2336                         get_block_t *get_block)
2337 {
2338         struct inode *inode = page->mapping->host;
2339         int err = __block_prepare_write(inode, page, from, to, get_block);
2340         if (err)
2341                 ClearPageUptodate(page);
2342         return err;
2343 }
2344
2345 int block_commit_write(struct page *page, unsigned from, unsigned to)
2346 {
2347         struct inode *inode = page->mapping->host;
2348         __block_commit_write(inode,page,from,to);
2349         return 0;
2350 }
2351
2352 int generic_commit_write(struct file *file, struct page *page,
2353                 unsigned from, unsigned to)
2354 {
2355         struct inode *inode = page->mapping->host;
2356         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2357         __block_commit_write(inode,page,from,to);
2358         /*
2359          * No need to use i_size_read() here, the i_size
2360          * cannot change under us because we hold i_mutex.
2361          */
2362         if (pos > inode->i_size) {
2363                 i_size_write(inode, pos);
2364                 mark_inode_dirty(inode);
2365         }
2366         return 0;
2367 }
2368
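/*
 * Illustrative sketch (not part of buffer.c): how a simple buffer-backed
 * filesystem strings the generic helpers in this file together in its
 * address_space_operations, handing its own get_block_t to each of them.
 * example_fs_get_block() is the hypothetical callback sketched after
 * block_read_full_page(); the thin wrappers below are hypothetical too.
 */
#if 0
static int example_fs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_fs_get_block);
}

static int example_fs_writepage(struct page *page,
				struct writeback_control *wbc)
{
	return block_write_full_page(page, example_fs_get_block, wbc);
}

static int example_fs_prepare_write(struct file *file, struct page *page,
					unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_fs_get_block);
}

static sector_t example_fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_fs_get_block);
}

static struct address_space_operations example_fs_aops = {
	.readpage	= example_fs_readpage,
	.writepage	= example_fs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_fs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= example_fs_bmap,
};
#endif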
2369
2370 /*
2371  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2372  * immediately, while under the page lock.  So it needs a special end_io
2373  * handler which does not touch the bh after unlocking it.
2374  *
2375  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2376  * a race there is benign: unlock_buffer() only uses the bh's address for
2377  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2378  * itself.
2379  */
2380 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2381 {
2382         if (uptodate) {
2383                 set_buffer_uptodate(bh);
2384         } else {
2385                 /* This happens, due to failed READA attempts. */
2386                 clear_buffer_uptodate(bh);
2387         }
2388         unlock_buffer(bh);
2389 }
2390
2391 /*
2392  * On entry, the page is fully not uptodate.
2393  * On exit, the page is fully uptodate in the areas outside (from,to).
2394  */
2395 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2396                         get_block_t *get_block)
2397 {
2398         struct inode *inode = page->mapping->host;
2399         const unsigned blkbits = inode->i_blkbits;
2400         const unsigned blocksize = 1 << blkbits;
2401         struct buffer_head map_bh;
2402         struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2403         unsigned block_in_page;
2404         unsigned block_start;
2405         sector_t block_in_file;
2406         char *kaddr;
2407         int nr_reads = 0;
2408         int i;
2409         int ret = 0;
2410         int is_mapped_to_disk = 1;
2411         int dirtied_it = 0;
2412
2413         if (PageMappedToDisk(page))
2414                 return 0;
2415
2416         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2417         map_bh.b_page = page;
2418
2419         /*
2420          * We loop across all blocks in the page, whether or not they are
2421          * part of the affected region.  This is so we can discover if the
2422          * page is fully mapped-to-disk.
2423          */
2424         for (block_start = 0, block_in_page = 0;
2425                   block_start < PAGE_CACHE_SIZE;
2426                   block_in_page++, block_start += blocksize) {
2427                 unsigned block_end = block_start + blocksize;
2428                 int create;
2429
2430                 map_bh.b_state = 0;
2431                 create = 1;
2432                 if (block_start >= to)
2433                         create = 0;
2434                 ret = get_block(inode, block_in_file + block_in_page,
2435                                         &map_bh, create);
2436                 if (ret)
2437                         goto failed;
2438                 if (!buffer_mapped(&map_bh))
2439                         is_mapped_to_disk = 0;
2440                 if (buffer_new(&map_bh))
2441                         unmap_underlying_metadata(map_bh.b_bdev,
2442                                                         map_bh.b_blocknr);
2443                 if (PageUptodate(page))
2444                         continue;
2445                 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2446                         kaddr = kmap_atomic(page, KM_USER0);
2447                         if (block_start < from) {
2448                                 memset(kaddr+block_start, 0, from-block_start);
2449                                 dirtied_it = 1;
2450                         }
2451                         if (block_end > to) {
2452                                 memset(kaddr + to, 0, block_end - to);
2453                                 dirtied_it = 1;
2454                         }
2455                         flush_dcache_page(page);
2456                         kunmap_atomic(kaddr, KM_USER0);
2457                         continue;
2458                 }
2459                 if (buffer_uptodate(&map_bh))
2460                         continue;       /* reiserfs does this */
2461                 if (block_start < from || block_end > to) {
2462                         struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2463
2464                         if (!bh) {
2465                                 ret = -ENOMEM;
2466                                 goto failed;
2467                         }
2468                         bh->b_state = map_bh.b_state;
2469                         atomic_set(&bh->b_count, 0);
2470                         bh->b_this_page = NULL;
2471                         bh->b_page = page;
2472                         bh->b_blocknr = map_bh.b_blocknr;
2473                         bh->b_size = blocksize;
2474                         bh->b_data = (char *)(long)block_start;
2475                         bh->b_bdev = map_bh.b_bdev;
2476                         bh->b_private = NULL;
2477                         read_bh[nr_reads++] = bh;
2478                 }
2479         }
2480
2481         if (nr_reads) {
2482                 struct buffer_head *bh;
2483
2484                 /*
2485                  * The page is locked, so these buffers are protected from
2486                  * any VM or truncate activity.  Hence we don't need to care
2487                  * for the buffer_head refcounts.
2488                  */
2489                 for (i = 0; i < nr_reads; i++) {
2490                         bh = read_bh[i];
2491                         lock_buffer(bh);
2492                         bh->b_end_io = end_buffer_read_nobh;
2493                         submit_bh(READ, bh);
2494                 }
2495                 for (i = 0; i < nr_reads; i++) {
2496                         bh = read_bh[i];
2497                         wait_on_buffer(bh);
2498                         if (!buffer_uptodate(bh))
2499                                 ret = -EIO;
2500                         free_buffer_head(bh);
2501                         read_bh[i] = NULL;
2502                 }
2503                 if (ret)
2504                         goto failed;
2505         }
2506
2507         if (is_mapped_to_disk)
2508                 SetPageMappedToDisk(page);
2509         SetPageUptodate(page);
2510
2511         /*
2512          * Setting the page dirty here isn't necessary for the prepare_write
2513          * function - commit_write will do that.  But if/when this function is
2514          * used within the pagefault handler to ensure that all mmapped pages
2515          * have backing space in the filesystem, we will need to dirty the page
2516          * if its contents were altered.
2517          */
2518         if (dirtied_it)
2519                 set_page_dirty(page);
2520
2521         return 0;
2522
2523 failed:
2524         for (i = 0; i < nr_reads; i++) {
2525                 if (read_bh[i])
2526                         free_buffer_head(read_bh[i]);
2527         }
2528
2529         /*
2530          * Error recovery is pretty slack.  Clear the page and mark it dirty
2531          * so we'll later zero out any blocks which _were_ allocated.
2532          */
2533         kaddr = kmap_atomic(page, KM_USER0);
2534         memset(kaddr, 0, PAGE_CACHE_SIZE);
2535         kunmap_atomic(kaddr, KM_USER0);
2536         SetPageUptodate(page);
2537         set_page_dirty(page);
2538         return ret;
2539 }
2540 EXPORT_SYMBOL(nobh_prepare_write);
2541
2542 int nobh_commit_write(struct file *file, struct page *page,
2543                 unsigned from, unsigned to)
2544 {
2545         struct inode *inode = page->mapping->host;
2546         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2547
2548         set_page_dirty(page);
2549         if (pos > inode->i_size) {
2550                 i_size_write(inode, pos);
2551                 mark_inode_dirty(inode);
2552         }
2553         return 0;
2554 }
2555 EXPORT_SYMBOL(nobh_commit_write);
2556
2557 /*
2558  * nobh_writepage() - based on block_write_full_page() except
2559  * that it tries to operate without attaching bufferheads to
2560  * the page.
2561  */
2562 int nobh_writepage(struct page *page, get_block_t *get_block,
2563                         struct writeback_control *wbc)
2564 {
2565         struct inode * const inode = page->mapping->host;
2566         loff_t i_size = i_size_read(inode);
2567         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2568         unsigned offset;
2569         void *kaddr;
2570         int ret;
2571
2572         /* Is the page fully inside i_size? */
2573         if (page->index < end_index)
2574                 goto out;
2575
2576         /* Is the page fully outside i_size? (truncate in progress) */
2577         offset = i_size & (PAGE_CACHE_SIZE-1);
2578         if (page->index >= end_index+1 || !offset) {
2579                 /*
2580                  * The page may have dirty, unmapped buffers.  For example,
2581                  * they may have been added in ext3_writepage().  Make them
2582                  * freeable here, so the page does not leak.
2583                  */
2584 #if 0
2585                 /* Not really sure about this  - do we need this ? */
2586                 if (page->mapping->a_ops->invalidatepage)
2587                         page->mapping->a_ops->invalidatepage(page, offset);
2588 #endif
2589                 unlock_page(page);
2590                 return 0; /* don't care */
2591         }
2592
2593         /*
2594          * The page straddles i_size.  It must be zeroed out on each and every
2595          * writepage invocation because it may be mmapped.  "A file is mapped
2596          * in multiples of the page size.  For a file that is not a multiple of
2597          * the  page size, the remaining memory is zeroed when mapped, and
2598          * writes to that region are not written out to the file."
2599          */
2600         kaddr = kmap_atomic(page, KM_USER0);
2601         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2602         flush_dcache_page(page);
2603         kunmap_atomic(kaddr, KM_USER0);
2604 out:
2605         ret = mpage_writepage(page, get_block, wbc);
2606         if (ret == -EAGAIN)
2607                 ret = __block_write_full_page(inode, page, get_block, wbc);
2608         return ret;
2609 }
2610 EXPORT_SYMBOL(nobh_writepage);
2611
2612 /*
2613  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2614  */
2615 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2616 {
2617         struct inode *inode = mapping->host;
2618         unsigned blocksize = 1 << inode->i_blkbits;
2619         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2620         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2621         unsigned to;
2622         struct page *page;
2623         struct address_space_operations *a_ops = mapping->a_ops;
2624         char *kaddr;
2625         int ret = 0;
2626
2627         if ((offset & (blocksize - 1)) == 0)
2628                 goto out;
2629
2630         ret = -ENOMEM;
2631         page = grab_cache_page(mapping, index);
2632         if (!page)
2633                 goto out;
2634
2635         to = (offset + blocksize) & ~(blocksize - 1);
2636         ret = a_ops->prepare_write(NULL, page, offset, to);
2637         if (ret == 0) {
2638                 kaddr = kmap_atomic(page, KM_USER0);
2639                 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2640                 flush_dcache_page(page);
2641                 kunmap_atomic(kaddr, KM_USER0);
2642                 set_page_dirty(page);
2643         }
2644         unlock_page(page);
2645         page_cache_release(page);
2646 out:
2647         return ret;
2648 }
2649 EXPORT_SYMBOL(nobh_truncate_page);
2650
2651 int block_truncate_page(struct address_space *mapping,
2652                         loff_t from, get_block_t *get_block)
2653 {
2654         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2655         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2656         unsigned blocksize;
2657         sector_t iblock;
2658         unsigned length, pos;
2659         struct inode *inode = mapping->host;
2660         struct page *page;
2661         struct buffer_head *bh;
2662         void *kaddr;
2663         int err;
2664
2665         blocksize = 1 << inode->i_blkbits;
2666         length = offset & (blocksize - 1);
2667
2668         /* Block boundary? Nothing to do */
2669         if (!length)
2670                 return 0;
2671
2672         length = blocksize - length;
2673         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2674         
2675         page = grab_cache_page(mapping, index);
2676         err = -ENOMEM;
2677         if (!page)
2678                 goto out;
2679
2680         if (!page_has_buffers(page))
2681                 create_empty_buffers(page, blocksize, 0);
2682
2683         /* Find the buffer that contains "offset" */
2684         bh = page_buffers(page);
2685         pos = blocksize;
2686         while (offset >= pos) {
2687                 bh = bh->b_this_page;
2688                 iblock++;
2689                 pos += blocksize;
2690         }
2691
2692         err = 0;
2693         if (!buffer_mapped(bh)) {
2694                 err = get_block(inode, iblock, bh, 0);
2695                 if (err)
2696                         goto unlock;
2697                 /* unmapped? It's a hole - nothing to do */
2698                 if (!buffer_mapped(bh))
2699                         goto unlock;
2700         }
2701
2702         /* Ok, it's mapped. Make sure it's up-to-date */
2703         if (PageUptodate(page))
2704                 set_buffer_uptodate(bh);
2705
2706         if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2707                 err = -EIO;
2708                 ll_rw_block(READ, 1, &bh);
2709                 wait_on_buffer(bh);
2710                 /* Uhhuh. Read error. Complain and punt. */
2711                 if (!buffer_uptodate(bh))
2712                         goto unlock;
2713         }
2714
2715         kaddr = kmap_atomic(page, KM_USER0);
2716         memset(kaddr + offset, 0, length);
2717         flush_dcache_page(page);
2718         kunmap_atomic(kaddr, KM_USER0);
2719
2720         mark_buffer_dirty(bh);
2721         err = 0;
2722
2723 unlock:
2724         unlock_page(page);
2725         page_cache_release(page);
2726 out:
2727         return err;
2728 }
2729
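/*
 * Illustrative sketch (not part of buffer.c): block_truncate_page() is
 * normally called from a filesystem's truncate path to zero the tail of
 * the partial last block, so stale data beyond the new EOF is never
 * exposed.  example_fs_get_block() is the hypothetical callback sketched
 * earlier.
 */
#if 0
	block_truncate_page(inode->i_mapping, inode->i_size,
				example_fs_get_block);
#endif
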
2730 /*
2731  * The generic ->writepage function for buffer-backed address_spaces
2732  */
2733 int block_write_full_page(struct page *page, get_block_t *get_block,
2734                         struct writeback_control *wbc)
2735 {
2736         struct inode * const inode = page->mapping->host;
2737         loff_t i_size = i_size_read(inode);
2738         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2739         unsigned offset;
2740         void *kaddr;
2741
2742         /* Is the page fully inside i_size? */
2743         if (page->index < end_index)
2744                 return __block_write_full_page(inode, page, get_block, wbc);
2745
2746         /* Is the page fully outside i_size? (truncate in progress) */
2747         offset = i_size & (PAGE_CACHE_SIZE-1);
2748         if (page->index >= end_index+1 || !offset) {
2749                 /*
2750                  * The page may have dirty, unmapped buffers.  For example,
2751                  * they may have been added in ext3_writepage().  Make them
2752                  * freeable here, so the page does not leak.
2753                  */
2754                 do_invalidatepage(page, 0);
2755                 unlock_page(page);
2756                 return 0; /* don't care */
2757         }
2758
2759         /*
2760          * The page straddles i_size.  It must be zeroed out on each and every
2761  * writepage invocation because it may be mmapped.  "A file is mapped
2762          * in multiples of the page size.  For a file that is not a multiple of
2763          * the  page size, the remaining memory is zeroed when mapped, and
2764          * writes to that region are not written out to the file."
2765          */
2766         kaddr = kmap_atomic(page, KM_USER0);
2767         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2768         flush_dcache_page(page);
2769         kunmap_atomic(kaddr, KM_USER0);
2770         return __block_write_full_page(inode, page, get_block, wbc);
2771 }
2772
2773 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2774                             get_block_t *get_block)
2775 {
2776         struct buffer_head tmp;
2777         struct inode *inode = mapping->host;
2778         tmp.b_state = 0;
2779         tmp.b_blocknr = 0;
2780         get_block(inode, block, &tmp, 0);
2781         return tmp.b_blocknr;
2782 }
2783
2784 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2785 {
2786         struct buffer_head *bh = bio->bi_private;
2787
2788         if (bio->bi_size)
2789                 return 1;
2790
2791         if (err == -EOPNOTSUPP) {
2792                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2793                 set_bit(BH_Eopnotsupp, &bh->b_state);
2794         }
2795
2796         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2797         bio_put(bio);
2798         return 0;
2799 }
2800
2801 int submit_bh(int rw, struct buffer_head * bh)
2802 {
2803         struct bio *bio;
2804         int ret = 0;
2805
2806         BUG_ON(!buffer_locked(bh));
2807         BUG_ON(!buffer_mapped(bh));
2808         BUG_ON(!bh->b_end_io);
2809
2810         if (buffer_ordered(bh) && (rw == WRITE))
2811                 rw = WRITE_BARRIER;
2812
2813         /*
2814          * Only clear out a write error when rewriting, should this
2815          * include WRITE_SYNC as well?
2816          */
2817         if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2818                 clear_buffer_write_io_error(bh);
2819
2820         /*
2821          * from here on down, it's all bio -- do the initial mapping,
2822          * submit_bio -> generic_make_request may further map this bio around
2823          */
2824         bio = bio_alloc(GFP_NOIO, 1);
2825
2826         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2827         bio->bi_bdev = bh->b_bdev;
2828         bio->bi_io_vec[0].bv_page = bh->b_page;
2829         bio->bi_io_vec[0].bv_len = bh->b_size;
2830         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2831
2832         bio->bi_vcnt = 1;
2833         bio->bi_idx = 0;
2834         bio->bi_size = bh->b_size;
2835
2836         bio->bi_end_io = end_bio_bh_io_sync;
2837         bio->bi_private = bh;
2838
2839         bio_get(bio);
2840         submit_bio(rw, bio);
2841
2842         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2843                 ret = -EOPNOTSUPP;
2844
2845         bio_put(bio);
2846         return ret;
2847 }
2848
2849 /**
2850  * ll_rw_block: low-level access to block devices (DEPRECATED)
2851  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2852  * @nr: number of &struct buffer_heads in the array
2853  * @bhs: array of pointers to &struct buffer_head
2854  *
2855  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2856  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2857  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2858  * are sent to disk. The fourth %READA option is described in the documentation
2859  * for generic_make_request() which ll_rw_block() calls.
2860  *
2861  * This function drops any buffer that it cannot get a lock on (with the
2862  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2863  * clean when doing a write request, and any buffer that appears to be
2864  * up-to-date when doing a read request.  Further it marks as clean buffers that
2865  * are processed for writing (the buffer cache won't assume that they are
2866  * actually clean until the buffer gets unlocked).
2867  *
2868  * ll_rw_block sets b_end_io to a simple completion handler that marks
2869  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2870  * any waiters. 
2871  *
2872  * All of the buffers must be for the same device, and must also be a
2873  * multiple of the current approved size for the device.
2874  */
2875 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2876 {
2877         int i;
2878
2879         for (i = 0; i < nr; i++) {
2880                 struct buffer_head *bh = bhs[i];
2881
2882                 if (rw == SWRITE)
2883                         lock_buffer(bh);
2884                 else if (test_set_buffer_locked(bh))
2885                         continue;
2886
2887                 if (rw == WRITE || rw == SWRITE) {
2888                         if (test_clear_buffer_dirty(bh)) {
2889                                 bh->b_end_io = end_buffer_write_sync;
2890                                 get_bh(bh);
2891                                 submit_bh(WRITE, bh);
2892                                 continue;
2893                         }
2894                 } else {
2895                         if (!buffer_uptodate(bh)) {
2896                                 bh->b_end_io = end_buffer_read_sync;
2897                                 get_bh(bh);
2898                                 submit_bh(rw, bh);
2899                                 continue;
2900                         }
2901                 }
2902                 unlock_buffer(bh);
2903         }
2904 }
2905
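/*
 * Illustrative sketch (not part of buffer.c): the classic ll_rw_block()
 * pattern - kick off reads on a batch of buffers, then wait for each one
 * and check the result.  Buffers that are already uptodate are skipped by
 * ll_rw_block() itself.  The function name is hypothetical.
 */
#if 0
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}
#endif
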
2906 /*
2907  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2908  * and then start new I/O and then wait upon it.  The caller must have a ref on
2909  * the buffer_head.
2910  */
2911 int sync_dirty_buffer(struct buffer_head *bh)
2912 {
2913         int ret = 0;
2914
2915         WARN_ON(atomic_read(&bh->b_count) < 1);
2916         lock_buffer(bh);
2917         if (test_clear_buffer_dirty(bh)) {
2918                 get_bh(bh);
2919                 bh->b_end_io = end_buffer_write_sync;
2920                 ret = submit_bh(WRITE, bh);
2921                 wait_on_buffer(bh);
2922                 if (buffer_eopnotsupp(bh)) {
2923                         clear_buffer_eopnotsupp(bh);
2924                         ret = -EOPNOTSUPP;
2925                 }
2926                 if (!ret && !buffer_uptodate(bh))
2927                         ret = -EIO;
2928         } else {
2929                 unlock_buffer(bh);
2930         }
2931         return ret;
2932 }
2933
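/*
 * Illustrative sketch (not part of buffer.c): a synchronous metadata
 * update.  The caller gets a referenced, uptodate buffer with sb_bread(),
 * modifies it, marks it dirty and uses sync_dirty_buffer() to force it to
 * disk before continuing.  The function name and the update itself are
 * hypothetical.
 */
#if 0
static int example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	/* ... modify bh->b_data ... */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submits WRITE and waits for it */
	brelse(bh);
	return err;
}
#endif
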
2934 /*
2935  * try_to_free_buffers() checks if all the buffers on this particular page
2936  * are unused, and releases them if so.
2937  *
2938  * Exclusion against try_to_free_buffers may be obtained by either
2939  * locking the page or by holding its mapping's private_lock.
2940  *
2941  * If the page is dirty but all the buffers are clean then we need to
2942  * be sure to mark the page clean as well.  This is because the page
2943  * may be against a block device, and a later reattachment of buffers
2944  * to a dirty page will set *all* buffers dirty, which would corrupt
2945  * filesystem data on the same device.
2946  *
2947  * The same applies to regular filesystem pages: if all the buffers are
2948  * clean then we set the page clean and proceed.  To do that, we require
2949  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2950  * private_lock.
2951  *
2952  * try_to_free_buffers() is non-blocking.
2953  */
2954 static inline int buffer_busy(struct buffer_head *bh)
2955 {
2956         return atomic_read(&bh->b_count) |
2957                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2958 }
2959
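/*
 * Walk the page's buffer ring: if any buffer is busy, give up (after
 * propagating any write I/O error into the mapping); otherwise detach the
 * ring from the page and hand it back so the caller can free the heads.
 */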
2960 static int
2961 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2962 {
2963         struct buffer_head *head = page_buffers(page);
2964         struct buffer_head *bh;
2965
2966         bh = head;
2967         do {
2968                 if (buffer_write_io_error(bh) && page->mapping)
2969                         set_bit(AS_EIO, &page->mapping->flags);
2970                 if (buffer_busy(bh))
2971                         goto failed;
2972                 bh = bh->b_this_page;
2973         } while (bh != head);
2974
2975         do {
2976                 struct buffer_head *next = bh->b_this_page;
2977
2978                 if (!list_empty(&bh->b_assoc_buffers))
2979                         __remove_assoc_queue(bh);
2980                 bh = next;
2981         } while (bh != head);
2982         *buffers_to_free = head;
2983         __clear_page_buffers(page);
2984         return 1;
2985 failed:
2986         return 0;
2987 }
2988
2989 int try_to_free_buffers(struct page *page)
2990 {
2991         struct address_space * const mapping = page->mapping;
2992         struct buffer_head *buffers_to_free = NULL;
2993         int ret = 0;
2994
2995         BUG_ON(!PageLocked(page));
2996         if (PageWriteback(page))
2997                 return 0;
2998
2999         if (mapping == NULL) {          /* can this still happen? */
3000                 ret = drop_buffers(page, &buffers_to_free);
3001                 goto out;
3002         }
3003
3004         spin_lock(&mapping->private_lock);
3005         ret = drop_buffers(page, &buffers_to_free);
3006         if (ret) {
3007                 /*
3008                  * If the filesystem writes its buffers by hand (eg ext3)
3009                  * then we can have clean buffers against a dirty page.  We
3010                  * clean the page here; otherwise later reattachment of buffers
3011                  * could encounter a non-uptodate page, which is unresolvable.
3012                  * This only applies in the rare case where try_to_free_buffers
3013                  * succeeds but the page is not freed.
3014                  */
3015                 clear_page_dirty(page);
3016         }
3017         spin_unlock(&mapping->private_lock);
3018 out:
3019         if (buffers_to_free) {
3020                 struct buffer_head *bh = buffers_to_free;
3021
3022                 do {
3023                         struct buffer_head *next = bh->b_this_page;
3024                         free_buffer_head(bh);
3025                         bh = next;
3026                 } while (bh != buffers_to_free);
3027         }
3028         return ret;
3029 }
3030 EXPORT_SYMBOL(try_to_free_buffers);
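
/*
 * A minimal sketch of how a filesystem's ->releasepage() might use this
 * ("example_releasepage" is a hypothetical name); try_to_release_page()
 * calls it with the page locked, which satisfies the exclusion rule above:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */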
3031
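/*
 * Generic ->sync_page address_space operation for block-backed pages: kick
 * the backing device so that any I/O this page may be waiting on actually
 * gets dispatched (the request queue is unplugged).
 */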
3032 int block_sync_page(struct page *page)
3033 {
3034         struct address_space *mapping;
3035
3036         smp_mb();
3037         mapping = page_mapping(page);
3038         if (mapping)
3039                 blk_run_backing_dev(mapping->backing_dev_info, page);
3040         return 0;
3041 }
3042
3043 /*
3044  * There are no bdflush tunables left.  But distributions are
3045  * still running obsolete flush daemons, so we terminate them here.
3046  *
3047  * Use of bdflush() is deprecated and will be removed in a future kernel.
3048  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3049  */
3050 asmlinkage long sys_bdflush(int func, long data)
3051 {
3052         static int msg_count;
3053
3054         if (!capable(CAP_SYS_ADMIN))
3055                 return -EPERM;
3056
3057         if (msg_count < 5) {
3058                 msg_count++;
3059                 printk(KERN_INFO
3060                         "warning: process `%s' used the obsolete bdflush"
3061                         " system call\n", current->comm);
3062                 printk(KERN_INFO "Fix your initscripts?\n");
3063         }
3064
3065         if (func == 1)
3066                 do_exit(0);
3067         return 0;
3068 }
3069
3070 /*
3071  * Migration function for pages with buffers. This function can only be used
3072  * if the underlying filesystem guarantees that no other references to "page"
3073  * exist.
3074  */
3075 #ifdef CONFIG_MIGRATION
3076 int buffer_migrate_page(struct page *newpage, struct page *page)
3077 {
3078         struct address_space *mapping = page->mapping;
3079         struct buffer_head *bh, *head;
3080         int rc;
3081
3082         if (!mapping)
3083                 return -EAGAIN;
3084
3085         if (!page_has_buffers(page))
3086                 return migrate_page(newpage, page);
3087
3088         head = page_buffers(page);
3089
3090         rc = migrate_page_remove_references(newpage, page, 3);
3091         if (rc)
3092                 return rc;
3093
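        /*
         * Pin and lock every buffer in the ring so that no I/O can start or
         * complete against the old page while it is being switched out.
         */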
3094         bh = head;
3095         do {
3096                 get_bh(bh);
3097                 lock_buffer(bh);
3098                 bh = bh->b_this_page;
3099
3100         } while (bh != head);
3101
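        /*
         * Transfer the buffer ring (page->private) and the page reference it
         * pins from the old page to the new page.
         */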
3102         ClearPagePrivate(page);
3103         set_page_private(newpage, page_private(page));
3104         set_page_private(page, 0);
3105         put_page(page);
3106         get_page(newpage);
3107
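        /* Repoint each buffer head at the same offset within the new page. */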
3108         bh = head;
3109         do {
3110                 set_bh_page(bh, newpage, bh_offset(bh));
3111                 bh = bh->b_this_page;
3112
3113         } while (bh != head);
3114
3115         SetPagePrivate(newpage);
3116
3117         migrate_page_copy(newpage, page);
3118
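        /*
         * Contents and page state have been copied over; drop the locks and
         * buffer references taken above.
         */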
3119         bh = head;
3120         do {
3121                 unlock_buffer(bh);
3122                 put_bh(bh);
3123                 bh = bh->b_this_page;
3124
3125         } while (bh != head);
3126
3127         return 0;
3128 }
3129 EXPORT_SYMBOL(buffer_migrate_page);
3130 #endif
3131
3132 /*
3133  * Buffer-head allocation
3134  */
3135 static kmem_cache_t *bh_cachep;
3136
3137 /*
3138  * Once the number of bh's in the machine exceeds this level, we start
3139  * stripping them in writeback.
3140  */
3141 static int max_buffer_heads;
3142
3143 int buffer_heads_over_limit;
3144
3145 struct bh_accounting {
3146         int nr;                 /* Number of live bh's */
3147         int ratelimit;          /* Limit cacheline bouncing */
3148 };
3149
3150 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3151
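/*
 * Fold the per-cpu counts into the global buffer_heads_over_limit flag.
 * The cross-CPU sum is only done every 4096 calls, to limit cacheline
 * bouncing on the per-cpu counters.
 */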
3152 static void recalc_bh_state(void)
3153 {
3154         int i;
3155         int tot = 0;
3156
3157         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3158                 return;
3159         __get_cpu_var(bh_accounting).ratelimit = 0;
3160         for_each_cpu(i)
3161                 tot += per_cpu(bh_accounting, i).nr;
3162         buffer_heads_over_limit = (tot > max_buffer_heads);
3163 }
3164
3165 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3166 {
3167         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3168         if (ret) {
3169                 get_cpu_var(bh_accounting).nr++;
3170                 recalc_bh_state();
3171                 put_cpu_var(bh_accounting);
3172         }
3173         return ret;
3174 }
3175 EXPORT_SYMBOL(alloc_buffer_head);
3176
3177 void free_buffer_head(struct buffer_head *bh)
3178 {
3179         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3180         kmem_cache_free(bh_cachep, bh);
3181         get_cpu_var(bh_accounting).nr--;
3182         recalc_bh_state();
3183         put_cpu_var(bh_accounting);
3184 }
3185 EXPORT_SYMBOL(free_buffer_head);
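
/*
 * A minimal sketch of balanced use (illustrative only); the slab constructor
 * below leaves b_assoc_buffers empty, so a head that was never attached to
 * an inode's dirty-buffer list can be freed directly:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	...set up b_page, b_data, b_size, b_bdev, b_blocknr...
 *	free_buffer_head(bh);
 */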
3186
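/*
 * Slab constructor: runs when an object is first set up in a slab page (the
 * SLAB_CTOR_CONSTRUCTOR check skips debug verification passes), so every
 * buffer head starts out zeroed with an empty b_assoc_buffers list.
 */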
3187 static void
3188 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3189 {
3190         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3191                             SLAB_CTOR_CONSTRUCTOR) {
3192                 struct buffer_head * bh = (struct buffer_head *)data;
3193
3194                 memset(bh, 0, sizeof(*bh));
3195                 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3196         }
3197 }
3198
3199 #ifdef CONFIG_HOTPLUG_CPU
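/*
 * When a CPU goes offline, drop the references held by its per-cpu bh LRU
 * so those buffer heads do not remain pinned forever.
 */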
3200 static void buffer_exit_cpu(int cpu)
3201 {
3202         int i;
3203         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3204
3205         for (i = 0; i < BH_LRU_SIZE; i++) {
3206                 brelse(b->bhs[i]);
3207                 b->bhs[i] = NULL;
3208         }
3209 }
3210
3211 static int buffer_cpu_notify(struct notifier_block *self,
3212                               unsigned long action, void *hcpu)
3213 {
3214         if (action == CPU_DEAD)
3215                 buffer_exit_cpu((unsigned long)hcpu);
3216         return NOTIFY_OK;
3217 }
3218 #endif /* CONFIG_HOTPLUG_CPU */
3219
3220 void __init buffer_init(void)
3221 {
3222         int nrpages;
3223
3224         bh_cachep = kmem_cache_create("buffer_head",
3225                         sizeof(struct buffer_head), 0,
3226                         SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3227
3228         /*
3229          * Limit the bh occupancy to 10% of ZONE_NORMAL
3230          */
3231         nrpages = (nr_free_buffer_pages() * 10) / 100;
3232         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
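        /*
         * Rough illustration: with 4KB pages and 1GB of lowmem, nrpages comes
         * to about 26,000; at roughly 50-100 bytes per buffer_head (depending
         * on the architecture and config) that allows on the order of one to
         * two million buffer heads before writeback starts stripping them.
         */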
3233         hotcpu_notifier(buffer_cpu_notify, 0);
3234 }
3235
3236 EXPORT_SYMBOL(__bforget);
3237 EXPORT_SYMBOL(__brelse);
3238 EXPORT_SYMBOL(__wait_on_buffer);
3239 EXPORT_SYMBOL(block_commit_write);
3240 EXPORT_SYMBOL(block_prepare_write);
3241 EXPORT_SYMBOL(block_read_full_page);
3242 EXPORT_SYMBOL(block_sync_page);
3243 EXPORT_SYMBOL(block_truncate_page);
3244 EXPORT_SYMBOL(block_write_full_page);
3245 EXPORT_SYMBOL(cont_prepare_write);
3246 EXPORT_SYMBOL(end_buffer_async_write);
3247 EXPORT_SYMBOL(end_buffer_read_sync);
3248 EXPORT_SYMBOL(end_buffer_write_sync);
3249 EXPORT_SYMBOL(file_fsync);
3250 EXPORT_SYMBOL(fsync_bdev);
3251 EXPORT_SYMBOL(generic_block_bmap);
3252 EXPORT_SYMBOL(generic_commit_write);
3253 EXPORT_SYMBOL(generic_cont_expand);
3254 EXPORT_SYMBOL(generic_cont_expand_simple);
3255 EXPORT_SYMBOL(init_buffer);
3256 EXPORT_SYMBOL(invalidate_bdev);
3257 EXPORT_SYMBOL(ll_rw_block);
3258 EXPORT_SYMBOL(mark_buffer_dirty);
3259 EXPORT_SYMBOL(submit_bh);
3260 EXPORT_SYMBOL(sync_dirty_buffer);
3261 EXPORT_SYMBOL(unlock_buffer);