/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include "xattr.h"
#include "acl.h"

/*
 * Test whether an inode is a fast symlink.
 */
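/*
 * A fast symlink stores its target string directly in the inode's
 * i_data area, so it owns no data blocks of its own.  i_blocks counts
 * 512-byte sectors, and an external extended-attribute block
 * (i_file_acl) accounts for s_blocksize >> 9 of them, which is why it
 * is subtracted before the test.
 */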
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}

/* The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
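
/*
 * Without the revoke, journal replay after a crash could copy a stale
 * journaled image of the block over whatever was written to it after
 * the block had been freed and reused.
 */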

int ext3_forget(handle_t *handle, int is_metadata,
                       struct inode *inode, struct buffer_head *bh,
                       int blocknr)
{
        int err;

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext3_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call journal_forget");
                        ext3_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
                ext3_abort(inode->i_sb, __FUNCTION__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */

static unsigned long blocks_for_truncate(struct inode *inode)
{
        unsigned long needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
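        /* i_blocks is in 512-byte sectors; the shift above converts it
         * to filesystem blocks, a rough estimate of how many blocks the
         * next chunk of the truncate may touch. */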

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext3 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;

        return EXT3_DATA_TRANS_BLOCKS + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */

static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext3_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext3_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, the transaction must be restarted, and we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext3_journal_restart(handle, blocks_for_truncate(inode));
}
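
/*
 * Typical use in the truncate path (a sketch, not the verbatim caller):
 *
 *      if (try_to_extend_transaction(handle, inode))
 *              ext3_journal_test_restart(handle, inode);
 *
 * i.e. extend the running transaction in place while it still has
 * spare credits, and fall back to a commit-and-restart at a point
 * where everything is consistently dirtied.
 */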

/*
 * Called at each iput()
 *
 * The inode may be "bad" if ext3_read_inode() saw an error from
 * ext3_get_inode(), so we need to check that to avoid freeing random disk
 * blocks.
 */
void ext3_put_inode(struct inode *inode)
{
        if (!is_bad_inode(inode))
                ext3_discard_prealloc(inode);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
        handle_t *handle;

        if (is_bad_inode(inode))
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /* If we're going to skip the normal cleanup, we still
                 * need to make sure that the in-core orphan linked list
                 * is properly cleaned up. */
                ext3_orphan_del(NULL, inode);

                ext3_std_error(inode->i_sb, PTR_ERR(handle));
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext3_truncate(inode);
        /*
         * Kill off the orphan record which ext3_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext3_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext3_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext3_free_inode(handle, inode);
        ext3_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

void ext3_discard_prealloc (struct inode * inode)
{
#ifdef EXT3_PREALLOCATE
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_prealloc* */
        if (ei->i_prealloc_count) {
                unsigned short total = ei->i_prealloc_count;
                unsigned long block = ei->i_prealloc_block;
                ei->i_prealloc_count = 0;
                ei->i_prealloc_block = 0;
                /* Writer: end */
                ext3_free_blocks (inode, block, total);
        }
#endif
}

static int ext3_alloc_block (handle_t *handle,
                        struct inode * inode, unsigned long goal, int *err)
{
        unsigned long result;

#ifdef EXT3_PREALLOCATE
#ifdef EXT3FS_DEBUG
        static unsigned long alloc_hits, alloc_attempts;
#endif
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_prealloc* */
        if (ei->i_prealloc_count &&
            (goal == ei->i_prealloc_block ||
             goal + 1 == ei->i_prealloc_block))
        {
                result = ei->i_prealloc_block++;
                ei->i_prealloc_count--;
                /* Writer: end */
                ext3_debug ("preallocation hit (%lu/%lu).\n",
                            ++alloc_hits, ++alloc_attempts);
        } else {
                ext3_discard_prealloc (inode);
                ext3_debug ("preallocation miss (%lu/%lu).\n",
                            alloc_hits, ++alloc_attempts);
                if (S_ISREG(inode->i_mode))
                        result = ext3_new_block (inode, goal,
                                 &ei->i_prealloc_count,
                                 &ei->i_prealloc_block, err);
                else
                        result = ext3_new_block (inode, goal, 0, 0, err);
                /*
                 * AKPM: this is somewhat sticky.  I'm not surprised it was
                 * disabled in 2.2's ext3.  Need to integrate b_committed_data
                 * guarding with preallocation, if indeed preallocation is
                 * effective.
                 */
        }
#else
        result = ext3_new_block (handle, inode, goal, 0, 0, err);
#endif
        return result;
}


typedef struct {
        u32     *p;
        u32     key;
        struct buffer_head *bh;
} Indirect;
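
/*
 * Each Indirect triple records where a block pointer lives (->p), the
 * value that was read from it (->key) and, for indirect blocks, the
 * buffer_head hosting it (->bh is NULL when ->p points into the
 * inode's i_data).  Keeping both ->p and ->key lets verify_chain()
 * below detect that the path was modified while it was being read.
 */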

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 *      ext3_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data ext3 uses a data structure
 *      common for UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at leaves and indirect blocks in intermediate
 *      nodes.  This function translates the block number into a path in that
 *      tree - the return value is the path length and @offsets[n] is the
 *      offset of the pointer to the (n+1)th node in the nth one. If @i_block
 *      is out of range (negative or too large), a warning is printed and
 *      zero is returned.
 *
 *      Note: function doesn't find node addresses, so no IO is needed. All
 *      we need to know is the capacity of indirect blocks (taken from the
 *      inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT3_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ( (i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT3_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT3_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT3_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
                *boundary = (i_block & (ptrs - 1)) == (final - 1);
        return n;
}
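
/*
 * A worked example, assuming 4KB blocks (1024 pointers per indirect
 * block, EXT3_NDIR_BLOCKS == 12): logical block 5 maps to the path
 * {5} (direct); block 300 to {EXT3_IND_BLOCK, 288}; and block 5000 to
 * {EXT3_DIND_BLOCK, 3, 892}, since 5000 - 12 - 1024 = 3964 and
 * 3964 = 3 * 1024 + 892.
 */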

/**
 *      ext3_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it notices that chain had been changed while it was reading
 *              (ditto, *@err == -EAGAIN)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext3_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */

static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
        u32 *p;
        unsigned long bg_start;
        unsigned long colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * Is it going to be referred to from the inode itself? OK, just
         * put it into the same cylinder group then.
         */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}
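
/*
 * For example, with the common 32768 blocks per group, PIDs are spread
 * across 16 slots of 2048 blocks each, so two unrelated processes
 * allocating in the same group will usually start their searches 2048
 * blocks apart.
 */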

/**
 *      ext3_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @chain:  chain of indirect blocks
 *      @partial: pointer to the last triple within a chain
 *      @goal:  place to store the result.
 *
 *      Normally this function finds the preferred place for block allocation,
 *      stores it in *@goal and returns zero. If the branch had been changed
 *      under us we return -EAGAIN.
 */

static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
                          Indirect *partial, unsigned long *goal)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_next_alloc* */
        if (block == ei->i_next_alloc_block + 1) {
                ei->i_next_alloc_block++;
                ei->i_next_alloc_goal++;
        }
        /* Writer: end */
        /* Reader: pointers, ->i_next_alloc* */
        if (verify_chain(chain, partial)) {
                /*
                 * try the heuristic for sequential allocation,
                 * failing that at least try to get decent locality.
                 */
                if (block == ei->i_next_alloc_block)
                        *goal = ei->i_next_alloc_goal;
                if (!*goal)
                        *goal = ext3_find_near(inode, partial);
                return 0;
        }
        /* Reader: end */
        return -EAGAIN;
}

/**
 *      ext3_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @num: depth of the chain (number of blocks to allocate)
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates @num blocks, zeroes out all but the last one,
 *      links them into a chain and (if we are synchronous) writes them to
 *      disk.  In other words, it prepares a branch that can be spliced onto
 *      the inode. It stores the information about that chain in the branch[],
 *      in the same format as ext3_get_branch() would do. We are calling it
 *      after we had read the existing part of chain and partial points to the
 *      last triple of that (one with zero ->key). Upon the exit we have the
 *      same picture as after the successful ext3_get_block(), except that in
 *      one place chain is disconnected - *branch->p is still zero (we did not
 *      set the last link), but branch->key contains the number that should
 *      be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */

static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                             int num,
                             unsigned long goal,
                             int *offsets,
                             Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int n = 0, keys = 0;
        int err = 0;
        int i;
        int parent = ext3_alloc_block(handle, inode, goal, &err);

        branch[0].key = cpu_to_le32(parent);
        if (parent) {
                for (n = 1; n < num; n++) {
                        struct buffer_head *bh;
                        /* Allocate the next block */
                        int nr = ext3_alloc_block(handle, inode, parent, &err);
                        if (!nr)
                                break;
                        branch[n].key = cpu_to_le32(nr);
                        keys = n+1;

                        /*
                         * Get buffer_head for parent block, zero it out
                         * and set the pointer to new one, then send
                         * parent to disk.
                         */
                        bh = sb_getblk(inode->i_sb, parent);
                        branch[n].bh = bh;
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        err = ext3_journal_get_create_access(handle, bh);
                        if (err) {
                                unlock_buffer(bh);
                                brelse(bh);
                                break;
                        }

                        memset(bh->b_data, 0, blocksize);
                        branch[n].p = (u32*) bh->b_data + offsets[n];
                        *branch[n].p = branch[n].key;
                        BUFFER_TRACE(bh, "marking uptodate");
                        set_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (err)
                                break;

                        parent = nr;
                }
        }
        if (n == num)
                return 0;

        /* Allocation failed, free what we already allocated */
        for (i = 1; i < keys; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < keys; i++)
                ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
        return err;
}

/**
 *      ext3_splice_branch - splice the allocated branch onto inode.
 *      @inode: owner
 *      @block: (logical) number of block we are adding
 *      @chain: chain of indirect blocks (with a missing link - see
 *              ext3_alloc_branch)
 *      @where: location of missing link
 *      @num:   number of blocks we are adding
 *
 *      This function verifies that chain (up to the missing link) had not
 *      changed, fills the missing link and does all housekeeping needed in
 *      inode (->i_blocks, etc.). In case of success we end up with the full
 *      chain to new block and return 0. Otherwise (== chain had been changed)
 *      we free the new blocks (forgetting their buffer_heads, indeed) and
 *      return -EAGAIN.
 */

static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                              Indirect chain[4], Indirect *where, int num)
{
        int i;
        int err = 0;
        struct ext3_inode_info *ei = EXT3_I(inode);

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* Verify that place we are splicing to is still there and vacant */

        /* Writer: pointers, ->i_next_alloc* */
        if (!verify_chain(chain, where-1) || *where->p)
                /* Writer: end */
                goto changed;

        /* That's it */

        *where->p = where->key;
        ei->i_next_alloc_block = block;
        ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
        /* Writer: end */

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);

        /* had we spliced it onto an indirect block? */
        if (where->bh) {
                /*
                 * akpm: If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

changed:
        /*
         * AKPM: if where[i].bh isn't part of the current updating
         * transaction then we explode nastily.  Test this code path.
         */
        jbd_debug(1, "the chain changed: try again\n");
        err = -EAGAIN;

err_out:
        for (i = 1; i < num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
        }
        /* For the normal collision cleanup case, we free up the blocks.
         * On genuine filesystem errors we don't even think about doing
         * that. */
        if (err == -EAGAIN)
                for (i = 0; i < num; i++)
                        ext3_free_blocks(handle, inode,
                                         le32_to_cpu(where[i].key), 1);
        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create, int extend_disksize)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        unsigned long goal;
        int left;
        int boundary = 0;
        int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
        struct ext3_inode_info *ei = EXT3_I(inode);

        J_ASSERT(handle != NULL || create == 0);

        if (depth == 0)
                goto out;

reread:
        partial = ext3_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                clear_buffer_new(bh_result);
got_it:
                map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
                if (boundary)
                        set_buffer_boundary(bh_result);
                /* Clean up and exit */
                partial = chain+depth-1; /* the whole chain */
                goto cleanup;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO) {
cleanup:
                while (partial > chain) {
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
                        partial--;
                }
                BUFFER_TRACE(bh_result, "returned");
out:
                return err;
        }

        /*
         * Indirect block might be removed by truncate while we were
         * reading it. Handling of that case (forget what we've got and
         * reread) is taken out of the main path.
         */
        if (err == -EAGAIN)
                goto changed;

        down(&ei->truncate_sem);
        if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
                up(&ei->truncate_sem);
                goto changed;
        }

        left = (chain + depth) - partial;

        /*
         * Block out ext3_truncate while we alter the tree
         */
        err = ext3_alloc_branch(handle, inode, left, goal,
                                        offsets+(partial-chain), partial);

        /* The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock, chain,
                                         partial, left);
        /* i_disksize growing is protected by truncate_sem
         * don't forget to protect it if you're about to implement
         * concurrent ext3_get_block() -bzzz */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        up(&ei->truncate_sem);
        if (err == -EAGAIN)
                goto changed;
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
        goto got_it;

changed:
        while (partial > chain) {
                jbd_debug(1, "buffer chain changed, retrying\n");
                BUFFER_TRACE(partial->bh, "brelsing");
                brelse(partial->bh);
                partial--;
        }
        goto reread;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = 0;
        int ret;

        if (create) {
                handle = ext3_journal_current_handle();
                J_ASSERT(handle != 0);
        }
        ret = ext3_get_block_handle(handle, inode, iblock,
                                bh_result, create, 1);
        return ret;
}

#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
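
/*
 * The extra 32 credits above are, roughly, headroom for the indirect,
 * bitmap and group-descriptor blocks a single get_block call may dirty
 * while mapping blocks for direct I/O.  When a running handle drops to
 * the reserve level we first try to extend it in place; restarting it
 * (which commits) is expected to be safe at that point because
 * everything dirtied so far is consistent.
 */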

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh_result,
                int create)
{
        handle_t *handle = journal_current_handle();
        int ret = 0;

        if (handle && handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
                /*
                 * Getting low on buffer credits...
                 */
                if (ext3_journal_extend(handle, DIO_CREDITS) != 0) {
                        /*
                         * Couldn't extend the transaction.  Start a new one
                         */
                        ret = ext3_journal_restart(handle, DIO_CREDITS);
                }
        }
        if (ret == 0)
                ret = ext3_get_block_handle(handle, inode, iblock,
                                        bh_result, create, 0);
        if (ret == 0)
                bh_result->b_size = (1 << inode->i_blkbits);
        return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
                                long block, int create, int * errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
        if (!*errp && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != 0);

                        /* Now that we do not always journal data, we
                           should keep in mind whether this should
                           always journal the new buffer as metadata.
                           For now, regular file writes use
                           ext3_get_block instead, so it's not a
                           problem. */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
        return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                               int block, int create, int *err)
{
        struct buffer_head * bh;
        int prev_blocks;

        prev_blocks = inode->i_blocks;

        bh = ext3_getblk (handle, inode, block, create, err);
        if (!bh)
                return bh;
#ifdef EXT3_PREALLOCATE
        /*
         * If the inode has grown, and this is a directory, then use a few
         * more of the preallocated blocks to keep directory fragmentation
         * down.  The preallocated blocks are guaranteed to be contiguous.
         */
        if (create &&
            S_ISDIR(inode->i_mode) &&
            inode->i_blocks > prev_blocks &&
            EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
                                    EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
                int i;
                struct buffer_head *tmp_bh;

                for (i = 1;
                     EXT3_I(inode)->i_prealloc_count &&
                     i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
                     i++) {
                        /*
                         * ext3_getblk will zero out the contents of the
                         * directory for us
                         */
                        tmp_bh = ext3_getblk(handle, inode,
                                                block+i, create, err);
                        if (!tmp_bh) {
                                brelse (bh);
                                return 0;
                        }
                        brelse (tmp_bh);
                }
        }
#endif
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block (READ, 1, &bh);
        wait_on_buffer (bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse (bh);
        *err = -EIO;
        return NULL;
}

static int walk_page_buffers(   handle_t *handle,
                                struct buffer_head *head,
                                unsigned from,
                                unsigned to,
                                int *partial,
                                int (*fn)(      handle_t *handle,
                                                struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
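
/*
 * The loop condition (bh != head || !block_start) turns the circular
 * b_this_page list into a do/while: the head buffer is visited first,
 * and the walk terminates when it wraps around to head again.
 */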

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */

static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
        handle_t *handle;

        handle = ext3_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }
        ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret != 0)
                goto prepare_write_failed;

        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }
prepare_write_failed:
        if (ret)
                ext3_journal_stop(handle);
out:
        return ret;
}

static int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
        int err = journal_dirty_data(handle, bh);
        if (err)
                ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
                                                bh, handle, err);
        return err;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */

static int ext3_ordered_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;

        ret = walk_page_buffers(handle, page_buffers(page),
                from, to, NULL, ext3_journal_dirty_data);

        if (ret == 0) {
                /*
                 * generic_commit_write() will run mark_inode_dirty() if i_size
                 * changes.  So let's piggyback the i_disksize mark_inode_dirty
                 * into that.
                 */
                loff_t new_i_size;

                new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
                if (new_i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = new_i_size;
                ret = generic_commit_write(file, page, from, to);
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_writeback_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        loff_t new_i_size;

        new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        if (new_i_size > EXT3_I(inode)->i_disksize)
                EXT3_I(inode)->i_disksize = new_i_size;
        ret = generic_commit_write(file, page, from, to);
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_journalled_commit_write(struct file *file,
                        struct page *page, unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        loff_t pos;

        /*
         * Here we duplicate the generic_commit_write() functionality
         */
        pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, commit_write_fn);
        if (!partial)
                SetPageUptodate(page);
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        journal_t *journal;
        int err;

        if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
                 * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
                 * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files.  If somebody wants to bmap a directory
                 * or symlink and gets confused because the buffer
                 * hasn't yet been flushed to disk, they deserve
                 * everything they get.
                 */

                EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
                journal = EXT3_JOURNAL(inode);
                journal_lock_updates(journal);
                err = journal_flush(journal);
                journal_unlock_updates(journal);

                if (err)
                        return 0;
        }

        return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
        get_bh(bh);
        return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
        put_bh(bh);
        return 0;
}
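
/*
 * bget_one()/bput_one() pin the page's buffer_heads across
 * block_write_full_page() in the ordered-data writepage path: once the
 * page is unlocked it may be truncated, but the elevated refcount
 * keeps the buffer_heads alive for the journal_dirty_data pass.
 */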

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
        if (buffer_mapped(bh))
                return ext3_journal_dirty_data(handle, bh);
        return 0;
}
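
/*
 * The buffer_mapped() test above is defensive: buffers past EOF on the
 * final partial page can be left unmapped by block_write_full_page(),
 * and there is nothing on disk for the journal to order against in
 * that case.
 */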

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *      ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *              ext3_writepage()
 *
 * Similar for:
 *
 *      ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *          non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bufs;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        J_ASSERT(PageLocked(page));

        /*
         * We give up here if we're reentered, because it might be for a
         * different filesystem.
         */
        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        page_bufs = page_buffers(page);
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bget_one);

        ret = block_write_full_page(page, ext3_get_block, wbc);

        /*
         * The page can become unlocked at any point now, and
         * truncate can then come in and change things.  So we
         * can't touch *page from now on.  But *page_bufs is
         * safe due to elevated refcount.
         */

        /*
         * And attach them to the current transaction.  But only if
         * block_write_full_page() succeeded.  Otherwise they are unmapped,
         * and generally junk.
         */
        if (ret == 0) {
                err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
                                        NULL, journal_dirty_data_fn);
                if (!ret)
                        ret = err;
        }
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bput_one);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

static int ext3_writeback_writepage(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        ret = block_write_full_page(page, ext3_get_block, wbc);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

static int ext3_journalled_writepage(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        if (ext3_journal_current_handle())
                goto no_write;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto no_write;
        }

        if (!page_has_buffers(page) || PageChecked(page)) {
                /*
                 * It's mmapped pagecache.  Add buffers and journal it.  There
                 * doesn't seem much point in redirtying the page here.
                 */
                ClearPageChecked(page);
                ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
                                        ext3_get_block);
                if (ret != 0)
                        goto out_unlock;
                ret = walk_page_buffers(handle, page_buffers(page), 0,
                        PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

                err = walk_page_buffers(handle, page_buffers(page), 0,
                                PAGE_CACHE_SIZE, NULL, commit_write_fn);
                if (ret == 0)
                        ret = err;
                EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
                unlock_page(page);
        } else {
                /*
                 * It may be a page full of checkpoint-mode buffers.  We don't
                 * really know unless we go poke around in the buffer_heads.
                 * But block_write_full_page will do the right thing.
                 */
                ret = block_write_full_page(page, ext3_get_block, wbc);
1473         }
1474         err = ext3_journal_stop(handle);
1475         if (!ret)
1476                 ret = err;
1477 out:
1478         return ret;
1479
1480 no_write:
1481         redirty_page_for_writepage(wbc, page);
1482 out_unlock:
1483         unlock_page(page);
1484         goto out;
1485 }
1486
1487 static int ext3_readpage(struct file *file, struct page *page)
1488 {
1489         return mpage_readpage(page, ext3_get_block);
1490 }
1491
1492 static int
1493 ext3_readpages(struct file *file, struct address_space *mapping,
1494                 struct list_head *pages, unsigned nr_pages)
1495 {
1496         return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1497 }
1498
1499 static int ext3_invalidatepage(struct page *page, unsigned long offset)
1500 {
1501         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1502
1503         /*
1504          * If it's a full truncate we just forget about the pending dirtying
1505          */
1506         if (offset == 0)
1507                 ClearPageChecked(page);
1508
1509         return journal_invalidatepage(journal, page, offset);
1510 }
1511
1512 static int ext3_releasepage(struct page *page, int wait)
1513 {
1514         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1515
1516         WARN_ON(PageChecked(page));
1517         return journal_try_to_free_buffers(journal, page, wait);
1518 }
1519
1520 /*
1521  * If the O_DIRECT write will extend the file then add this inode to the
1522  * orphan list.  So recovery will truncate it back to the original size
1523  * if the machine crashes during the write.
1524  *
1525  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1526  * crashes then stale disk data _may_ be exposed inside the file.
1527  */
1528 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1529                         const struct iovec *iov, loff_t offset,
1530                         unsigned long nr_segs)
1531 {
1532         struct file *file = iocb->ki_filp;
1533         struct inode *inode = file->f_mapping->host;
1534         struct ext3_inode_info *ei = EXT3_I(inode);
1535         handle_t *handle = NULL;
1536         ssize_t ret;
1537         int orphan = 0;
1538         size_t count = iov_length(iov, nr_segs);
1539
1540         if (rw == WRITE) {
1541                 loff_t final_size = offset + count;
1542
1543                 handle = ext3_journal_start(inode, DIO_CREDITS);
1544                 if (IS_ERR(handle)) {
1545                         ret = PTR_ERR(handle);
1546                         goto out;
1547                 }
1548                 if (final_size > inode->i_size) {
1549                         ret = ext3_orphan_add(handle, inode);
1550                         if (ret)
1551                                 goto out_stop;
1552                         orphan = 1;
1553                         ei->i_disksize = inode->i_size;
1554                 }
1555         }
1556
1557         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
1558                                  offset, nr_segs,
1559                                  ext3_direct_io_get_blocks, NULL);
1560
1561 out_stop:
1562         if (handle) {
1563                 int err;
1564
1565                 if (orphan) 
1566                         ext3_orphan_del(handle, inode);
1567                 if (orphan && ret > 0) {
1568                         loff_t end = offset + ret;
1569                         if (end > inode->i_size) {
1570                                 ei->i_disksize = end;
1571                                 i_size_write(inode, end);
1572                                 err = ext3_mark_inode_dirty(handle, inode);
1573                                 if (!ret) 
1574                                         ret = err;
1575                         }
1576                 }
1577                 err = ext3_journal_stop(handle);
1578                 if (ret == 0)
1579                         ret = err;
1580         }
1581 out:
1582         return ret;
1583 }
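
/*
 * A concrete illustration of the orphan dance above (the sizes are just
 * an example): a 16KB O_DIRECT write starting at i_size puts the inode
 * on the orphan list before any data is issued.  If the machine dies
 * after only 8KB have reached disk, journal recovery finds the orphan
 * and truncates the file back to the old i_size, so no tail of
 * indeterminate data becomes visible.  If the write completes, the
 * i_size/i_disksize update and the orphan removal reach disk in the
 * same handle that is stopped above.
 */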
1584
1585 /*
1586  * Pages can be marked dirty completely asynchronously from ext3's journalling
1587  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1588  * much here because ->set_page_dirty is called under VFS locks.  The page is
1589  * not necessarily locked.
1590  *
1591  * We cannot just dirty the page and leave attached buffers clean, because the
1592  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1593  * or jbddirty because all the journalling code will explode.
1594  *
1595  * So what we do is to mark the page "pending dirty" and next time writepage
1596  * is called, propagate that into the buffers appropriately.
1597  */
1598 static int ext3_journalled_set_page_dirty(struct page *page)
1599 {
1600         SetPageChecked(page);
1601         return __set_page_dirty_nobuffers(page);
1602 }
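
/*
 * The PageChecked() flag set here is consumed in two places above:
 * ext3_journalled_writepage() treats a checked page as "pending dirty"
 * and runs the full get-write-access/commit sequence over its buffers,
 * and ext3_invalidatepage() clears the flag when a full-page truncate
 * makes the pending dirtying irrelevant.
 */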
1603
1604 static struct address_space_operations ext3_ordered_aops = {
1605         .readpage       = ext3_readpage,
1606         .readpages      = ext3_readpages,
1607         .writepage      = ext3_ordered_writepage,
1608         .sync_page      = block_sync_page,
1609         .prepare_write  = ext3_prepare_write,
1610         .commit_write   = ext3_ordered_commit_write,
1611         .bmap           = ext3_bmap,
1612         .invalidatepage = ext3_invalidatepage,
1613         .releasepage    = ext3_releasepage,
1614         .direct_IO      = ext3_direct_IO,
1615 };
1616
1617 static struct address_space_operations ext3_writeback_aops = {
1618         .readpage       = ext3_readpage,
1619         .readpages      = ext3_readpages,
1620         .writepage      = ext3_writeback_writepage,
1621         .sync_page      = block_sync_page,
1622         .prepare_write  = ext3_prepare_write,
1623         .commit_write   = ext3_writeback_commit_write,
1624         .bmap           = ext3_bmap,
1625         .invalidatepage = ext3_invalidatepage,
1626         .releasepage    = ext3_releasepage,
1627         .direct_IO      = ext3_direct_IO,
1628 };
1629
1630 static struct address_space_operations ext3_journalled_aops = {
1631         .readpage       = ext3_readpage,
1632         .readpages      = ext3_readpages,
1633         .writepage      = ext3_journalled_writepage,
1634         .sync_page      = block_sync_page,
1635         .prepare_write  = ext3_prepare_write,
1636         .commit_write   = ext3_journalled_commit_write,
1637         .set_page_dirty = ext3_journalled_set_page_dirty,
1638         .bmap           = ext3_bmap,
1639         .invalidatepage = ext3_invalidatepage,
1640         .releasepage    = ext3_releasepage,
1641 };
1642
1643 void ext3_set_aops(struct inode *inode)
1644 {
1645         if (ext3_should_order_data(inode))
1646                 inode->i_mapping->a_ops = &ext3_ordered_aops;
1647         else if (ext3_should_writeback_data(inode))
1648                 inode->i_mapping->a_ops = &ext3_writeback_aops;
1649         else
1650                 inode->i_mapping->a_ops = &ext3_journalled_aops;
1651 }
1652
1653 /*
1654  * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1655  * up to the end of the block which corresponds to `from'.
1656  * This is required during truncate. We need to physically zero the tail end
1657  * of that block so it doesn't yield old data if the file is later grown.
1658  */
1659 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1660                 struct address_space *mapping, loff_t from)
1661 {
1662         unsigned long index = from >> PAGE_CACHE_SHIFT;
1663         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1664         unsigned blocksize, iblock, length, pos;
1665         struct inode *inode = mapping->host;
1666         struct buffer_head *bh;
1667         int err;
1668         void *kaddr;
1669
1670         blocksize = inode->i_sb->s_blocksize;
1671         length = blocksize - (offset & (blocksize - 1));
1672         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1673
1674         if (!page_has_buffers(page))
1675                 create_empty_buffers(page, blocksize, 0);
1676
1677         /* Find the buffer that contains "offset" */
1678         bh = page_buffers(page);
1679         pos = blocksize;
1680         while (offset >= pos) {
1681                 bh = bh->b_this_page;
1682                 iblock++;
1683                 pos += blocksize;
1684         }
1685
1686         err = 0;
1687         if (buffer_freed(bh)) {
1688                 BUFFER_TRACE(bh, "freed: skip");
1689                 goto unlock;
1690         }
1691
1692         if (!buffer_mapped(bh)) {
1693                 BUFFER_TRACE(bh, "unmapped");
1694                 ext3_get_block(inode, iblock, bh, 0);
1695                 /* unmapped? It's a hole - nothing to do */
1696                 if (!buffer_mapped(bh)) {
1697                         BUFFER_TRACE(bh, "still unmapped");
1698                         goto unlock;
1699                 }
1700         }
1701
1702         /* Ok, it's mapped. Make sure it's up-to-date */
1703         if (PageUptodate(page))
1704                 set_buffer_uptodate(bh);
1705
1706         if (!buffer_uptodate(bh)) {
1707                 err = -EIO;
1708                 ll_rw_block(READ, 1, &bh);
1709                 wait_on_buffer(bh);
1710                 /* Uhhuh. Read error. Complain and punt. */
1711                 if (!buffer_uptodate(bh))
1712                         goto unlock;
1713         }
1714
1715         if (ext3_should_journal_data(inode)) {
1716                 BUFFER_TRACE(bh, "get write access");
1717                 err = ext3_journal_get_write_access(handle, bh);
1718                 if (err)
1719                         goto unlock;
1720         }
1721
1722         kaddr = kmap_atomic(page, KM_USER0);
1723         memset(kaddr + offset, 0, length);
1724         flush_dcache_page(page);
1725         kunmap_atomic(kaddr, KM_USER0);
1726
1727         BUFFER_TRACE(bh, "zeroed end of block");
1728
1729         err = 0;
1730         if (ext3_should_journal_data(inode)) {
1731                 err = ext3_journal_dirty_metadata(handle, bh);
1732         } else {
1733                 if (ext3_should_order_data(inode))
1734                         err = ext3_journal_dirty_data(handle, bh);
1735                 mark_buffer_dirty(bh);
1736         }
1737
1738 unlock:
1739         unlock_page(page);
1740         page_cache_release(page);
1741         return err;
1742 }
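
/*
 * A worked example of the arithmetic above, assuming PAGE_CACHE_SIZE is
 * 4096 and the blocksize is 1024, truncating at from == 5000:
 *
 *      index  = 5000 >> 12          = 1    (second page of the file)
 *      offset = 5000 & 4095         = 904  (byte offset within that page)
 *      iblock = 1 << (12 - 10)      = 4    (first block covered by the page)
 *      length = 1024 - (904 & 1023) = 120
 *
 * The buffer walk never advances (904 < 1024), so bh stays the page's
 * first buffer and bytes 904..1023 of the page - the tail of logical
 * block 4 - are zeroed.
 */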
1743
1744 /*
1745  * Probably it should be a library function... search for first non-zero word
1746  * or memcmp with zero_page, whatever is better for particular architecture.
1747  * Linus?
1748  */
1749 static inline int all_zeroes(u32 *p, u32 *q)
1750 {
1751         while (p < q)
1752                 if (*p++)
1753                         return 0;
1754         return 1;
1755 }
1756
1757 /**
1758  *      ext3_find_shared - find the indirect blocks for partial truncation.
1759  *      @inode:   inode in question
1760  *      @depth:   depth of the affected branch
1761  *      @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1762  *      @chain:   place to store the pointers to partial indirect blocks
1763  *      @top:     place to the (detached) top of branch
1764  *
1765  *      This is a helper function used by ext3_truncate().
1766  *
1767  *      When we do truncate() we may have to clean the ends of several
1768  *      indirect blocks but leave the blocks themselves alive. Block is
1769  *      partially truncated if some data below the new i_size is refered
1770  *      from it (and it is on the path to the first completely truncated
1771  *      data block, indeed).  We have to free the top of that path along
1772  *      with everything to the right of the path. Since no allocation
1773  *      past the truncation point is possible until ext3_truncate()
1774  *      finishes, we may safely do the latter, but top of branch may
1775  *      require special attention - pageout below the truncation point
1776  *      might try to populate it.
1777  *
1778  *      We atomically detach the top of branch from the tree, store the
1779  *      block number of its root in *@top, pointers to buffer_heads of
1780  *      partially truncated blocks - in @chain[].bh and pointers to
1781  *      their last elements that should not be removed - in
1782  *      @chain[].p. Return value is the pointer to last filled element
1783  *      of @chain.
1784  *
1785  *      The work left to caller to do the actual freeing of subtrees:
1786  *              a) free the subtree starting from *@top
1787  *              b) free the subtrees whose roots are stored in
1788  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1789  *              c) free the subtrees growing from the inode past the @chain[0].
1790  *                      (no partially truncated stuff there).  */
1791
1792 static Indirect *ext3_find_shared(struct inode *inode,
1793                                 int depth,
1794                                 int offsets[4],
1795                                 Indirect chain[4],
1796                                 u32 *top)
1797 {
1798         Indirect *partial, *p;
1799         int k, err;
1800
1801         *top = 0;
1802         /* Make k index the deepest non-null offset + 1 */
1803         for (k = depth; k > 1 && !offsets[k-1]; k--)
1804                 ;
1805         partial = ext3_get_branch(inode, k, offsets, chain, &err);
1806         /* Writer: pointers */
1807         if (!partial)
1808                 partial = chain + k-1;
1809         /*
1810          * If the branch acquired continuation since we've looked at it -
1811          * fine, it should all survive and (new) top doesn't belong to us.
1812          */
1813         if (!partial->key && *partial->p)
1814                 /* Writer: end */
1815                 goto no_top;
1816         for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
1817                 ;
1818         /*
1819          * OK, we've found the last block that must survive. The rest of our
1820          * branch should be detached before unlocking. However, if that rest
1821          * of branch is all ours and does not grow immediately from the inode
1822          * it's easier to cheat and just decrement partial->p.
1823          */
1824         if (p == chain + k - 1 && p > chain) {
1825                 p->p--;
1826         } else {
1827                 *top = *p->p;
1828                 /* Nope, don't do this in ext3.  Must leave the tree intact */
1829 #if 0
1830                 *p->p = 0;
1831 #endif
1832         }
1833         /* Writer: end */
1834
1835         while (partial > p)
1836         {
1837                 brelse(partial->bh);
1838                 partial--;
1839         }
1840 no_top:
1841         return partial;
1842 }
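
/*
 * A worked example, assuming 1024 block pointers per indirect block:
 * truncating so that last_block == 17 gives depth == 2 and
 * offsets == {12, 5}, i.e. block 17 lives in slot 5 of the single
 * indirect block.  ext3_get_branch() fills chain[0] (the inode's
 * EXT3_IND_BLOCK slot) and chain[1] (the indirect block itself).
 * Slots 0-4 of that block cover blocks which must survive; assuming
 * at least one of them is non-zero, all_zeroes() stops the backward
 * scan at chain[1], so p == chain + k - 1 > chain and we take the
 * cheap path: partial->p is decremented to point at slot 4 and *top
 * stays 0.  ext3_truncate() then frees slots 5..1023 of the indirect
 * block via ext3_free_branches() and leaves the block itself in place.
 */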
1843
1844 /*
1845  * Zero a number of block pointers in either an inode or an indirect block.
1846  * If we restart the transaction we must again get write access to the
1847  * indirect block for further modification.
1848  *
1849  * We release `count' blocks on disk, but (last - first) may be greater
1850  * than `count' because there can be holes in there.
1851  */
1852 static void
1853 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
1854                 unsigned long block_to_free, unsigned long count,
1855                 u32 *first, u32 *last)
1856 {
1857         u32 *p;
1858         if (try_to_extend_transaction(handle, inode)) {
1859                 if (bh) {
1860                         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1861                         ext3_journal_dirty_metadata(handle, bh);
1862                 }
1863                 ext3_mark_inode_dirty(handle, inode);
1864                 ext3_journal_test_restart(handle, inode);
1865                 if (bh) {
1866                         BUFFER_TRACE(bh, "retaking write access");
1867                         ext3_journal_get_write_access(handle, bh);
1868                 }
1869         }
1870
1871         /*
1872          * Any buffers which are on the journal will be in memory. We find
1873          * them on the hash table so journal_revoke() will run journal_forget()
1874          * on them.  We've already detached each block from the file, so
1875          * bforget() in journal_forget() should be safe.
1876          *
1877          * AKPM: turn on bforget in journal_forget()!!!
1878          */
1879         for (p = first; p < last; p++) {
1880                 u32 nr = le32_to_cpu(*p);
1881                 if (nr) {
1882                         struct buffer_head *bh;
1883
1884                         *p = 0;
1885                         bh = sb_find_get_block(inode->i_sb, nr);
1886                         ext3_forget(handle, 0, inode, bh, nr);
1887                 }
1888         }
1889
1890         ext3_free_blocks(handle, inode, block_to_free, count);
1891 }
1892
1893 /**
1894  * ext3_free_data - free a list of data blocks
1895  * @handle:     handle for this transaction
1896  * @inode:      inode we are dealing with
1897  * @this_bh:    indirect buffer_head which contains *@first and *@last
1898  * @first:      array of block numbers
1899  * @last:       points immediately past the end of array
1900  *
1901  * We are freeing all blocks referenced from that array (numbers are stored as
1902  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
1903  *
1904  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
1905  * blocks are contiguous then releasing them at one time will only affect one
1906  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
1907  * actually use a lot of journal space.
1908  *
1909  * @this_bh will be %NULL if @first and @last point into the inode's direct
1910  * block pointers.
1911  */
1912 static void ext3_free_data(handle_t *handle, struct inode *inode,
1913                            struct buffer_head *this_bh, u32 *first, u32 *last)
1914 {
1915         unsigned long block_to_free = 0;    /* Starting block # of a run */
1916         unsigned long count = 0;            /* Number of blocks in the run */ 
1917         u32 *block_to_free_p = NULL;        /* Pointer into inode/ind
1918                                                corresponding to
1919                                                block_to_free */
1920         unsigned long nr;                   /* Current block # */
1921         u32 *p;                             /* Pointer into inode/ind
1922                                                for current block */
1923         int err;
1924
1925         if (this_bh) {                          /* For indirect block */
1926                 BUFFER_TRACE(this_bh, "get_write_access");
1927                 err = ext3_journal_get_write_access(handle, this_bh);
1928                 /* Important: if we can't update the indirect pointers
1929                  * to the blocks, we can't free them. */
1930                 if (err)
1931                         return;
1932         }
1933
1934         for (p = first; p < last; p++) {
1935                 nr = le32_to_cpu(*p);
1936                 if (nr) {
1937                         /* accumulate blocks to free if they're contiguous */
1938                         if (count == 0) {
1939                                 block_to_free = nr;
1940                                 block_to_free_p = p;
1941                                 count = 1;
1942                         } else if (nr == block_to_free + count) {
1943                                 count++;
1944                         } else {
1945                                 ext3_clear_blocks(handle, inode, this_bh, 
1946                                                   block_to_free,
1947                                                   count, block_to_free_p, p);
1948                                 block_to_free = nr;
1949                                 block_to_free_p = p;
1950                                 count = 1;
1951                         }
1952                 }
1953         }
1954
1955         if (count > 0)
1956                 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
1957                                   count, block_to_free_p, p);
1958
1959         if (this_bh) {
1960                 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
1961                 ext3_journal_dirty_metadata(handle, this_bh);
1962         }
1963 }
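
/*
 * For example, a pointer array decoding to {100, 101, 102, 0, 500}
 * results in exactly two ext3_clear_blocks() calls: one for the run
 * starting at block 100 with count == 3 (issued when 500 breaks the
 * run) and one for the lone block 500 from the count > 0 tail case;
 * the hole is simply skipped.  Batching contiguous runs like this
 * keeps the number of bitmap blocks dirtied per transaction small.
 */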
1964
1965 /**
1966  *      ext3_free_branches - free an array of branches
1967  *      @handle: JBD handle for this transaction
1968  *      @inode: inode we are dealing with
1969  *      @parent_bh: the buffer_head which contains *@first and *@last
1970  *      @first: array of block numbers
1971  *      @last:  pointer immediately past the end of array
1972  *      @depth: depth of the branches to free
1973  *
1974  *      We are freeing all blocks referenced from these branches (numbers are
1975  *      stored as little-endian 32-bit) and updating @inode->i_blocks
1976  *      appropriately.
1977  */
1978 static void ext3_free_branches(handle_t *handle, struct inode *inode,
1979                                struct buffer_head *parent_bh,
1980                                u32 *first, u32 *last, int depth)
1981 {
1982         unsigned long nr;
1983         u32 *p;
1984
1985         if (is_handle_aborted(handle))
1986                 return;
1987
1988         if (depth--) {
1989                 struct buffer_head *bh;
1990                 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
1991                 p = last;
1992                 while (--p >= first) {
1993                         nr = le32_to_cpu(*p);
1994                         if (!nr)
1995                                 continue;               /* A hole */
1996
1997                         /* Go read the buffer for the next level down */
1998                         bh = sb_bread(inode->i_sb, nr);
1999
2000                         /*
2001                          * A read failure? Report error and clear slot
2002                          * (should be rare).
2003                          */
2004                         if (!bh) {
2005                                 ext3_error(inode->i_sb, "ext3_free_branches",
2006                                            "Read failure, inode=%ld, block=%ld",
2007                                            inode->i_ino, nr);
2008                                 continue;
2009                         }
2010
2011                         /* This zaps the entire block.  Bottom up. */
2012                         BUFFER_TRACE(bh, "free child branches");
2013                         ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
2014                                            (u32*)bh->b_data + addr_per_block,
2015                                            depth);
2016
2017                         /*
2018                          * We've probably journalled the indirect block several
2019                          * times during the truncate.  But it's no longer
2020                          * needed and we now drop it from the transaction via
2021                          * journal_revoke().
2022                          *
2023                          * That's easy if it's exclusively part of this
2024                          * transaction.  But if it's part of the committing
2025                          * transaction then journal_forget() will simply
2026                          * brelse() it.  That means that if the underlying
2027                          * block is reallocated in ext3_get_block(),
2028                          * unmap_underlying_metadata() will find this block
2029                          * and will try to get rid of it.  damn, damn.
2030                          *
2031                          * If this block has already been committed to the
2032                          * journal, a revoke record will be written.  And
2033                          * revoke records must be emitted *before* clearing
2034                          * this block's bit in the bitmaps.
2035                          */
2036                         ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2037
2038                         /*
2039                          * Everything below this pointer has been
2040                          * released.  Now let this top-of-subtree go.
2041                          *
2042                          * We want the freeing of this indirect block to be
2043                          * atomic in the journal with the updating of the
2044                          * bitmap block which owns it.  So make some room in
2045                          * the journal.
2046                          *
2047                          * We zero the parent pointer *after* freeing its
2048                          * pointee in the bitmaps, so if extend_transaction()
2049                          * for some reason fails to put the bitmap changes and
2050                          * the release into the same transaction, recovery
2051                          * will merely complain about releasing a free block,
2052                          * rather than leaking blocks.
2053                          */
2054                         if (is_handle_aborted(handle))
2055                                 return;
2056                         if (try_to_extend_transaction(handle, inode)) {
2057                                 ext3_mark_inode_dirty(handle, inode);
2058                                 ext3_journal_test_restart(handle, inode);
2059                         }
2060
2061                         ext3_free_blocks(handle, inode, nr, 1);
2062
2063                         if (parent_bh) {
2064                                 /*
2065                                  * The block which we have just freed is
2066                                  * pointed to by an indirect block: journal it
2067                                  */
2068                                 BUFFER_TRACE(parent_bh, "get_write_access");
2069                                 if (!ext3_journal_get_write_access(handle,
2070                                                                    parent_bh)){
2071                                         *p = 0;
2072                                         BUFFER_TRACE(parent_bh,
2073                                         "call ext3_journal_dirty_metadata");
2074                                         ext3_journal_dirty_metadata(handle, 
2075                                                                     parent_bh);
2076                                 }
2077                         }
2078                 }
2079         } else {
2080                 /* We have reached the bottom of the tree. */
2081                 BUFFER_TRACE(parent_bh, "free data blocks");
2082                 ext3_free_data(handle, inode, parent_bh, first, last);
2083         }
2084 }
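
/*
 * To see the ordering, follow the depth == 2 call made for the double
 * indirect block: it reads the DIND block and recurses on its contents
 * with depth == 1.  That call walks the DIND slots from last to first;
 * for each one it reads the indirect block, recurses once more (the
 * depth == 0 leg, ext3_free_data() on the data pointers), then revokes
 * and frees the indirect block and zeroes its slot in the DIND block.
 * Bottom up, right to left: if a crash separates the freeing from the
 * pointer update, the restarted truncate at worst frees an already-free
 * block, and never leaks one.
 */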
2085
2086 /*
2087  * ext3_truncate()
2088  *
2089  * We block out ext3_get_block() block instantiations across the entire
2090  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2091  * simultaneously on behalf of the same inode.
2092  *
2093  * As we work through the truncate and commit bits of it to the journal there
2094  * is one core, guiding principle: the file's tree must always be consistent on
2095  * disk.  We must be able to restart the truncate after a crash.
2096  *
2097  * The file's tree may be transiently inconsistent in memory (although it
2098  * probably isn't), but whenever we close off and commit a journal transaction,
2099  * the contents of (the filesystem + the journal) must be consistent and
2100  * restartable.  It's pretty simple, really: bottom up, right to left (although
2101  * left-to-right works OK too).
2102  *
2103  * Note that at recovery time, journal replay occurs *before* the restart of
2104  * truncate against the orphan inode list.
2105  *
2106  * The committed inode has the new, desired i_size (which is the same as
2107  * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2108  * that this inode's truncate did not complete and it will again call
2109  * ext3_truncate() to have another go.  So there will be instantiated blocks
2110  * to the right of the truncation point in a crashed ext3 filesystem.  But
2111  * that's fine - as long as they are linked from the inode, the post-crash
2112  * ext3_truncate() run will find them and release them.
2113  */
2114
2115 void ext3_truncate(struct inode * inode)
2116 {
2117         handle_t *handle;
2118         struct ext3_inode_info *ei = EXT3_I(inode);
2119         u32 *i_data = ei->i_data;
2120         int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2121         struct address_space *mapping = inode->i_mapping;
2122         int offsets[4];
2123         Indirect chain[4];
2124         Indirect *partial;
2125         int nr = 0;
2126         int n;
2127         long last_block;
2128         unsigned blocksize = inode->i_sb->s_blocksize;
2129         struct page *page;
2130
2131         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2132             S_ISLNK(inode->i_mode)))
2133                 return;
2134         if (ext3_inode_is_fast_symlink(inode))
2135                 return;
2136         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2137                 return;
2138
2139         ext3_discard_prealloc(inode);
2140
2141         /*
2142          * We have to lock the EOF page here, because lock_page() nests
2143          * outside journal_start().
2144          */
2145         if ((inode->i_size & (blocksize - 1)) == 0) {
2146                 /* Block boundary? Nothing to do */
2147                 page = NULL;
2148         } else {
2149                 page = grab_cache_page(mapping,
2150                                 inode->i_size >> PAGE_CACHE_SHIFT);
2151                 if (!page)
2152                         return;
2153         }
2154
2155         handle = start_transaction(inode);
2156         if (IS_ERR(handle)) {
2157                 if (page) {
2158                         clear_highpage(page);
2159                         flush_dcache_page(page);
2160                         unlock_page(page);
2161                         page_cache_release(page);
2162                 }
2163                 return;         /* AKPM: return what? */
2164         }
2165
2166         last_block = (inode->i_size + blocksize-1)
2167                                         >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2168
2169         if (page)
2170                 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2171
2172         n = ext3_block_to_path(inode, last_block, offsets, NULL);
2173         if (n == 0)
2174                 goto out_stop;  /* error */
2175
2176         /*
2177          * OK.  This truncate is going to happen.  We add the inode to the
2178          * orphan list, so that if this truncate spans multiple transactions,
2179          * and we crash, we will resume the truncate when the filesystem
2180          * recovers.  It also marks the inode dirty, to catch the new size.
2181          *
2182          * Implication: the file must always be in a sane, consistent
2183          * truncatable state while each transaction commits.
2184          */
2185         if (ext3_orphan_add(handle, inode))
2186                 goto out_stop;
2187
2188         /*
2189          * The orphan list entry will now protect us from any crash which
2190          * occurs before the truncate completes, so it is now safe to propagate
2191          * the new, shorter inode size (held for now in i_size) into the
2192          * on-disk inode. We do this via i_disksize, which is the value which
2193          * ext3 *really* writes onto the disk inode.
2194          */
2195         ei->i_disksize = inode->i_size;
2196
2197         /*
2198          * From here we block out all ext3_get_block() callers who want to
2199          * modify the block allocation tree.
2200          */
2201         down(&ei->truncate_sem);
2202
2203         if (n == 1) {           /* direct blocks */
2204                 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2205                                i_data + EXT3_NDIR_BLOCKS);
2206                 goto do_indirects;
2207         }
2208
2209         partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2210         /* Kill the top of shared branch (not detached) */
2211         if (nr) {
2212                 if (partial == chain) {
2213                         /* Shared branch grows from the inode */
2214                         ext3_free_branches(handle, inode, NULL,
2215                                            &nr, &nr+1, (chain+n-1) - partial);
2216                         *partial->p = 0;
2217                         /*
2218                          * We mark the inode dirty prior to restart,
2219                          * and prior to stop.  No need for it here.
2220                          */
2221                 } else {
2222                         /* Shared branch grows from an indirect block */
2223                         BUFFER_TRACE(partial->bh, "get_write_access");
2224                         ext3_free_branches(handle, inode, partial->bh,
2225                                         partial->p,
2226                                         partial->p+1, (chain+n-1) - partial);
2227                 }
2228         }
2229         /* Clear the ends of indirect blocks on the shared branch */
2230         while (partial > chain) {
2231                 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2232                                    (u32*)partial->bh->b_data + addr_per_block,
2233                                    (chain+n-1) - partial);
2234                 BUFFER_TRACE(partial->bh, "call brelse");
2235                 brelse (partial->bh);
2236                 partial--;
2237         }
2238 do_indirects:
2239         /* Kill the remaining (whole) subtrees */
2240         switch (offsets[0]) {
2241                 default:
2242                         nr = i_data[EXT3_IND_BLOCK];
2243                         if (nr) {
2244                                 ext3_free_branches(handle, inode, NULL,
2245                                                    &nr, &nr+1, 1);
2246                                 i_data[EXT3_IND_BLOCK] = 0;
2247                         }
2248                 case EXT3_IND_BLOCK:
2249                         nr = i_data[EXT3_DIND_BLOCK];
2250                         if (nr) {
2251                                 ext3_free_branches(handle, inode, NULL,
2252                                                    &nr, &nr+1, 2);
2253                                 i_data[EXT3_DIND_BLOCK] = 0;
2254                         }
2255                 case EXT3_DIND_BLOCK:
2256                         nr = i_data[EXT3_TIND_BLOCK];
2257                         if (nr) {
2258                                 ext3_free_branches(handle, inode, NULL,
2259                                                    &nr, &nr+1, 3);
2260                                 i_data[EXT3_TIND_BLOCK] = 0;
2261                         }
2262                 case EXT3_TIND_BLOCK:
2263                         ;
2264         }
2265         up(&ei->truncate_sem);
2266         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2267         ext3_mark_inode_dirty(handle, inode);
2268
2269         /* In a multi-transaction truncate, we only make the final
2270          * transaction synchronous */
2271         if (IS_SYNC(inode))
2272                 handle->h_sync = 1;
2273 out_stop:
2274         /*
2275          * If this was a simple ftruncate(), and the file will remain alive
2276          * then we need to clear up the orphan record which we created above.
2277          * However, if this was a real unlink then we were called by
2278          * ext3_delete_inode(), and we allow that function to clean up the
2279          * orphan info for us.
2280          */
2281         if (inode->i_nlink)
2282                 ext3_orphan_del(handle, inode);
2283
2284         ext3_journal_stop(handle);
2285 }
2286
2287 static unsigned long ext3_get_inode_block(struct super_block *sb,
2288                 unsigned long ino, struct ext3_iloc *iloc)
2289 {
2290         unsigned long desc, group_desc, block_group;
2291         unsigned long offset, block;
2292         struct buffer_head *bh;
2293         struct ext3_group_desc * gdp;
2294
2295         if ((ino != EXT3_ROOT_INO &&
2296                 ino != EXT3_JOURNAL_INO &&
2297                 ino < EXT3_FIRST_INO(sb)) ||
2298                 ino > le32_to_cpu(
2299                         EXT3_SB(sb)->s_es->s_inodes_count)) {
2300                 ext3_error (sb, "ext3_get_inode_block",
2301                             "bad inode number: %lu", ino);
2302                 return 0;
2303         }
2304         block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2305         if (block_group >= EXT3_SB(sb)->s_groups_count) {
2306                 ext3_error (sb, "ext3_get_inode_block",
2307                             "group >= groups count");
2308                 return 0;
2309         }
2310         group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
2311         desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
2312         bh = EXT3_SB(sb)->s_group_desc[group_desc];
2313         if (!bh) {
2314                 ext3_error (sb, "ext3_get_inode_block",
2315                             "Descriptor not loaded");
2316                 return 0;
2317         }
2318
2319         gdp = (struct ext3_group_desc *) bh->b_data;
2320         /*
2321          * Figure out the offset within the block group inode table
2322          */
2323         offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2324                 EXT3_INODE_SIZE(sb);
2325         block = le32_to_cpu(gdp[desc].bg_inode_table) +
2326                 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2327
2328         iloc->block_group = block_group;
2329         iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2330         return block;
2331 }
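
/*
 * A worked example, assuming 4KB blocks, 128-byte inodes and 32768
 * inodes per group (so 128 group descriptors fit in a block): for
 * ino == 32770,
 *
 *      block_group  = (32770 - 1) / 32768         = 1
 *      group_desc   = 1 >> 7                      = 0
 *      desc         = 1 & 127                     = 1
 *      offset       = ((32770 - 1) % 32768) * 128 = 128
 *      block        = bg_inode_table + (128 >> 12)
 *      iloc->offset = 128 & 4095                  = 128
 *
 * i.e. the second inode slot in the first block of group 1's inode
 * table.
 */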
2332
2333 /* 
2334  * ext3_get_inode_loc returns with an extra refcount against the inode's
2335  * underlying buffer_head on success.  If `in_mem' is true then the caller
2336  * already has all of the inode's data in memory, which may let us skip
2337  * reading the inode block from disk (see the bitmap check below).
2338  */
2339 static int ext3_get_inode_loc(struct inode *inode,
2340                                 struct ext3_iloc *iloc, int in_mem)
2341 {
2342         unsigned long block;
2343         struct buffer_head *bh;
2344
2345         block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2346         if (!block)
2347                 return -EIO;
2348
2349         bh = sb_getblk(inode->i_sb, block);
2350         if (!bh) {
2351                 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2352                                 "unable to read inode block - "
2353                                 "inode=%lu, block=%lu", inode->i_ino, block);
2354                 return -EIO;
2355         }
2356         if (!buffer_uptodate(bh)) {
2357                 lock_buffer(bh);
2358                 if (buffer_uptodate(bh)) {
2359                         /* someone brought it uptodate while we waited */
2360                         unlock_buffer(bh);
2361                         goto has_buffer;
2362                 }
2363
2364                 /* we can't skip the read if the inode's data is only on disk */
2365                 if (in_mem) {
2366                         struct buffer_head *bitmap_bh;
2367                         struct ext3_group_desc *desc;
2368                         int inodes_per_buffer;
2369                         int inode_offset, i;
2370                         int block_group;
2371                         int start;
2372
2373                         /*
2374                          * If this is the only valid inode in the block we
2375                          * need not read the block.
2376                          */
2377                         block_group = (inode->i_ino - 1) /
2378                                         EXT3_INODES_PER_GROUP(inode->i_sb);
2379                         inodes_per_buffer = bh->b_size /
2380                                 EXT3_INODE_SIZE(inode->i_sb);
2381                         inode_offset = ((inode->i_ino - 1) %
2382                                         EXT3_INODES_PER_GROUP(inode->i_sb));
2383                         start = inode_offset & ~(inodes_per_buffer - 1);
2384
2385                         /* Is the inode bitmap in cache? */
2386                         desc = ext3_get_group_desc(inode->i_sb,
2387                                                 block_group, NULL);
2388                         if (!desc)
2389                                 goto make_io;
2390
2391                         bitmap_bh = sb_getblk(inode->i_sb,
2392                                         le32_to_cpu(desc->bg_inode_bitmap));
2393                         if (!bitmap_bh)
2394                                 goto make_io;
2395
2396                         /*
2397                          * If the inode bitmap isn't in cache then the
2398                          * optimisation may end up performing two reads instead
2399                          * of one, so skip it.
2400                          */
2401                         if (!buffer_uptodate(bitmap_bh)) {
2402                                 brelse(bitmap_bh);
2403                                 goto make_io;
2404                         }
2405                         for (i = start; i < start + inodes_per_buffer; i++) {
2406                                 if (i == inode_offset)
2407                                         continue;
2408                                 if (ext3_test_bit(i, bitmap_bh->b_data))
2409                                         break;
2410                         }
2411                         brelse(bitmap_bh);
2412                         if (i == start + inodes_per_buffer) {
2413                                 /* all other inodes are free, so skip I/O */
2414                                 memset(bh->b_data, 0, bh->b_size);
2415                                 set_buffer_uptodate(bh);
2416                                 unlock_buffer(bh);
2417                                 goto has_buffer;
2418                         }
2419                 }
2420
2421 make_io:
2422                 /*
2423                  * There are other valid inodes in the buffer so we must
2424                  * read the block from disk
2425                  */
2426                 get_bh(bh);
2427                 bh->b_end_io = end_buffer_read_sync;
2428                 submit_bh(READ, bh);
2429                 wait_on_buffer(bh);
2430                 if (!buffer_uptodate(bh)) {
2431                         ext3_error(inode->i_sb, "ext3_get_inode_loc",
2432                                         "unable to read inode block - "
2433                                         "inode=%lu, block=%lu",
2434                                         inode->i_ino, block);
2435                         brelse(bh);
2436                         return -EIO;
2437                 }
2438         }
2439 has_buffer:
2440         iloc->bh = bh;
2441         return 0;
2442 }
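
/*
 * The skip-the-read optimisation above works at buffer granularity.
 * With 4KB blocks and 128-byte inodes (an assumed geometry),
 * inodes_per_buffer == 32, so for inode_offset == 70 we get
 * start == 64 and scan bits 64..95 of the inode bitmap, skipping
 * bit 70 itself.  Only when every other inode in that window is free
 * may the buffer be zero-filled in memory rather than read from disk.
 */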
2443
2444 void ext3_set_inode_flags(struct inode *inode)
2445 {
2446         unsigned int flags = EXT3_I(inode)->i_flags;
2447
2448         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2449         if (flags & EXT3_SYNC_FL)
2450                 inode->i_flags |= S_SYNC;
2451         if (flags & EXT3_APPEND_FL)
2452                 inode->i_flags |= S_APPEND;
2453         if (flags & EXT3_IMMUTABLE_FL)
2454                 inode->i_flags |= S_IMMUTABLE;
2455         if (flags & EXT3_NOATIME_FL)
2456                 inode->i_flags |= S_NOATIME;
2457         if (flags & EXT3_DIRSYNC_FL)
2458                 inode->i_flags |= S_DIRSYNC;
2459 }
2460
2461 void ext3_read_inode(struct inode * inode)
2462 {
2463         struct ext3_iloc iloc;
2464         struct ext3_inode *raw_inode;
2465         struct ext3_inode_info *ei = EXT3_I(inode);
2466         struct buffer_head *bh;
2467         int block;
2468
2469 #ifdef CONFIG_EXT3_FS_POSIX_ACL
2470         ei->i_acl = EXT3_ACL_NOT_CACHED;
2471         ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2472 #endif
2473         if (ext3_get_inode_loc(inode, &iloc, 0))
2474                 goto bad_inode;
2475         bh = iloc.bh;
2476         raw_inode = ext3_raw_inode(&iloc);
2477         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2478         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2479         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2480         if(!(test_opt (inode->i_sb, NO_UID32))) {
2481                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2482                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2483         }
2484         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2485         inode->i_size = le32_to_cpu(raw_inode->i_size);
2486         inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
2487         inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
2488         inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
2489         inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2490
2491         ei->i_state = 0;
2492         ei->i_next_alloc_block = 0;
2493         ei->i_next_alloc_goal = 0;
2494         ei->i_dir_start_lookup = 0;
2495         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2496         /* We now have enough fields to check if the inode was active or not.
2497          * This is needed because nfsd might try to access dead inodes
2498          * the test is the same one that e2fsck uses
2499          * NeilBrown 1999oct15
2500          */
2501         if (inode->i_nlink == 0) {
2502                 if (inode->i_mode == 0 ||
2503                     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2504                         /* this inode is deleted */
2505                         brelse (bh);
2506                         goto bad_inode;
2507                 }
2508                 /* The only unlinked inodes we let through here have
2509                  * valid i_mode and are being read by the orphan
2510                  * recovery code: that's fine, we're about to complete
2511                  * the process of deleting those. */
2512         }
2513         inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
2514                                          * (for stat), not the fs block
2515                                          * size */  
2516         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2517         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2518 #ifdef EXT3_FRAGMENTS
2519         ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2520         ei->i_frag_no = raw_inode->i_frag;
2521         ei->i_frag_size = raw_inode->i_fsize;
2522 #endif
2523         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2524         if (!S_ISREG(inode->i_mode)) {
2525                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2526         } else {
2527                 inode->i_size |=
2528                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2529         }
2530         ei->i_disksize = inode->i_size;
2531         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2532 #ifdef EXT3_PREALLOCATE
2533         ei->i_prealloc_count = 0;
2534 #endif
2535         ei->i_block_group = iloc.block_group;
2536
2537         /*
2538          * NOTE! The in-memory inode i_data array is in little-endian order
2539          * even on big-endian machines: we do NOT byteswap the block numbers!
2540          */
2541         for (block = 0; block < EXT3_N_BLOCKS; block++)
2542                 ei->i_data[block] = raw_inode->i_block[block];
2543         INIT_LIST_HEAD(&ei->i_orphan);
2544
2545         if (S_ISREG(inode->i_mode)) {
2546                 inode->i_op = &ext3_file_inode_operations;
2547                 inode->i_fop = &ext3_file_operations;
2548                 ext3_set_aops(inode);
2549         } else if (S_ISDIR(inode->i_mode)) {
2550                 inode->i_op = &ext3_dir_inode_operations;
2551                 inode->i_fop = &ext3_dir_operations;
2552         } else if (S_ISLNK(inode->i_mode)) {
2553                 if (ext3_inode_is_fast_symlink(inode))
2554                         inode->i_op = &ext3_fast_symlink_inode_operations;
2555                 else {
2556                         inode->i_op = &ext3_symlink_inode_operations;
2557                         ext3_set_aops(inode);
2558                 }
2559         } else {
2560                 inode->i_op = &ext3_special_inode_operations;
2561                 if (raw_inode->i_block[0])
2562                         init_special_inode(inode, inode->i_mode,
2563                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2564                 else 
2565                         init_special_inode(inode, inode->i_mode,
2566                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2567         }
2568         brelse (iloc.bh);
2569         ext3_set_inode_flags(inode);
2570         return;
2571
2572 bad_inode:
2573         make_bad_inode(inode);
2574         return;
2575 }
2576
2577 /*
2578  * Post the struct inode info into an on-disk inode location in the
2579  * buffer-cache.  This gobbles the caller's reference to the
2580  * buffer_head in the inode location struct.
2581  *
2582  * The caller must have write access to iloc->bh.
2583  */
2584 static int ext3_do_update_inode(handle_t *handle, 
2585                                 struct inode *inode, 
2586                                 struct ext3_iloc *iloc)
2587 {
2588         struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2589         struct ext3_inode_info *ei = EXT3_I(inode);
2590         struct buffer_head *bh = iloc->bh;
2591         int err = 0, rc, block;
2592
2593         /* For fields not tracked in the in-memory inode,
2594          * initialise them to zero for new inodes. */
2595         if (ei->i_state & EXT3_STATE_NEW)
2596                 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2597
2598         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2599         if(!(test_opt(inode->i_sb, NO_UID32))) {
2600                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2601                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2602 /*
2603  * Fix up interoperability with old kernels. Otherwise, old inodes get
2604  * re-used with the upper 16 bits of the uid/gid intact
2605  */
2606                 if(!ei->i_dtime) {
2607                         raw_inode->i_uid_high =
2608                                 cpu_to_le16(high_16_bits(inode->i_uid));
2609                         raw_inode->i_gid_high =
2610                                 cpu_to_le16(high_16_bits(inode->i_gid));
2611                 } else {
2612                         raw_inode->i_uid_high = 0;
2613                         raw_inode->i_gid_high = 0;
2614                 }
2615         } else {
2616                 raw_inode->i_uid_low =
2617                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2618                 raw_inode->i_gid_low =
2619                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2620                 raw_inode->i_uid_high = 0;
2621                 raw_inode->i_gid_high = 0;
2622         }
2623         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2624         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2625         raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2626         raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2627         raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2628         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2629         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2630         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2631 #ifdef EXT3_FRAGMENTS
2632         raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2633         raw_inode->i_frag = ei->i_frag_no;
2634         raw_inode->i_fsize = ei->i_frag_size;
2635 #endif
2636         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2637         if (!S_ISREG(inode->i_mode)) {
2638                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2639         } else {
2640                 raw_inode->i_size_high =
2641                         cpu_to_le32(ei->i_disksize >> 32);
2642                 if (ei->i_disksize > 0x7fffffffULL) {
2643                         struct super_block *sb = inode->i_sb;
2644                         if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2645                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2646                             EXT3_SB(sb)->s_es->s_rev_level ==
2647                                         cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2648                                /* If this is the first large file
2649                                 * created, add a flag to the superblock.
2650                                 */
2651                                 err = ext3_journal_get_write_access(handle,
2652                                                 EXT3_SB(sb)->s_sbh);
2653                                 if (err)
2654                                         goto out_brelse;
2655                                 ext3_update_dynamic_rev(sb);
2656                                 EXT3_SET_RO_COMPAT_FEATURE(sb,
2657                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2658                                 sb->s_dirt = 1;
2659                                 handle->h_sync = 1;
2660                                 err = ext3_journal_dirty_metadata(handle,
2661                                                 EXT3_SB(sb)->s_sbh);
2662                         }
2663                 }
2664         }
2665         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2666         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2667                 if (old_valid_dev(inode->i_rdev)) {
2668                         raw_inode->i_block[0] =
2669                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2670                         raw_inode->i_block[1] = 0;
2671                 } else {
2672                         raw_inode->i_block[0] = 0;
2673                         raw_inode->i_block[1] =
2674                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2675                         raw_inode->i_block[2] = 0;
2676                 }
2677         } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2678                 raw_inode->i_block[block] = ei->i_data[block];
2679
2680         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2681         rc = ext3_journal_dirty_metadata(handle, bh);
2682         if (!err)
2683                 err = rc;
2684         ei->i_state &= ~EXT3_STATE_NEW;
2685
2686 out_brelse:
2687         brelse(bh);
2688         ext3_std_error(inode->i_sb, err);
2689         return err;
2690 }
2691
2692 /*
2693  * ext3_write_inode()
2694  *
2695  * We are called from a few places:
2696  *
2697  * - Within generic_file_write() for O_SYNC files.
2698  *   Here, there will be no transaction running. We wait for any running
2699  *   transaction to commit.
2700  *
2701  * - Within sys_sync(), kupdate and such.
2702  *   We wait on commit, if told to.
2703  *
2704  * - Within prune_icache() (PF_MEMALLOC == true)
2705  *   Here we simply return.  We can't afford to block kswapd on the
2706  *   journal commit.
2707  *
2708  * In all cases it is actually safe for us to return without doing anything,
2709  * because the inode has been copied into a raw inode buffer in
2710  * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2711  * knfsd.
2712  *
2713  * Note that we are absolutely dependent upon all inode dirtiers doing the
2714  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2715  * which we are interested.
2716  *
2717  * It would be a bug for them to not do this.  The code:
2718  *
2719  *      mark_inode_dirty(inode)
2720  *      stuff();
2721  *      inode->i_size = expr;
2722  *
2723  * is in error because a kswapd-driven write_inode() could occur while
2724  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2725  * will no longer be on the superblock's dirty inode list.
2726  */
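/*
 * For illustration only (a sketch of the safe ordering implied by the
 * comment above, not a real call site): dirty the in-core fields first
 * and mark the inode dirty last, so a concurrent write_inode() can
 * never snapshot a half-made change:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);
 */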
2727 void ext3_write_inode(struct inode *inode, int wait)
2728 {
2729         if (current->flags & PF_MEMALLOC)
2730                 return;
2731
2732         if (ext3_journal_current_handle()) {
2733                 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2734                 dump_stack();
2735                 return;
2736         }
2737
2738         if (!wait)
2739                 return;
2740
2741         ext3_force_commit(inode->i_sb);
2742 }
2743
2744 /*
2745  * ext3_setattr()
2746  *
2747  * Called from notify_change.
2748  *
2749  * We want to trap VFS attempts to truncate the file as soon as
2750  * possible.  In particular, we want to make sure that when the VFS
2751  * shrinks i_size, we put the inode on the orphan list and modify
2752  * i_disksize immediately, so that during the subsequent flushing of
2753  * dirty pages and freeing of disk blocks, we can guarantee that any
2754  * commit will leave the blocks being flushed in an unused state on
2755  * disk.  (On recovery, the inode will get truncated and the blocks will
2756  * be freed, so we have a strong guarantee that no future commit will
2757  * leave these blocks visible to the user.)  
2758  *
2759  * Called with inode->sem down.
2760  */
2761 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2762 {
2763         struct inode *inode = dentry->d_inode;
2764         int error, rc = 0;
2765         const unsigned int ia_valid = attr->ia_valid;
2766
2767         error = inode_change_ok(inode, attr);
2768         if (error)
2769                 return error;
2770
2771         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2772                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2773                 handle_t *handle;
2774
2775                 /* (user+group)*(old+new) quota structures, plus the inode
2776                  * write (sb, inode block, ? - but truncate inode update has it) */
2777                 handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
2778                 if (IS_ERR(handle)) {
2779                         error = PTR_ERR(handle);
2780                         goto err_out;
2781                 }
2782                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2783                 if (error) {
2784                         ext3_journal_stop(handle);
2785                         return error;
2786                 }
2787                 /* Update corresponding info in inode so that everything is in
2788                  * one transaction */
2789                 if (attr->ia_valid & ATTR_UID)
2790                         inode->i_uid = attr->ia_uid;
2791                 if (attr->ia_valid & ATTR_GID)
2792                         inode->i_gid = attr->ia_gid;
2793                 error = ext3_mark_inode_dirty(handle, inode);
2794                 ext3_journal_stop(handle);
2795         }
2796
2797         if (S_ISREG(inode->i_mode) &&
2798             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2799                 handle_t *handle;
2800
2801                 handle = ext3_journal_start(inode, 3);
2802                 if (IS_ERR(handle)) {
2803                         error = PTR_ERR(handle);
2804                         goto err_out;
2805                 }
2806
2807                 error = ext3_orphan_add(handle, inode);
2808                 EXT3_I(inode)->i_disksize = attr->ia_size;
2809                 rc = ext3_mark_inode_dirty(handle, inode);
2810                 if (!error)
2811                         error = rc;
2812                 ext3_journal_stop(handle);
2813         }
2814
2815         rc = inode_setattr(inode, attr);
2816
2817         /* If inode_setattr's call to ext3_truncate failed to get a
2818          * transaction handle at all, we need to clean up the in-core
2819          * orphan list manually. */
2820         if (inode->i_nlink)
2821                 ext3_orphan_del(NULL, inode);
2822
2823         if (!rc && (ia_valid & ATTR_MODE))
2824                 rc = ext3_acl_chmod(inode);
2825
2826 err_out:
2827         ext3_std_error(inode->i_sb, error);
2828         if (!error)
2829                 error = rc;
2830         return error;
2831 }
2832
2833
2834 /*
2835  * akpm: how many blocks doth make a writepage()?
2836  *
2837  * With N blocks per page, it may be:
2838  * N data blocks
2839  * 2 indirect blocks
2840  * 2 dindirect blocks
2841  * 1 tindirect block
2842  * N+5 bitmap blocks (from the above)
2843  * N+5 group descriptor summary blocks
2844  * 1 inode block
2845  * 1 superblock.
2846  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
2847  *
2848  * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
2849  *
2850  * With ordered or writeback data it's the same, less the N data blocks.
2851  *
2852  * If the inode's direct blocks can hold an integral number of pages then a
2853  * page cannot straddle two indirect blocks, and we can only touch one indirect
2854  * and dindirect block, and the "5" above becomes "3".
2855  *
2856  * This still overestimates under most circumstances.  If we were to pass the
2857  * start and end offsets in here as well we could do block_to_path() on each
2858  * block and work out the exact number of indirects which are touched.  Pah.
2859  */
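/*
 * A worked example (illustrative only, assuming 4096-byte pages and
 * 1024-byte blocks, so N = bpp = 4): EXT3_NDIR_BLOCKS is 12, an integral
 * multiple of 4, so a page cannot straddle two indirect blocks and
 * "indirects" below is 3.  Full data journaling then reserves
 * 3 * (4 + 3) + 2 = 23 blocks; ordered or writeback mode reserves
 * 2 * (4 + 3) + 2 = 16, plus the quota blocks when CONFIG_QUOTA is set.
 */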
2860
2861 int ext3_writepage_trans_blocks(struct inode *inode)
2862 {
2863         int bpp = ext3_journal_blocks_per_page(inode);
2864         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2865         int ret;
2866
2867         if (ext3_should_journal_data(inode))
2868                 ret = 3 * (bpp + indirects) + 2;
2869         else
2870                 ret = 2 * (bpp + indirects) + 2;
2871
2872 #ifdef CONFIG_QUOTA
2873         /* We know the quota structure was already allocated during DQUOT_INIT,
2874          * so we will be updating only its data blocks + inodes */
2875         ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
2876 #endif
2877
2878         return ret;
2879 }
2880
2881 /*
2882  * The caller must have previously called ext3_reserve_inode_write().
2883  * Given this, we know that the caller already has write access to iloc->bh.
2884  */
2885 int ext3_mark_iloc_dirty(handle_t *handle,
2886                 struct inode *inode, struct ext3_iloc *iloc)
2887 {
2888         int err = 0;
2889
2890         /* the do_update_inode consumes one bh->b_count */
2891         get_bh(iloc->bh);
2892
2893         /* ext3_do_update_inode() does journal_dirty_metadata */
2894         err = ext3_do_update_inode(handle, inode, iloc);
2895         put_bh(iloc->bh);
2896         return err;
2897 }
2898
2899 /* 
2900  * On success, we end up with an outstanding reference count against
2901  * iloc->bh.  This _must_ be cleaned up later. 
2902  */
2903
2904 int
2905 ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
2906                          struct ext3_iloc *iloc)
2907 {
2908         int err = 0;
2909         if (handle) {
2910                 err = ext3_get_inode_loc(inode, iloc, 1);
2911                 if (!err) {
2912                         BUFFER_TRACE(iloc->bh, "get_write_access");
2913                         err = ext3_journal_get_write_access(handle, iloc->bh);
2914                         if (err) {
2915                                 brelse(iloc->bh);
2916                                 iloc->bh = NULL;
2917                         }
2918                 }
2919         }
2920         ext3_std_error(inode->i_sb, err);
2921         return err;
2922 }
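
/*
 * Note: ext3_mark_inode_dirty() below is the canonical pairing of
 * ext3_reserve_inode_write() with ext3_mark_iloc_dirty(), which
 * consumes the buffer_head reference taken here.
 */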
2923
2924 /*
2925  * akpm: What we do here is to mark the in-core inode as clean
2926  * with respect to inode dirtiness (it may still be data-dirty).
2927  * This means that the in-core inode may be reaped by prune_icache
2928  * without having to perform any I/O.  This is a very good thing,
2929  * because *any* task may call prune_icache - even ones which
2930  * have a transaction open against a different journal.
2931  *
2932  * Is this cheating?  Not really.  Sure, we haven't written the
2933  * inode out, but prune_icache isn't a user-visible syncing function.
2934  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
2935  * we start and wait on commits.
2936  *
2937  * Is this efficient/effective?  Well, we're being nice to the system
2938  * by cleaning up our inodes proactively so they can be reaped
2939  * without I/O.  But we are potentially leaving up to five seconds'
2940  * worth of inodes floating about which prune_icache wants us to
2941  * write out.  One way to fix that would be to get prune_icache()
2942  * to do a write_super() to free up some memory, which would have
2943  * the desired effect.
2944  */
2945 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
2946 {
2947         struct ext3_iloc iloc;
2948         int err;
2949
2950         err = ext3_reserve_inode_write(handle, inode, &iloc);
2951         if (!err)
2952                 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2953         return err;
2954 }
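
/*
 * Typical usage (a sketch only, mirroring real call sites such as
 * ext3_setattr() above and ext3_dirty_inode() below): the caller must
 * hold a running handle with enough credits for the inode update:
 *
 *      handle = ext3_journal_start(inode, 2);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      inode->i_ctime = CURRENT_TIME;
 *      err = ext3_mark_inode_dirty(handle, inode);
 *      ext3_journal_stop(handle);
 */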
2955
2956 /*
2957  * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
2958  *
2959  * We're really interested in the case where a file is being extended.
2960  * i_size has been changed by generic_commit_write() and we thus need
2961  * to include the updated inode in the current transaction.
2962  *
2963  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
2964  * are allocated to the file.
2965  *
2966  * If the inode is marked synchronous, we don't honour that here - doing
2967  * so would cause a commit on atime updates, which we don't bother doing.
2968  * We handle synchronous inodes at the highest possible level.
2969  */
2970 void ext3_dirty_inode(struct inode *inode)
2971 {
2972         handle_t *current_handle = ext3_journal_current_handle();
2973         handle_t *handle;
2974
2975         handle = ext3_journal_start(inode, 2);
2976         if (IS_ERR(handle))
2977                 goto out;
2978         if (current_handle &&
2979                 current_handle->h_transaction != handle->h_transaction) {
2980                 /* This task has a transaction open against a different fs */
2981                 printk(KERN_EMERG "%s: transactions do not match!\n",
2982                        __FUNCTION__);
2983         } else {
2984                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
2985                                 current_handle);
2986                 ext3_mark_inode_dirty(handle, inode);
2987         }
2988         ext3_journal_stop(handle);
2989 out:
2990         return;
2991 }
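
/*
 * Call-chain sketch (illustrative): extending a file through the
 * generic write path reaches ext3_dirty_inode() roughly as
 *
 *      generic_commit_write()
 *          i_size update
 *          mark_inode_dirty()
 *              __mark_inode_dirty()
 *                  ext3_dirty_inode()
 */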
2992
2993 #ifdef AKPM
2994 /* 
2995  * Bind an inode's backing buffer_head into this transaction, to prevent
2996  * it from being flushed to disk early.  Unlike
2997  * ext3_reserve_inode_write, this leaves behind no bh reference and
2998  * returns no iloc structure, so the caller needs to repeat the iloc
2999  * lookup to mark the inode dirty later.
3000  */
3001 static inline int
3002 ext3_pin_inode(handle_t *handle, struct inode *inode)
3003 {
3004         struct ext3_iloc iloc;
3005
3006         int err = 0;
3007         if (handle) {
3008                 err = ext3_get_inode_loc(inode, &iloc, 1);
3009                 if (!err) {
3010                         BUFFER_TRACE(iloc.bh, "get_write_access");
3011                         err = journal_get_write_access(handle, iloc.bh);
3012                         if (!err)
3013                                 err = ext3_journal_dirty_metadata(handle, 
3014                                                                   iloc.bh);
3015                         brelse(iloc.bh);
3016                 }
3017         }
3018         ext3_std_error(inode->i_sb, err);
3019         return err;
3020 }
3021 #endif
3022
3023 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3024 {
3025         journal_t *journal;
3026         handle_t *handle;
3027         int err;
3028
3029         /*
3030          * We have to be very careful here: changing a data block's
3031          * journaling status dynamically is dangerous.  If we write a
3032          * data block to the journal, change the status and then delete
3033          * that block, we risk forgetting to revoke the old log record
3034          * from the journal and so a subsequent replay can corrupt data.
3035          * So, first we make sure that the journal is empty and that
3036          * nobody is changing anything.
3037          */
3038
3039         journal = EXT3_JOURNAL(inode);
3040         if (is_journal_aborted(journal) || IS_RDONLY(inode))
3041                 return -EROFS;
3042
3043         journal_lock_updates(journal);
3044         journal_flush(journal);
3045
3046         /*
3047          * OK, there are no updates running now, and all cached data is
3048          * synced to disk.  We are now in a completely consistent state
3049          * which doesn't have anything in the journal, and we know that
3050          * no filesystem updates are running, so it is safe to modify
3051          * the inode's in-core data-journaling state flag now.
3052          */
3053
3054         if (val)
3055                 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3056         else
3057                 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3058         ext3_set_aops(inode);
3059
3060         journal_unlock_updates(journal);
3061
3062         /* Finally we can mark the inode as dirty. */
3063
3064         handle = ext3_journal_start(inode, 1);
3065         if (IS_ERR(handle))
3066                 return PTR_ERR(handle);
3067
3068         err = ext3_mark_inode_dirty(handle, inode);
3069         handle->h_sync = 1;
3070         ext3_journal_stop(handle);
3071         ext3_std_error(inode->i_sb, err);
3072
3073         return err;
3074 }
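
/*
 * For reference (a sketch of the expected caller, the EXT3_IOC_SETFLAGS
 * ioctl path; illustrative, not a verbatim call site): the change is
 * only routed through here when the journal-data bit actually flips:
 *
 *      if ((flags ^ oldflags) & EXT3_JOURNAL_DATA_FL) {
 *              err = ext3_change_inode_journal_flag(inode,
 *                              (flags & EXT3_JOURNAL_DATA_FL) != 0);
 *              if (err)
 *                      return err;
 *      }
 */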