/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include "xattr.h"
#include "acl.h"

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}
53
54 /* The ext3 forget function must perform a revoke if we are freeing data
55  * which has been journaled.  Metadata (eg. indirect blocks) must be
56  * revoked in all cases. 
57  *
58  * "bh" may be NULL: a metadata block may have been freed from memory
59  * but there may still be a record of it in the journal, and that record
60  * still needs to be revoked.
61  */
62
63 int ext3_forget(handle_t *handle, int is_metadata,
64                        struct inode *inode, struct buffer_head *bh,
65                        int blocknr)
66 {
67         int err;
68
69         BUFFER_TRACE(bh, "enter");
70
71         jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72                   "data mode %lx\n",
73                   bh, is_metadata, inode->i_mode,
74                   test_opt(inode->i_sb, DATA_FLAGS));
75
76         /* Never use the revoke function if we are doing full data
77          * journaling: there is no need to, and a V1 superblock won't
78          * support it.  Otherwise, only skip the revoke on un-journaled
79          * data blocks. */
80
81         if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
82             (!is_metadata && !ext3_should_journal_data(inode))) {
83                 if (bh) {
84                         BUFFER_TRACE(bh, "call journal_forget");
85                         ext3_journal_forget(handle, bh);
86                 }
87                 return 0;
88         }
89
90         /*
91          * data!=journal && (is_metadata || should_journal_data(inode))
92          */
93         BUFFER_TRACE(bh, "call ext3_journal_revoke");
94         err = ext3_journal_revoke(handle, blocknr, bh);
95         if (err)
96                 ext3_abort(inode->i_sb, __FUNCTION__,
97                            "error %d when attempting revoke", err);
98         BUFFER_TRACE(bh, "exit");
99         return err;
100 }
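
/*
 * Editor's note (illustrative summary, not from the original source):
 * the forget-vs-revoke decision above works out to:
 *
 *      data=journal mount              -> journal_forget (never revoke)
 *      un-journaled data block         -> journal_forget
 *      metadata block                  -> revoke
 *      journaled data block            -> revoke
 *
 * The revoke record stops journal recovery from replaying a stale image
 * of the block over whatever was written there after the block was freed.
 */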

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */

static unsigned long blocks_for_truncate(struct inode *inode)
{
        unsigned long needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext3 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;

        return EXT3_DATA_TRANS_BLOCKS + needed;
}
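
/*
 * Worked example (editor's addition): with 4KB blocks, s_blocksize_bits
 * is 12, so the shift above turns i_blocks (512-byte sectors) into
 * filesystem blocks by dividing by 8.  A 1GB file has i_blocks == 2097152,
 * giving needed == 262144, which is then clamped to EXT3_MAX_TRANS_DATA
 * so a single truncate transaction can never overflow the journal.
 */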

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */

static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext3_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext3_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext3_journal_restart(handle, blocks_for_truncate(inode));
}
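
/*
 * Usage sketch (editor's addition, hedged): the truncate paths further
 * down this file use the three helpers above in roughly this pattern,
 * dirtying everything consistently before each restart:
 *
 *      handle = start_transaction(inode);
 *      while (more blocks to free) {
 *              if (try_to_extend_transaction(handle, inode))
 *                      ext3_journal_test_restart(handle, inode);
 *              ... free the next chunk ...
 *      }
 */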

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
        handle_t *handle;

        if (is_bad_inode(inode))
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /* If we're going to skip the normal cleanup, we still
                 * need to make sure that the in-core orphan linked list
                 * is properly cleaned up. */
                ext3_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext3_truncate(inode);
        /*
         * Kill off the orphan record which ext3_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext3_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext3_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext3_free_inode(handle, inode);
        ext3_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

static int ext3_alloc_block (handle_t *handle,
                        struct inode * inode, unsigned long goal, int *err)
{
        unsigned long result;

        result = ext3_new_block (handle, inode, goal, err);
        return result;
}


typedef struct {
        u32     *p;
        u32     key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 *      ext3_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of file's data ext3 uses a data structure common
 *      for UNIX filesystems - tree of pointers anchored in the inode, with
 *      data blocks at leaves and indirect blocks in intermediate nodes.
 *      This function translates the block number into a path in that tree -
 *      return value is the path length and @offsets[n] is the offset of
 *      pointer to (n+1)th node in the nth one. If @i_block is out of range
 *      (negative or too large) a warning is printed and zero returned.
 *
 *      Note: function doesn't find node addresses, so no IO is needed. All
 *      we need to know is the capacity of indirect blocks (taken from the
 *      inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT3_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ( (i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT3_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT3_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT3_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
                *boundary = (i_block & (ptrs - 1)) == (final - 1);
        return n;
}
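
/*
 * Worked example (editor's addition, assuming 4KB blocks, so ptrs == 1024
 * and EXT3_NDIR_BLOCKS == 12):
 *
 *      i_block == 5    -> offsets = { 5 }          depth 1 (direct)
 *      i_block == 100  -> offsets = { 12, 88 }     depth 2 (indirect)
 *      i_block == 1036 -> offsets = { 13, 0, 0 }   depth 3 (double indirect)
 *
 * For 1036: subtracting the 12 direct and 1024 singly-indirect blocks
 * leaves 0, so it is the first block reachable through the double-indirect
 * tree (EXT3_DIND_BLOCK == 13 being its slot in i_data[]).
 */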

/**
 *      ext3_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it notices that chain had been changed while it was reading
 *              (ditto, *@err == -EAGAIN)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}
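
/*
 * Usage sketch (editor's addition): callers distinguish the outcomes
 * roughly as below; compare ext3_get_block_handle() further down.
 *
 *      partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 *      if (!partial)
 *              block = le32_to_cpu(chain[depth-1].key);   (fully mapped)
 *      else if (err == -EAGAIN)
 *              (chain changed under us: brelse() the partial chain, retry)
 *      else if (err == 0)
 *              (hole at *partial: allocate from there if create != 0)
 */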

/**
 *      ext3_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */

static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
        u32 *p;
        unsigned long bg_start;
        unsigned long colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * Is it going to be referred to from the inode itself? OK, just put
         * it into the same cylinder group then.
         */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}
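
/*
 * Worked example (editor's addition): with 32768 blocks per group the
 * colouring above carves the group into 16 slots of 2048 blocks, so a
 * task with pid 4103 (4103 % 16 == 7) starts its search 7 * 2048 == 14336
 * blocks into the inode's block group.
 */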

/**
 *      ext3_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @chain:  chain of indirect blocks
 *      @partial: pointer to the last triple within a chain
 *      @goal:  place to store the result.
 *
 *      Normally this function finds the preferred place for block allocation,
 *      stores it in *@goal and returns zero. If the branch had been changed
 *      under us we return -EAGAIN.
 */

static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
                          Indirect *partial, unsigned long *goal)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_next_alloc* */
        if (block == ei->i_next_alloc_block + 1) {
                ei->i_next_alloc_block++;
                ei->i_next_alloc_goal++;
        }
        /* Writer: end */
        /* Reader: pointers, ->i_next_alloc* */
        if (verify_chain(chain, partial)) {
                /*
                 * try the heuristic for sequential allocation,
                 * failing that at least try to get decent locality.
                 */
                if (block == ei->i_next_alloc_block)
                        *goal = ei->i_next_alloc_goal;
                if (!*goal)
                        *goal = ext3_find_near(inode, partial);
                return 0;
        }
        /* Reader: end */
        return -EAGAIN;
}

/**
 *      ext3_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @num: depth of the chain (number of blocks to allocate)
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates @num blocks, zeroes out all but the last one,
 *      links them into chain and (if we are synchronous) writes them to disk.
 *      In other words, it prepares a branch that can be spliced onto the
 *      inode. It stores the information about that chain in the branch[], in
 *      the same format as ext3_get_branch() would do. We are calling it after
 *      we had read the existing part of chain and partial points to the last
 *      triple of that (one with zero ->key). Upon the exit we have the same
 *      picture as after the successful ext3_get_block(), except that in one
 *      place chain is disconnected - *branch->p is still zero (we did not
 *      set the last link), but branch->key contains the number that should
 *      be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */

static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                             int num,
                             unsigned long goal,
                             int *offsets,
                             Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int n = 0, keys = 0;
        int err = 0;
        int i;
        int parent = ext3_alloc_block(handle, inode, goal, &err);

        branch[0].key = cpu_to_le32(parent);
        if (parent) {
                for (n = 1; n < num; n++) {
                        struct buffer_head *bh;
                        /* Allocate the next block */
                        int nr = ext3_alloc_block(handle, inode, parent, &err);
                        if (!nr)
                                break;
                        branch[n].key = cpu_to_le32(nr);
                        keys = n+1;

                        /*
                         * Get buffer_head for parent block, zero it out
                         * and set the pointer to new one, then send
                         * parent to disk.
                         */
                        bh = sb_getblk(inode->i_sb, parent);
                        branch[n].bh = bh;
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        err = ext3_journal_get_create_access(handle, bh);
                        if (err) {
                                unlock_buffer(bh);
                                brelse(bh);
                                break;
                        }

                        memset(bh->b_data, 0, blocksize);
                        branch[n].p = (u32*) bh->b_data + offsets[n];
                        *branch[n].p = branch[n].key;
                        BUFFER_TRACE(bh, "marking uptodate");
                        set_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (err)
                                break;

                        parent = nr;
                }
        }
        if (n == num)
                return 0;

        /* Allocation failed, free what we already allocated */
        for (i = 1; i < keys; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < keys; i++)
                ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
        return err;
}
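
/*
 * Illustrative state (editor's addition): after a successful three-level
 * allocation, branch[] describes a fully linked but still detached subtree:
 *
 *      branch[0].key = D  (new double-indirect block; *branch[0].p not set)
 *      branch[1].key = I  (new indirect block, already pointed to by D)
 *      branch[2].key = B  (new data block, already pointed to by I)
 *
 * Only the topmost link is missing; ext3_splice_branch() below fills in
 * *branch[0].p, making the whole subtree visible atomically.
 */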

/**
 *      ext3_splice_branch - splice the allocated branch onto inode.
 *      @inode: owner
 *      @block: (logical) number of block we are adding
 *      @chain: chain of indirect blocks (with a missing link - see
 *              ext3_alloc_branch)
 *      @where: location of missing link
 *      @num:   number of blocks we are adding
 *
 *      This function verifies that chain (up to the missing link) had not
 *      changed, fills the missing link and does all housekeeping needed in
 *      inode (->i_blocks, etc.). In case of success we end up with the full
 *      chain to new block and return 0. Otherwise (== chain had been changed)
 *      we free the new blocks (forgetting their buffer_heads, indeed) and
 *      return -EAGAIN.
 */

static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                              Indirect chain[4], Indirect *where, int num)
{
        int i;
        int err = 0;
        struct ext3_inode_info *ei = EXT3_I(inode);

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* Verify that place we are splicing to is still there and vacant */

        /* Writer: pointers, ->i_next_alloc* */
        if (!verify_chain(chain, where-1) || *where->p)
                /* Writer: end */
                goto changed;

        /* That's it */

        *where->p = where->key;
        ei->i_next_alloc_block = block;
        ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
        /* Writer: end */

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * akpm: If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

changed:
        /*
         * AKPM: if where[i].bh isn't part of the current updating
         * transaction then we explode nastily.  Test this code path.
         */
        jbd_debug(1, "the chain changed: try again\n");
        err = -EAGAIN;

err_out:
        for (i = 1; i < num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
        }
        /* For the normal collision cleanup case, we free up the blocks.
         * On genuine filesystem errors we don't even think about doing
         * that. */
        if (err == -EAGAIN)
                for (i = 0; i < num; i++)
                        ext3_free_blocks(handle, inode,
                                         le32_to_cpu(where[i].key), 1);
        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create, int extend_disksize)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        unsigned long goal;
        int left;
        int boundary = 0;
        int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
        struct ext3_inode_info *ei = EXT3_I(inode);

        J_ASSERT(handle != NULL || create == 0);

        if (depth == 0)
                goto out;

reread:
        partial = ext3_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                clear_buffer_new(bh_result);
got_it:
                map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
                if (boundary)
                        set_buffer_boundary(bh_result);
                /* Clean up and exit */
                partial = chain+depth-1; /* the whole chain */
                goto cleanup;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO) {
cleanup:
                while (partial > chain) {
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
                        partial--;
                }
                BUFFER_TRACE(bh_result, "returned");
out:
                return err;
        }

        /*
         * Indirect block might be removed by truncate while we were
         * reading it. Handling of that case (forget what we've got and
         * reread) is taken out of the main path.
         */
        if (err == -EAGAIN)
                goto changed;

        goal = 0;
        down(&ei->truncate_sem);
        if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
                up(&ei->truncate_sem);
                goto changed;
        }

        left = (chain + depth) - partial;

        /*
         * Block out ext3_truncate while we alter the tree
         */
        err = ext3_alloc_branch(handle, inode, left, goal,
                                        offsets+(partial-chain), partial);

        /* The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock, chain,
                                         partial, left);
        /* i_disksize growing is protected by truncate_sem
         * don't forget to protect it if you're about to implement
         * concurrent ext3_get_block() -bzzz */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        up(&ei->truncate_sem);
        if (err == -EAGAIN)
                goto changed;
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
        goto got_it;

changed:
        while (partial > chain) {
                jbd_debug(1, "buffer chain changed, retrying\n");
                BUFFER_TRACE(partial->bh, "brelsing");
                brelse(partial->bh);
                partial--;
        }
        goto reread;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = 0;
        int ret;

        if (create) {
                handle = ext3_journal_current_handle();
                J_ASSERT(handle != 0);
        }
        ret = ext3_get_block_handle(handle, inode, iblock,
                                bh_result, create, 1);
        return ret;
}

#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh_result,
                int create)
{
        handle_t *handle = journal_current_handle();
        int ret = 0;

        if (handle && handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
                /*
                 * Getting low on buffer credits...  Note that, as with
                 * try_to_extend_transaction() above, ext3_journal_extend()
                 * returns 0 on success, so a nonzero return means the
                 * extend failed.
                 */
                if (ext3_journal_extend(handle, DIO_CREDITS) != 0) {
                        /*
                         * Couldn't extend the transaction.  Start a new one.
                         */
                        ret = ext3_journal_restart(handle, DIO_CREDITS);
                }
        }
        if (ret == 0)
                ret = ext3_get_block_handle(handle, inode, iblock,
                                        bh_result, create, 0);
        if (ret == 0)
                bh_result->b_size = (1 << inode->i_blkbits);
        return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
                                long block, int create, int * errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
        if (!*errp && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != 0);

                        /* Now that we do not always journal data, we
                           should keep in mind whether this should
                           always journal the new buffer as metadata.
                           For now, regular file writes use
                           ext3_get_block instead, so it's not a
                           problem. */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
        return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                               int block, int create, int *err)
{
        struct buffer_head * bh;
        int prev_blocks;

        prev_blocks = inode->i_blocks;

        bh = ext3_getblk (handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block (READ, 1, &bh);
        wait_on_buffer (bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse (bh);
        *err = -EIO;
        return NULL;
}

static int walk_page_buffers(   handle_t *handle,
                                struct buffer_head *head,
                                unsigned from,
                                unsigned to,
                                int *partial,
                                int (*fn)(      handle_t *handle,
                                                struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
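
/*
 * Usage sketch (editor's addition): walk_page_buffers() applies @fn to
 * each buffer_head overlapping the byte range [@from, @to) in the page,
 * stopping at the first callback error, which it then returns.  The
 * commit paths below use it like:
 *
 *      walk_page_buffers(handle, page_buffers(page), from, to,
 *                        NULL, do_journal_get_write_access);
 *
 * Buffers wholly outside the range are only checked for uptodate-ness,
 * feeding the optional @partial flag.
 */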

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */

static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;

retry:
        handle = ext3_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }
        ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret)
                goto prepare_write_failed;

        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }
prepare_write_failed:
        if (ret)
                ext3_journal_stop(handle);
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}

static int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
        int err = journal_dirty_data(handle, bh);
        if (err)
                ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
                                                bh, handle, err);
        return err;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */

static int ext3_ordered_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;

        ret = walk_page_buffers(handle, page_buffers(page),
                from, to, NULL, ext3_journal_dirty_data);

        if (ret == 0) {
                /*
                 * generic_commit_write() will run mark_inode_dirty() if i_size
                 * changes.  So let's piggyback the i_disksize mark_inode_dirty
                 * into that.
                 */
                loff_t new_i_size;

                new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
                if (new_i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = new_i_size;
                ret = generic_commit_write(file, page, from, to);
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_writeback_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        loff_t new_i_size;

        new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        if (new_i_size > EXT3_I(inode)->i_disksize)
                EXT3_I(inode)->i_disksize = new_i_size;
        ret = generic_commit_write(file, page, from, to);
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_journalled_commit_write(struct file *file,
                        struct page *page, unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        loff_t pos;

        /*
         * Here we duplicate the generic_commit_write() functionality
         */
        pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, commit_write_fn);
        if (!partial)
                SetPageUptodate(page);
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        journal_t *journal;
        int err;

        if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
                 * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
                 * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files.  If somebody wants to bmap a directory
                 * or symlink and gets confused because the buffer
                 * hasn't yet been flushed to disk, they deserve
                 * everything they get.
                 */

                EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
                journal = EXT3_JOURNAL(inode);
                journal_lock_updates(journal);
                err = journal_flush(journal);
                journal_unlock_updates(journal);

                if (err)
                        return 0;
        }

        return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
        get_bh(bh);
        return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
        put_bh(bh);
        return 0;
}

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
        if (buffer_mapped(bh))
                return ext3_journal_dirty_data(handle, bh);
        return 0;
}

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *      ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *              ext3_writepage()
 *
 * Similar for:
 *
 *      ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *          non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bufs;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        J_ASSERT(PageLocked(page));

        /*
         * We give up here if we're reentered, because it might be for a
         * different filesystem.
         */
        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        page_bufs = page_buffers(page);
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bget_one);

        ret = block_write_full_page(page, ext3_get_block, wbc);

        /*
         * The page can become unlocked at any point now, and
         * truncate can then come in and change things.  So we
         * can't touch *page from now on.  But *page_bufs is
         * safe due to elevated refcount.
         */

        /*
         * And attach them to the current transaction.  But only if
         * block_write_full_page() succeeded.  Otherwise they are unmapped,
         * and generally junk.
         */
        if (ret == 0) {
                err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
                                        NULL, journal_dirty_data_fn);
                if (!ret)
                        ret = err;
        }
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bput_one);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

static int ext3_writeback_writepage(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        ret = block_write_full_page(page, ext3_get_block, wbc);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

static int ext3_journalled_writepage(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        if (ext3_journal_current_handle())
                goto no_write;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto no_write;
        }

        if (!page_has_buffers(page) || PageChecked(page)) {
                /*
                 * It's mmapped pagecache.  Add buffers and journal it.  There
                 * doesn't seem much point in redirtying the page here.
                 */
                ClearPageChecked(page);
                ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
                                        ext3_get_block);
                if (ret != 0)
                        goto out_unlock;
                ret = walk_page_buffers(handle, page_buffers(page), 0,
                        PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

                err = walk_page_buffers(handle, page_buffers(page), 0,
                                PAGE_CACHE_SIZE, NULL, commit_write_fn);
                if (ret == 0)
                        ret = err;
                EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
                unlock_page(page);
        } else {
                /*
                 * It may be a page full of checkpoint-mode buffers.  We don't
                 * really know unless we go poke around in the buffer_heads.
                 * But block_write_full_page will do the right thing.
                 */
                ret = block_write_full_page(page, ext3_get_block, wbc);
        }
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
out:
        return ret;

no_write:
        redirty_page_for_writepage(wbc, page);
out_unlock:
        unlock_page(page);
        goto out;
}
1394
1395 static int ext3_readpage(struct file *file, struct page *page)
1396 {
1397         return mpage_readpage(page, ext3_get_block);
1398 }
1399
1400 static int
1401 ext3_readpages(struct file *file, struct address_space *mapping,
1402                 struct list_head *pages, unsigned nr_pages)
1403 {
1404         return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1405 }
1406
1407 static int ext3_invalidatepage(struct page *page, unsigned long offset)
1408 {
1409         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1410
1411         /*
1412          * If it's a full truncate we just forget about the pending dirtying
1413          */
1414         if (offset == 0)
1415                 ClearPageChecked(page);
1416
1417         return journal_invalidatepage(journal, page, offset);
1418 }
1419
1420 static int ext3_releasepage(struct page *page, int wait)
1421 {
1422         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1423
1424         WARN_ON(PageChecked(page));
1425         return journal_try_to_free_buffers(journal, page, wait);
1426 }
1427
1428 /*
1429  * If the O_DIRECT write will extend the file then add this inode to the
1430  * orphan list, so that recovery will truncate it back to the original size
1431  * if the machine crashes during the write.
1432  *
1433  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1434  * crashes then stale disk data _may_ be exposed inside the file.
1435  */
1436 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1437                         const struct iovec *iov, loff_t offset,
1438                         unsigned long nr_segs)
1439 {
1440         struct file *file = iocb->ki_filp;
1441         struct inode *inode = file->f_mapping->host;
1442         struct ext3_inode_info *ei = EXT3_I(inode);
1443         handle_t *handle = NULL;
1444         ssize_t ret;
1445         int orphan = 0;
1446         size_t count = iov_length(iov, nr_segs);
1447
1448         if (rw == WRITE) {
1449                 loff_t final_size = offset + count;
1450
1451                 handle = ext3_journal_start(inode, DIO_CREDITS);
1452                 if (IS_ERR(handle)) {
1453                         ret = PTR_ERR(handle);
1454                         goto out;
1455                 }
1456                 if (final_size > inode->i_size) {
1457                         ret = ext3_orphan_add(handle, inode);
1458                         if (ret)
1459                                 goto out_stop;
1460                         orphan = 1;
1461                         ei->i_disksize = inode->i_size;
1462                 }
1463         }
1464
1465         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
1466                                  offset, nr_segs,
1467                                  ext3_direct_io_get_blocks, NULL);
1468
1469 out_stop:
1470         if (handle) {
1471                 int err;
1472
1473                 if (orphan) 
1474                         ext3_orphan_del(handle, inode);
1475                 if (orphan && ret > 0) {
1476                         loff_t end = offset + ret;
1477                         if (end > inode->i_size) {
1478                                 ei->i_disksize = end;
1479                                 i_size_write(inode, end);
1480                                 /* ret > 0: any error here cannot be
1481                                  * reported to the caller, ignore it */
1482                                 ext3_mark_inode_dirty(handle, inode);
1483                         }
1484                 }
1485                 err = ext3_journal_stop(handle);
1486                 if (ret == 0)
1487                         ret = err;
1488         }
1489 out:
1490         return ret;
1491 }
1492
1493 /*
1494  * Pages can be marked dirty completely asynchronously from ext3's journalling
1495  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1496  * much here because ->set_page_dirty is called under VFS locks.  The page is
1497  * not necessarily locked.
1498  *
1499  * We cannot just dirty the page and leave attached buffers clean, because the
1500  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1501  * or jbddirty because all the journalling code will explode.
1502  *
1503  * So what we do is to mark the page "pending dirty" and next time writepage
1504  * is called, propagate that into the buffers appropriately.
1505  */
1506 static int ext3_journalled_set_page_dirty(struct page *page)
1507 {
1508         SetPageChecked(page);
1509         return __set_page_dirty_nobuffers(page);
1510 }
1511
1512 static struct address_space_operations ext3_ordered_aops = {
1513         .readpage       = ext3_readpage,
1514         .readpages      = ext3_readpages,
1515         .writepage      = ext3_ordered_writepage,
1516         .sync_page      = block_sync_page,
1517         .prepare_write  = ext3_prepare_write,
1518         .commit_write   = ext3_ordered_commit_write,
1519         .bmap           = ext3_bmap,
1520         .invalidatepage = ext3_invalidatepage,
1521         .releasepage    = ext3_releasepage,
1522         .direct_IO      = ext3_direct_IO,
1523 };
1524
1525 static struct address_space_operations ext3_writeback_aops = {
1526         .readpage       = ext3_readpage,
1527         .readpages      = ext3_readpages,
1528         .writepage      = ext3_writeback_writepage,
1529         .sync_page      = block_sync_page,
1530         .prepare_write  = ext3_prepare_write,
1531         .commit_write   = ext3_writeback_commit_write,
1532         .bmap           = ext3_bmap,
1533         .invalidatepage = ext3_invalidatepage,
1534         .releasepage    = ext3_releasepage,
1535         .direct_IO      = ext3_direct_IO,
1536 };
1537
1538 static struct address_space_operations ext3_journalled_aops = {
1539         .readpage       = ext3_readpage,
1540         .readpages      = ext3_readpages,
1541         .writepage      = ext3_journalled_writepage,
1542         .sync_page      = block_sync_page,
1543         .prepare_write  = ext3_prepare_write,
1544         .commit_write   = ext3_journalled_commit_write,
1545         .set_page_dirty = ext3_journalled_set_page_dirty,
1546         .bmap           = ext3_bmap,
1547         .invalidatepage = ext3_invalidatepage,
1548         .releasepage    = ext3_releasepage,
1549 };
1550
1551 void ext3_set_aops(struct inode *inode)
1552 {
1553         if (ext3_should_order_data(inode))
1554                 inode->i_mapping->a_ops = &ext3_ordered_aops;
1555         else if (ext3_should_writeback_data(inode))
1556                 inode->i_mapping->a_ops = &ext3_writeback_aops;
1557         else
1558                 inode->i_mapping->a_ops = &ext3_journalled_aops;
1559 }
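
     /*
      * For reference, the data= journalling modes select the aops above:
      * data=ordered -> ext3_ordered_aops, data=writeback ->
      * ext3_writeback_aops, data=journal -> ext3_journalled_aops.  Note
      * that the journalled aops have no .direct_IO, so O_DIRECT is
      * unavailable for journalled-data inodes.
      */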
1560
1561 /*
1562  * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1563  * up to the end of the block which corresponds to `from'.
1564  * This is required during truncate. We need to physically zero the tail end
1565  * of that block so it doesn't yield old data if the file is later grown.
1566  */
1567 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1568                 struct address_space *mapping, loff_t from)
1569 {
1570         unsigned long index = from >> PAGE_CACHE_SHIFT;
1571         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1572         unsigned blocksize, iblock, length, pos;
1573         struct inode *inode = mapping->host;
1574         struct buffer_head *bh;
1575         int err;
1576         void *kaddr;
1577
1578         blocksize = inode->i_sb->s_blocksize;
1579         length = blocksize - (offset & (blocksize - 1));
1580         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
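
             /*
              * Worked example (illustrative): with 4K pages and 1K blocks,
              * from == 0x1600 gives offset == 0x600 and length == 0x200,
              * so we zero the last 512 bytes of the page's second block.
              */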
1581
1582         if (!page_has_buffers(page))
1583                 create_empty_buffers(page, blocksize, 0);
1584
1585         /* Find the buffer that contains "offset" */
1586         bh = page_buffers(page);
1587         pos = blocksize;
1588         while (offset >= pos) {
1589                 bh = bh->b_this_page;
1590                 iblock++;
1591                 pos += blocksize;
1592         }
1593
1594         err = 0;
1595         if (buffer_freed(bh)) {
1596                 BUFFER_TRACE(bh, "freed: skip");
1597                 goto unlock;
1598         }
1599
1600         if (!buffer_mapped(bh)) {
1601                 BUFFER_TRACE(bh, "unmapped");
1602                 ext3_get_block(inode, iblock, bh, 0);
1603                 /* unmapped? It's a hole - nothing to do */
1604                 if (!buffer_mapped(bh)) {
1605                         BUFFER_TRACE(bh, "still unmapped");
1606                         goto unlock;
1607                 }
1608         }
1609
1610         /* Ok, it's mapped. Make sure it's up-to-date */
1611         if (PageUptodate(page))
1612                 set_buffer_uptodate(bh);
1613
1614         if (!buffer_uptodate(bh)) {
1615                 err = -EIO;
1616                 ll_rw_block(READ, 1, &bh);
1617                 wait_on_buffer(bh);
1618                 /* Uhhuh. Read error. Complain and punt. */
1619                 if (!buffer_uptodate(bh))
1620                         goto unlock;
1621         }
1622
1623         if (ext3_should_journal_data(inode)) {
1624                 BUFFER_TRACE(bh, "get write access");
1625                 err = ext3_journal_get_write_access(handle, bh);
1626                 if (err)
1627                         goto unlock;
1628         }
1629
1630         kaddr = kmap_atomic(page, KM_USER0);
1631         memset(kaddr + offset, 0, length);
1632         flush_dcache_page(page);
1633         kunmap_atomic(kaddr, KM_USER0);
1634
1635         BUFFER_TRACE(bh, "zeroed end of block");
1636
1637         err = 0;
1638         if (ext3_should_journal_data(inode)) {
1639                 err = ext3_journal_dirty_metadata(handle, bh);
1640         } else {
1641                 if (ext3_should_order_data(inode))
1642                         err = ext3_journal_dirty_data(handle, bh);
1643                 mark_buffer_dirty(bh);
1644         }
1645
1646 unlock:
1647         unlock_page(page);
1648         page_cache_release(page);
1649         return err;
1650 }
1651
1652 /*
1653  * Probably it should be a library function... search for first non-zero word
1654  * or memcmp with zero_page, whatever is better for a particular architecture.
1655  * Linus?
1656  */
1657 static inline int all_zeroes(u32 *p, u32 *q)
1658 {
1659         while (p < q)
1660                 if (*p++)
1661                         return 0;
1662         return 1;
1663 }
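
     /*
      * A minimal sketch of the memcmp-with-zero_page variant suggested
      * above (hypothetical and not wired up here; assumes the span fits
      * within empty_zero_page):
      *
      *         static inline int all_zeroes_memcmp(u32 *p, u32 *q)
      *         {
      *                 return !memcmp(p, empty_zero_page,
      *                                (q - p) * sizeof(u32));
      *         }
      */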
1664
1665 /**
1666  *      ext3_find_shared - find the indirect blocks for partial truncation.
1667  *      @inode:   inode in question
1668  *      @depth:   depth of the affected branch
1669  *      @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1670  *      @chain:   place to store the pointers to partial indirect blocks
1671  *      @top:     place to store the (detached) top of the branch
1672  *
1673  *      This is a helper function used by ext3_truncate().
1674  *
1675  *      When we do truncate() we may have to clean the ends of several
1676  *      indirect blocks but leave the blocks themselves alive. Block is
1677  *      partially truncated if some data below the new i_size is referenced
1678  *      from it (and it is on the path to the first completely truncated
1679  *      data block, indeed).  We have to free the top of that path along
1680  *      with everything to the right of the path. Since no allocation
1681  *      past the truncation point is possible until ext3_truncate()
1682  *      finishes, we may safely do the latter, but top of branch may
1683  *      require special attention - pageout below the truncation point
1684  *      might try to populate it.
1685  *
1686  *      We atomically detach the top of branch from the tree, store the
1687  *      block number of its root in *@top, pointers to buffer_heads of
1688  *      partially truncated blocks - in @chain[].bh and pointers to
1689  *      their last elements that should not be removed - in
1690  *      @chain[].p. Return value is the pointer to last filled element
1691  *      of @chain.
1692  *
1693  *      The work left to the caller is the actual freeing of subtrees:
1694  *              a) free the subtree starting from *@top
1695  *              b) free the subtrees whose roots are stored in
1696  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1697  *              c) free the subtrees growing from the inode past @chain[0]
1698  *                      (no partially truncated stuff there).  */
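
     /*
      * Example (illustrative): for a truncation point inside the
      * double-indirect area, @chain[] walks inode -> double-indirect
      * block -> indirect block; each @chain[i].p marks the last pointer
      * at that level that must survive, and *@top is set only when a
      * whole subtree has to be detached.
      */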
1699
1700 static Indirect *ext3_find_shared(struct inode *inode,
1701                                 int depth,
1702                                 int offsets[4],
1703                                 Indirect chain[4],
1704                                 u32 *top)
1705 {
1706         Indirect *partial, *p;
1707         int k, err;
1708
1709         *top = 0;
1710         /* Make k index the deepest non-null offset + 1 */
1711         for (k = depth; k > 1 && !offsets[k-1]; k--)
1712                 ;
1713         partial = ext3_get_branch(inode, k, offsets, chain, &err);
1714         /* Writer: pointers */
1715         if (!partial)
1716                 partial = chain + k-1;
1717         /*
1718          * If the branch acquired continuation since we've looked at it -
1719          * fine, it should all survive and (new) top doesn't belong to us.
1720          */
1721         if (!partial->key && *partial->p)
1722                 /* Writer: end */
1723                 goto no_top;
1724         for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
1725                 ;
1726         /*
1727          * OK, we've found the last block that must survive. The rest of our
1728          * branch should be detached before unlocking. However, if that rest
1729          * of branch is all ours and does not grow immediately from the inode
1730          * it's easier to cheat and just decrement partial->p.
1731          */
1732         if (p == chain + k - 1 && p > chain) {
1733                 p->p--;
1734         } else {
1735                 *top = *p->p;
1736                 /* Nope, don't do this in ext3.  Must leave the tree intact */
1737 #if 0
1738                 *p->p = 0;
1739 #endif
1740         }
1741         /* Writer: end */
1742
1743         while (partial > p) {
1744                 brelse(partial->bh);
1745                 partial--;
1746         }
1748 no_top:
1749         return partial;
1750 }
1751
1752 /*
1753  * Zero a number of block pointers in either an inode or an indirect block.
1754  * If we restart the transaction we must again get write access to the
1755  * indirect block for further modification.
1756  *
1757  * We release `count' blocks on disk, but (last - first) may be greater
1758  * than `count' because there can be holes in there.
1759  */
1760 static void
1761 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
1762                 unsigned long block_to_free, unsigned long count,
1763                 u32 *first, u32 *last)
1764 {
1765         u32 *p;
1766         if (try_to_extend_transaction(handle, inode)) {
1767                 if (bh) {
1768                         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1769                         ext3_journal_dirty_metadata(handle, bh);
1770                 }
1771                 ext3_mark_inode_dirty(handle, inode);
1772                 ext3_journal_test_restart(handle, inode);
1773                 if (bh) {
1774                         BUFFER_TRACE(bh, "retaking write access");
1775                         ext3_journal_get_write_access(handle, bh);
1776                 }
1777         }
1778
1779         /*
1780          * Any buffers which are on the journal will be in memory. We find
1781          * them on the hash table so journal_revoke() will run journal_forget()
1782          * on them.  We've already detached each block from the file, so
1783          * bforget() in journal_forget() should be safe.
1784          *
1785          * AKPM: turn on bforget in journal_forget()!!!
1786          */
1787         for (p = first; p < last; p++) {
1788                 u32 nr = le32_to_cpu(*p);
1789                 if (nr) {
1790                         struct buffer_head *bh;
1791
1792                         *p = 0;
1793                         bh = sb_find_get_block(inode->i_sb, nr);
1794                         ext3_forget(handle, 0, inode, bh, nr);
1795                 }
1796         }
1797
1798         ext3_free_blocks(handle, inode, block_to_free, count);
1799 }
1800
1801 /**
1802  * ext3_free_data - free a list of data blocks
1803  * @handle:     handle for this transaction
1804  * @inode:      inode we are dealing with
1805  * @this_bh:    indirect buffer_head which contains *@first and *@last
1806  * @first:      array of block numbers
1807  * @last:       points immediately past the end of array
1808  *
1809  * We are freeing all blocks referenced from that array (numbers are stored as
1810  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
1811  *
1812  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
1813  * blocks are contiguous then releasing them at one time will only affect one
1814  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
1815  * actually use a lot of journal space.
1816  *
1817  * @this_bh will be %NULL if @first and @last point into the inode's direct
1818  * block pointers.
1819  */
1820 static void ext3_free_data(handle_t *handle, struct inode *inode,
1821                            struct buffer_head *this_bh, u32 *first, u32 *last)
1822 {
1823         unsigned long block_to_free = 0;    /* Starting block # of a run */
1824         unsigned long count = 0;            /* Number of blocks in the run */ 
1825         u32 *block_to_free_p = NULL;        /* Pointer into inode/ind
1826                                                corresponding to
1827                                                block_to_free */
1828         unsigned long nr;                   /* Current block # */
1829         u32 *p;                             /* Pointer into inode/ind
1830                                                for current block */
1831         int err;
1832
1833         if (this_bh) {                          /* For indirect block */
1834                 BUFFER_TRACE(this_bh, "get_write_access");
1835                 err = ext3_journal_get_write_access(handle, this_bh);
1836                 /* Important: if we can't update the indirect pointers
1837                  * to the blocks, we can't free them. */
1838                 if (err)
1839                         return;
1840         }
1841
1842         for (p = first; p < last; p++) {
1843                 nr = le32_to_cpu(*p);
1844                 if (nr) {
1845                         /* accumulate blocks to free if they're contiguous */
1846                         if (count == 0) {
1847                                 block_to_free = nr;
1848                                 block_to_free_p = p;
1849                                 count = 1;
1850                         } else if (nr == block_to_free + count) {
1851                                 count++;
1852                         } else {
1853                                 ext3_clear_blocks(handle, inode, this_bh, 
1854                                                   block_to_free,
1855                                                   count, block_to_free_p, p);
1856                                 block_to_free = nr;
1857                                 block_to_free_p = p;
1858                                 count = 1;
1859                         }
1860                 }
1861         }
1862
1863         if (count > 0)
1864                 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
1865                                   count, block_to_free_p, p);
1866
1867         if (this_bh) {
1868                 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
1869                 ext3_journal_dirty_metadata(handle, this_bh);
1870         }
1871 }
1872
1873 /**
1874  *      ext3_free_branches - free an array of branches
1875  *      @handle: JBD handle for this transaction
1876  *      @inode: inode we are dealing with
1877  *      @parent_bh: the buffer_head which contains *@first and *@last
1878  *      @first: array of block numbers
1879  *      @last:  pointer immediately past the end of array
1880  *      @depth: depth of the branches to free
1881  *
1882  *      We are freeing all blocks referenced from these branches (numbers are
1883  *      stored as little-endian 32-bit) and updating @inode->i_blocks
1884  *      appropriately.
1885  */
1886 static void ext3_free_branches(handle_t *handle, struct inode *inode,
1887                                struct buffer_head *parent_bh,
1888                                u32 *first, u32 *last, int depth)
1889 {
1890         unsigned long nr;
1891         u32 *p;
1892
1893         if (is_handle_aborted(handle))
1894                 return;
1895
1896         if (depth--) {
1897                 struct buffer_head *bh;
1898                 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
1899                 p = last;
1900                 while (--p >= first) {
1901                         nr = le32_to_cpu(*p);
1902                         if (!nr)
1903                                 continue;               /* A hole */
1904
1905                         /* Go read the buffer for the next level down */
1906                         bh = sb_bread(inode->i_sb, nr);
1907
1908                         /*
1909                          * A read failure? Report error and clear slot
1910                          * (should be rare).
1911                          */
1912                         if (!bh) {
1913                                 ext3_error(inode->i_sb, "ext3_free_branches",
1914                                            "Read failure, inode=%ld, block=%ld",
1915                                            inode->i_ino, nr);
1916                                 continue;
1917                         }
1918
1919                         /* This zaps the entire block.  Bottom up. */
1920                         BUFFER_TRACE(bh, "free child branches");
1921                         ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
1922                                            (u32*)bh->b_data + addr_per_block,
1923                                            depth);
1924
1925                         /*
1926                          * We've probably journalled the indirect block several
1927                          * times during the truncate.  But it's no longer
1928                          * needed and we now drop it from the transaction via
1929                          * journal_revoke().
1930                          *
1931                          * That's easy if it's exclusively part of this
1932                          * transaction.  But if it's part of the committing
1933                          * transaction then journal_forget() will simply
1934                          * brelse() it.  That means that if the underlying
1935                          * block is reallocated in ext3_get_block(),
1936                          * unmap_underlying_metadata() will find this block
1937                          * and will try to get rid of it.  damn, damn.
1938                          *
1939                          * If this block has already been committed to the
1940                          * journal, a revoke record will be written.  And
1941                          * revoke records must be emitted *before* clearing
1942                          * this block's bit in the bitmaps.
1943                          */
1944                         ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
1945
1946                         /*
1947                          * Everything below this pointer has been
1948                          * released.  Now let this top-of-subtree go.
1949                          *
1950                          * We want the freeing of this indirect block to be
1951                          * atomic in the journal with the updating of the
1952                          * bitmap block which owns it.  So make some room in
1953                          * the journal.
1954                          *
1955                          * We zero the parent pointer *after* freeing its
1956                          * pointee in the bitmaps, so if extend_transaction()
1957                          * for some reason fails to put the bitmap changes and
1958                          * the release into the same transaction, recovery
1959                          * will merely complain about releasing a free block,
1960                          * rather than leaking blocks.
1961                          */
1962                         if (is_handle_aborted(handle))
1963                                 return;
1964                         if (try_to_extend_transaction(handle, inode)) {
1965                                 ext3_mark_inode_dirty(handle, inode);
1966                                 ext3_journal_test_restart(handle, inode);
1967                         }
1968
1969                         ext3_free_blocks(handle, inode, nr, 1);
1970
1971                         if (parent_bh) {
1972                                 /*
1973                                  * The block which we have just freed is
1974                                  * pointed to by an indirect block: journal it
1975                                  */
1976                                 BUFFER_TRACE(parent_bh, "get_write_access");
1977                                 if (!ext3_journal_get_write_access(handle,
1978                                                                    parent_bh)){
1979                                         *p = 0;
1980                                         BUFFER_TRACE(parent_bh,
1981                                         "call ext3_journal_dirty_metadata");
1982                                         ext3_journal_dirty_metadata(handle, 
1983                                                                     parent_bh);
1984                                 }
1985                         }
1986                 }
1987         } else {
1988                 /* We have reached the bottom of the tree. */
1989                 BUFFER_TRACE(parent_bh, "free data blocks");
1990                 ext3_free_data(handle, inode, parent_bh, first, last);
1991         }
1992 }
1993
1994 /*
1995  * ext3_truncate()
1996  *
1997  * We block out ext3_get_block() block instantiations across the entire
1998  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
1999  * simultaneously on behalf of the same inode.
2000  *
2001  * As we work through the truncate and commit bits of it to the journal there
2002  * is one core, guiding principle: the file's tree must always be consistent on
2003  * disk.  We must be able to restart the truncate after a crash.
2004  *
2005  * The file's tree may be transiently inconsistent in memory (although it
2006  * probably isn't), but whenever we close off and commit a journal transaction,
2007  * the contents of (the filesystem + the journal) must be consistent and
2008  * restartable.  It's pretty simple, really: bottom up, right to left (although
2009  * left-to-right works OK too).
2010  *
2011  * Note that at recovery time, journal replay occurs *before* the restart of
2012  * truncate against the orphan inode list.
2013  *
2014  * The committed inode has the new, desired i_size (which is the same as
2015  * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2016  * that this inode's truncate did not complete and it will again call
2017  * ext3_truncate() to have another go.  So there will be instantiated blocks
2018  * to the right of the truncation point in a crashed ext3 filesystem.  But
2019  * that's fine - as long as they are linked from the inode, the post-crash
2020  * ext3_truncate() run will find them and release them.
2021  */
2022
2023 void ext3_truncate(struct inode * inode)
2024 {
2025         handle_t *handle;
2026         struct ext3_inode_info *ei = EXT3_I(inode);
2027         u32 *i_data = ei->i_data;
2028         int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2029         struct address_space *mapping = inode->i_mapping;
2030         int offsets[4];
2031         Indirect chain[4];
2032         Indirect *partial;
2033         int nr = 0;
2034         int n;
2035         long last_block;
2036         unsigned blocksize = inode->i_sb->s_blocksize;
2037         struct page *page;
2038
2039         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2040             S_ISLNK(inode->i_mode)))
2041                 return;
2042         if (ext3_inode_is_fast_symlink(inode))
2043                 return;
2044         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2045                 return;
2046
2047         ext3_discard_reservation(inode);
2048
2049         /*
2050          * We have to lock the EOF page here, because lock_page() nests
2051          * outside journal_start().
2052          */
2053         if ((inode->i_size & (blocksize - 1)) == 0) {
2054                 /* Block boundary? Nothing to do */
2055                 page = NULL;
2056         } else {
2057                 page = grab_cache_page(mapping,
2058                                 inode->i_size >> PAGE_CACHE_SHIFT);
2059                 if (!page)
2060                         return;
2061         }
2062
2063         handle = start_transaction(inode);
2064         if (IS_ERR(handle)) {
2065                 if (page) {
2066                         clear_highpage(page);
2067                         flush_dcache_page(page);
2068                         unlock_page(page);
2069                         page_cache_release(page);
2070                 }
2071                 return;         /* AKPM: return what? */
2072         }
2073
2074         last_block = (inode->i_size + blocksize-1)
2075                                         >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
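
             /*
              * last_block is the first file block wholly beyond the new
              * EOF: e.g. i_size == 5000 with 1K blocks keeps blocks 0-4
              * (block 4's tail is zeroed below) and gives last_block == 5.
              */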
2076
2077         if (page)
2078                 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2079
2080         n = ext3_block_to_path(inode, last_block, offsets, NULL);
2081         if (n == 0)
2082                 goto out_stop;  /* error */
2083
2084         /*
2085          * OK.  This truncate is going to happen.  We add the inode to the
2086          * orphan list, so that if this truncate spans multiple transactions,
2087          * and we crash, we will resume the truncate when the filesystem
2088          * recovers.  It also marks the inode dirty, to catch the new size.
2089          *
2090          * Implication: the file must always be in a sane, consistent
2091          * truncatable state while each transaction commits.
2092          */
2093         if (ext3_orphan_add(handle, inode))
2094                 goto out_stop;
2095
2096         /*
2097          * The orphan list entry will now protect us from any crash which
2098          * occurs before the truncate completes, so it is now safe to propagate
2099          * the new, shorter inode size (held for now in i_size) into the
2100          * on-disk inode. We do this via i_disksize, which is the value which
2101          * ext3 *really* writes onto the disk inode.
2102          */
2103         ei->i_disksize = inode->i_size;
2104
2105         /*
2106          * From here we block out all ext3_get_block() callers who want to
2107          * modify the block allocation tree.
2108          */
2109         down(&ei->truncate_sem);
2110
2111         if (n == 1) {           /* direct blocks */
2112                 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2113                                i_data + EXT3_NDIR_BLOCKS);
2114                 goto do_indirects;
2115         }
2116
2117         partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2118         /* Kill the top of shared branch (not detached) */
2119         if (nr) {
2120                 if (partial == chain) {
2121                         /* Shared branch grows from the inode */
2122                         ext3_free_branches(handle, inode, NULL,
2123                                            &nr, &nr+1, (chain+n-1) - partial);
2124                         *partial->p = 0;
2125                         /*
2126                          * We mark the inode dirty prior to restart,
2127                          * and prior to stop.  No need for it here.
2128                          */
2129                 } else {
2130                         /* Shared branch grows from an indirect block */
2131                         BUFFER_TRACE(partial->bh, "get_write_access");
2132                         ext3_free_branches(handle, inode, partial->bh,
2133                                         partial->p,
2134                                         partial->p+1, (chain+n-1) - partial);
2135                 }
2136         }
2137         /* Clear the ends of indirect blocks on the shared branch */
2138         while (partial > chain) {
2139                 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2140                                    (u32*)partial->bh->b_data + addr_per_block,
2141                                    (chain+n-1) - partial);
2142                 BUFFER_TRACE(partial->bh, "call brelse");
2143                 brelse (partial->bh);
2144                 partial--;
2145         }
2146 do_indirects:
2147         /* Kill the remaining (whole) subtrees */
2148         switch (offsets[0]) {
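                     /*
                      * The cases below deliberately fall through: a
                      * truncate that begins in, say, the direct blocks
                      * must also free the indirect, double-indirect and
                      * triple-indirect subtrees.
                      */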
2149                 default:
2150                         nr = i_data[EXT3_IND_BLOCK];
2151                         if (nr) {
2152                                 ext3_free_branches(handle, inode, NULL,
2153                                                    &nr, &nr+1, 1);
2154                                 i_data[EXT3_IND_BLOCK] = 0;
2155                         }
2156                 case EXT3_IND_BLOCK:
2157                         nr = i_data[EXT3_DIND_BLOCK];
2158                         if (nr) {
2159                                 ext3_free_branches(handle, inode, NULL,
2160                                                    &nr, &nr+1, 2);
2161                                 i_data[EXT3_DIND_BLOCK] = 0;
2162                         }
2163                 case EXT3_DIND_BLOCK:
2164                         nr = i_data[EXT3_TIND_BLOCK];
2165                         if (nr) {
2166                                 ext3_free_branches(handle, inode, NULL,
2167                                                    &nr, &nr+1, 3);
2168                                 i_data[EXT3_TIND_BLOCK] = 0;
2169                         }
2170                 case EXT3_TIND_BLOCK:
2171                         ;
2172         }
2173         up(&ei->truncate_sem);
2174         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2175         ext3_mark_inode_dirty(handle, inode);
2176
2177         /* In a multi-transaction truncate, we only make the final
2178          * transaction synchronous */
2179         if (IS_SYNC(inode))
2180                 handle->h_sync = 1;
2181 out_stop:
2182         /*
2183          * If this was a simple ftruncate(), and the file will remain alive
2184          * then we need to clear up the orphan record which we created above.
2185          * However, if this was a real unlink then we were called by
2186          * ext3_delete_inode(), and we allow that function to clean up the
2187          * orphan info for us.
2188          */
2189         if (inode->i_nlink)
2190                 ext3_orphan_del(handle, inode);
2191
2192         ext3_journal_stop(handle);
2193 }
2194
2195 static unsigned long ext3_get_inode_block(struct super_block *sb,
2196                 unsigned long ino, struct ext3_iloc *iloc)
2197 {
2198         unsigned long desc, group_desc, block_group;
2199         unsigned long offset, block;
2200         struct buffer_head *bh;
2201         struct ext3_group_desc *gdp;
2202
2204         if ((ino != EXT3_ROOT_INO &&
2205                 ino != EXT3_JOURNAL_INO &&
2206                 ino != EXT3_RESIZE_INO &&
2207                 ino < EXT3_FIRST_INO(sb)) ||
2208                 ino > le32_to_cpu(
2209                         EXT3_SB(sb)->s_es->s_inodes_count)) {
2210                 ext3_error (sb, "ext3_get_inode_block",
2211                             "bad inode number: %lu", ino);
2212                 return 0;
2213         }
2214         block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2215         if (block_group >= EXT3_SB(sb)->s_groups_count) {
2216                 ext3_error (sb, "ext3_get_inode_block",
2217                             "group >= groups count");
2218                 return 0;
2219         }
2220         group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
2221         desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
2222         bh = EXT3_SB(sb)->s_group_desc[group_desc];
2223         if (!bh) {
2224                 ext3_error (sb, "ext3_get_inode_block",
2225                             "Descriptor not loaded");
2226                 return 0;
2227         }
2228
2229         gdp = (struct ext3_group_desc *) bh->b_data;
2230         /*
2231          * Figure out the offset within the block group inode table
2232          */
2233         offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2234                 EXT3_INODE_SIZE(sb);
2235         block = le32_to_cpu(gdp[desc].bg_inode_table) +
2236                 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2237
2238         iloc->block_group = block_group;
2239         iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2240         return block;
2241 }
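
     /*
      * Worked example (illustrative): with 128-byte inodes, 1K blocks and
      * 8192 inodes per group, ino 12 lands in group 0 at byte offset
      * (12 - 1) * 128 == 1408, i.e. block bg_inode_table + 1 with an
      * in-block offset of 384.
      */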
2242
2243 /* 
2244  * ext3_get_inode_loc returns with an extra refcount against the inode's
2245  * underlying buffer_head on success.  If `in_mem' is true then the caller
2246  * already has the inode's contents in memory, so the read of the inode
2247  * block may be skipped when all other inodes in that block are free.
2248  */
2249 static int ext3_get_inode_loc(struct inode *inode,
2250                                 struct ext3_iloc *iloc, int in_mem)
2251 {
2252         unsigned long block;
2253         struct buffer_head *bh;
2254
2255         block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2256         if (!block)
2257                 return -EIO;
2258
2259         bh = sb_getblk(inode->i_sb, block);
2260         if (!bh) {
2261                 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2262                                 "unable to read inode block - "
2263                                 "inode=%lu, block=%lu", inode->i_ino, block);
2264                 return -EIO;
2265         }
2266         if (!buffer_uptodate(bh)) {
2267                 lock_buffer(bh);
2268                 if (buffer_uptodate(bh)) {
2269                         /* someone brought it uptodate while we waited */
2270                         unlock_buffer(bh);
2271                         goto has_buffer;
2272                 }
2273
2274                 /* we can't skip I/O if the inode exists only on disk */
2275                 if (in_mem) {
2276                         struct buffer_head *bitmap_bh;
2277                         struct ext3_group_desc *desc;
2278                         int inodes_per_buffer;
2279                         int inode_offset, i;
2280                         int block_group;
2281                         int start;
2282
2283                         /*
2284                          * If this is the only valid inode in the block we
2285                          * need not read the block.
2286                          */
2287                         block_group = (inode->i_ino - 1) /
2288                                         EXT3_INODES_PER_GROUP(inode->i_sb);
2289                         inodes_per_buffer = bh->b_size /
2290                                 EXT3_INODE_SIZE(inode->i_sb);
2291                         inode_offset = ((inode->i_ino - 1) %
2292                                         EXT3_INODES_PER_GROUP(inode->i_sb));
2293                         start = inode_offset & ~(inodes_per_buffer - 1);
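                             /*
                              * e.g. a 4K buffer of 128-byte inodes holds
                              * 32 inodes, so inode_offset 37 rounds down
                              * to start == 32.
                              */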
2294
2295                         /* Is the inode bitmap in cache? */
2296                         desc = ext3_get_group_desc(inode->i_sb,
2297                                                 block_group, NULL);
2298                         if (!desc)
2299                                 goto make_io;
2300
2301                         bitmap_bh = sb_getblk(inode->i_sb,
2302                                         le32_to_cpu(desc->bg_inode_bitmap));
2303                         if (!bitmap_bh)
2304                                 goto make_io;
2305
2306                         /*
2307                          * If the inode bitmap isn't in cache then the
2308                          * optimisation may end up performing two reads instead
2309                          * of one, so skip it.
2310                          */
2311                         if (!buffer_uptodate(bitmap_bh)) {
2312                                 brelse(bitmap_bh);
2313                                 goto make_io;
2314                         }
2315                         for (i = start; i < start + inodes_per_buffer; i++) {
2316                                 if (i == inode_offset)
2317                                         continue;
2318                                 if (ext3_test_bit(i, bitmap_bh->b_data))
2319                                         break;
2320                         }
2321                         brelse(bitmap_bh);
2322                         if (i == start + inodes_per_buffer) {
2323                                 /* all other inodes are free, so skip I/O */
2324                                 memset(bh->b_data, 0, bh->b_size);
2325                                 set_buffer_uptodate(bh);
2326                                 unlock_buffer(bh);
2327                                 goto has_buffer;
2328                         }
2329                 }
2330
2331 make_io:
2332                 /*
2333                  * There are other valid inodes in the buffer, so we must
2334                  * read the block from disk.
2335                  */
2336                 get_bh(bh);
2337                 bh->b_end_io = end_buffer_read_sync;
2338                 submit_bh(READ, bh);
2339                 wait_on_buffer(bh);
2340                 if (!buffer_uptodate(bh)) {
2341                         ext3_error(inode->i_sb, "ext3_get_inode_loc",
2342                                         "unable to read inode block - "
2343                                         "inode=%lu, block=%lu",
2344                                         inode->i_ino, block);
2345                         brelse(bh);
2346                         return -EIO;
2347                 }
2348         }
2349 has_buffer:
2350         iloc->bh = bh;
2351         return 0;
2352 }
2353
2354 void ext3_set_inode_flags(struct inode *inode)
2355 {
2356         unsigned int flags = EXT3_I(inode)->i_flags;
2357
2358         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2359         if (flags & EXT3_SYNC_FL)
2360                 inode->i_flags |= S_SYNC;
2361         if (flags & EXT3_APPEND_FL)
2362                 inode->i_flags |= S_APPEND;
2363         if (flags & EXT3_IMMUTABLE_FL)
2364                 inode->i_flags |= S_IMMUTABLE;
2365         if (flags & EXT3_NOATIME_FL)
2366                 inode->i_flags |= S_NOATIME;
2367         if (flags & EXT3_DIRSYNC_FL)
2368                 inode->i_flags |= S_DIRSYNC;
2369 }
2370
2371 void ext3_read_inode(struct inode * inode)
2372 {
2373         struct ext3_iloc iloc;
2374         struct ext3_inode *raw_inode;
2375         struct ext3_inode_info *ei = EXT3_I(inode);
2376         struct buffer_head *bh;
2377         int block;
2378
2379 #ifdef CONFIG_EXT3_FS_POSIX_ACL
2380         ei->i_acl = EXT3_ACL_NOT_CACHED;
2381         ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2382 #endif
2383         if (ext3_get_inode_loc(inode, &iloc, 0))
2384                 goto bad_inode;
2385         bh = iloc.bh;
2386         raw_inode = ext3_raw_inode(&iloc);
2387         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2388         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2389         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2390         if(!(test_opt (inode->i_sb, NO_UID32))) {
2391                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2392                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2393         }
2394         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2395         inode->i_size = le32_to_cpu(raw_inode->i_size);
2396         inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
2397         inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
2398         inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
2399         inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2400
2401         ei->i_state = 0;
2402         ei->i_next_alloc_block = 0;
2403         ei->i_next_alloc_goal = 0;
2404         ei->i_dir_start_lookup = 0;
2405         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2406         /* We now have enough fields to check if the inode was active or not.
2407          * This is needed because nfsd might try to access dead inodes;
2408          * the test is the same one that e2fsck uses.
2409          * NeilBrown 1999oct15
2410          */
2411         if (inode->i_nlink == 0) {
2412                 if (inode->i_mode == 0 ||
2413                     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2414                         /* this inode is deleted */
2415                         brelse (bh);
2416                         goto bad_inode;
2417                 }
2418                 /* The only unlinked inodes we let through here have
2419                  * valid i_mode and are being read by the orphan
2420                  * recovery code: that's fine, we're about to complete
2421                  * the process of deleting those. */
2422         }
2423         inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
2424                                          * (for stat), not the fs block
2425                                          * size */  
2426         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2427         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2428 #ifdef EXT3_FRAGMENTS
2429         ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2430         ei->i_frag_no = raw_inode->i_frag;
2431         ei->i_frag_size = raw_inode->i_fsize;
2432 #endif
2433         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
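             /*
              * On disk, the i_dir_acl slot doubles as the high 32 bits of
              * i_size for regular files, hence the split below.
              */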
2434         if (!S_ISREG(inode->i_mode)) {
2435                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2436         } else {
2437                 inode->i_size |=
2438                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2439         }
2440         ei->i_disksize = inode->i_size;
2441         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2442         ei->i_block_group = iloc.block_group;
2443         ei->i_rsv_window.rsv_start = 0;
2444         ei->i_rsv_window.rsv_end= 0;
2445         atomic_set(&ei->i_rsv_window.rsv_goal_size, EXT3_DEFAULT_RESERVE_BLOCKS);
2446         INIT_LIST_HEAD(&ei->i_rsv_window.rsv_list);
2447         /*
2448          * NOTE! The in-memory inode i_data array is in little-endian order
2449          * even on big-endian machines: we do NOT byteswap the block numbers!
2450          */
2451         for (block = 0; block < EXT3_N_BLOCKS; block++)
2452                 ei->i_data[block] = raw_inode->i_block[block];
2453         INIT_LIST_HEAD(&ei->i_orphan);
2454
2455         if (S_ISREG(inode->i_mode)) {
2456                 inode->i_op = &ext3_file_inode_operations;
2457                 inode->i_fop = &ext3_file_operations;
2458                 ext3_set_aops(inode);
2459         } else if (S_ISDIR(inode->i_mode)) {
2460                 inode->i_op = &ext3_dir_inode_operations;
2461                 inode->i_fop = &ext3_dir_operations;
2462         } else if (S_ISLNK(inode->i_mode)) {
2463                 if (ext3_inode_is_fast_symlink(inode))
2464                         inode->i_op = &ext3_fast_symlink_inode_operations;
2465                 else {
2466                         inode->i_op = &ext3_symlink_inode_operations;
2467                         ext3_set_aops(inode);
2468                 }
2469         } else {
2470                 inode->i_op = &ext3_special_inode_operations;
2471                 if (raw_inode->i_block[0])
2472                         init_special_inode(inode, inode->i_mode,
2473                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2474                 else 
2475                         init_special_inode(inode, inode->i_mode,
2476                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2477         }
2478         brelse (iloc.bh);
2479         ext3_set_inode_flags(inode);
2480         return;
2481
2482 bad_inode:
2483         make_bad_inode(inode);
2484         return;
2485 }
2486
2487 /*
2488  * Post the struct inode info into an on-disk inode location in the
2489  * buffer-cache.  This gobbles the caller's reference to the
2490  * buffer_head in the inode location struct.
2491  *
2492  * The caller must have write access to iloc->bh.
2493  */
2494 static int ext3_do_update_inode(handle_t *handle, 
2495                                 struct inode *inode, 
2496                                 struct ext3_iloc *iloc)
2497 {
2498         struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2499         struct ext3_inode_info *ei = EXT3_I(inode);
2500         struct buffer_head *bh = iloc->bh;
2501         int err = 0, rc, block;
2502
2503         /* For fields not tracked in the in-memory inode,
2504          * initialise them to zero for new inodes. */
2505         if (ei->i_state & EXT3_STATE_NEW)
2506                 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2507
2508         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2509         if(!(test_opt(inode->i_sb, NO_UID32))) {
2510                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2511                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2512 /*
2513  * Fix up interoperability with old kernels. Otherwise, old inodes get
2514  * re-used with the upper 16 bits of the uid/gid intact
2515  */
2516                 if(!ei->i_dtime) {
2517                         raw_inode->i_uid_high =
2518                                 cpu_to_le16(high_16_bits(inode->i_uid));
2519                         raw_inode->i_gid_high =
2520                                 cpu_to_le16(high_16_bits(inode->i_gid));
2521                 } else {
2522                         raw_inode->i_uid_high = 0;
2523                         raw_inode->i_gid_high = 0;
2524                 }
2525         } else {
2526                 raw_inode->i_uid_low =
2527                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2528                 raw_inode->i_gid_low =
2529                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2530                 raw_inode->i_uid_high = 0;
2531                 raw_inode->i_gid_high = 0;
2532         }
2533         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2534         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2535         raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2536         raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2537         raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2538         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2539         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2540         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2541 #ifdef EXT3_FRAGMENTS
2542         raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2543         raw_inode->i_frag = ei->i_frag_no;
2544         raw_inode->i_fsize = ei->i_frag_size;
2545 #endif
2546         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2547         if (!S_ISREG(inode->i_mode)) {
2548                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2549         } else {
2550                 raw_inode->i_size_high =
2551                         cpu_to_le32(ei->i_disksize >> 32);
2552                 if (ei->i_disksize > 0x7fffffffULL) {
2553                         struct super_block *sb = inode->i_sb;
2554                         if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2555                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2556                             EXT3_SB(sb)->s_es->s_rev_level ==
2557                                         cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2558                                /* If this is the first large file
2559                                 * created, add a flag to the superblock.
2560                                 */
2561                                 err = ext3_journal_get_write_access(handle,
2562                                                 EXT3_SB(sb)->s_sbh);
2563                                 if (err)
2564                                         goto out_brelse;
2565                                 ext3_update_dynamic_rev(sb);
2566                                 EXT3_SET_RO_COMPAT_FEATURE(sb,
2567                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2568                                 sb->s_dirt = 1;
2569                                 handle->h_sync = 1;
2570                                 err = ext3_journal_dirty_metadata(handle,
2571                                                 EXT3_SB(sb)->s_sbh);
2572                         }
2573                 }
2574         }
2575         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2576         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2577                 if (old_valid_dev(inode->i_rdev)) {
2578                         raw_inode->i_block[0] =
2579                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2580                         raw_inode->i_block[1] = 0;
2581                 } else {
2582                         raw_inode->i_block[0] = 0;
2583                         raw_inode->i_block[1] =
2584                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2585                         raw_inode->i_block[2] = 0;
2586                 }
2587         } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2588                 raw_inode->i_block[block] = ei->i_data[block];
2589
2590         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2591         rc = ext3_journal_dirty_metadata(handle, bh);
2592         if (!err)
2593                 err = rc;
2594         ei->i_state &= ~EXT3_STATE_NEW;
2595
2596 out_brelse:
2597         brelse (bh);
2598         ext3_std_error(inode->i_sb, err);
2599         return err;
2600 }
2601
2602 /*
2603  * ext3_write_inode()
2604  *
2605  * We are called from a few places:
2606  *
2607  * - Within generic_file_write() for O_SYNC files.
2608  *   Here, there will be no transaction running. We wait for any running
2609  *   transaction to commit.
2610  *
2611  * - Within sys_sync(), kupdate and such.
2612  *   We wait on commit, if we are told to.
2613  *
2614  * - Within prune_icache() (PF_MEMALLOC == true)
2615  *   Here we simply return.  We can't afford to block kswapd on the
2616  *   journal commit.
2617  *
2618  * In all cases it is actually safe for us to return without doing anything,
2619  * because the inode has been copied into a raw inode buffer in
2620  * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2621  * knfsd.
2622  *
2623  * Note that we are absolutely dependent upon all inode dirtiers doing the
2624  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2625  * which we are interested.
2626  *
2627  * It would be a bug for them to not do this.  The code:
2628  *
2629  *      mark_inode_dirty(inode)
2630  *      stuff();
2631  *      inode->i_size = expr;
2632  *
2633  * is in error because a kswapd-driven write_inode() could occur while
2634  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2635  * will no longer be on the superblock's dirty inode list.
2636  */
2637 void ext3_write_inode(struct inode *inode, int wait)
2638 {
2639         if (current->flags & PF_MEMALLOC)
2640                 return;
2641
2642         if (ext3_journal_current_handle()) {
2643                 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2644                 dump_stack();
2645                 return;
2646         }
2647
2648         if (!wait)
2649                 return;
2650
2651         ext3_force_commit(inode->i_sb);
2652 }
2653
2654 /*
2655  * ext3_setattr()
2656  *
2657  * Called from notify_change.
2658  *
2659  * We want to trap VFS attempts to truncate the file as soon as
2660  * possible.  In particular, we want to make sure that when the VFS
2661  * shrinks i_size, we put the inode on the orphan list and modify
2662  * i_disksize immediately, so that during the subsequent flushing of
2663  * dirty pages and freeing of disk blocks, we can guarantee that any
2664  * commit will leave the blocks being flushed in an unused state on
2665  * disk.  (On recovery, the inode will get truncated and the blocks will
2666  * be freed, so we have a strong guarantee that no future commit will
2667  * leave these blocks visible to the user.)  
2668  *
2669  * Called with inode->sem down.
2670  */
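/*
 * Illustrative restatement of the shrinking-truncate ordering
 * implemented below (an added sketch; no additional behaviour is
 * implied):
 *
 *      ext3_orphan_add(handle, inode);             <- survives a crash
 *      EXT3_I(inode)->i_disksize = attr->ia_size;
 *      ext3_mark_inode_dirty(handle, inode);
 *      ext3_journal_stop(handle);                  <- commit point
 *      inode_setattr(inode, attr);                 <- flush + free blocks
 */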
2671 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2672 {
2673         struct inode *inode = dentry->d_inode;
2674         int error, rc = 0;
2675         const unsigned int ia_valid = attr->ia_valid;
2676
2677         error = inode_change_ok(inode, attr);
2678         if (error)
2679                 return error;
2680
2681         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2682                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2683                 handle_t *handle;
2684
2685                 /* (user+group)*(old+new) quota structures, plus the inode
2686                  * write (sb, inode block, ? - but truncate inode update has it) */
2687                 handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
2688                 if (IS_ERR(handle)) {
2689                         error = PTR_ERR(handle);
2690                         goto err_out;
2691                 }
2692                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2693                 if (error) {
2694                         ext3_journal_stop(handle);
2695                         return error;
2696                 }
2697                 /* Update corresponding info in inode so that everything is in
2698                  * one transaction */
2699                 if (attr->ia_valid & ATTR_UID)
2700                         inode->i_uid = attr->ia_uid;
2701                 if (attr->ia_valid & ATTR_GID)
2702                         inode->i_gid = attr->ia_gid;
2703                 error = ext3_mark_inode_dirty(handle, inode);
2704                 ext3_journal_stop(handle);
2705         }
2706
2707         if (S_ISREG(inode->i_mode) &&
2708             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2709                 handle_t *handle;
2710
2711                 handle = ext3_journal_start(inode, 3);
2712                 if (IS_ERR(handle)) {
2713                         error = PTR_ERR(handle);
2714                         goto err_out;
2715                 }
2716
2717                 error = ext3_orphan_add(handle, inode);
2718                 EXT3_I(inode)->i_disksize = attr->ia_size;
2719                 rc = ext3_mark_inode_dirty(handle, inode);
2720                 if (!error)
2721                         error = rc;
2722                 ext3_journal_stop(handle);
2723         }
2724
2725         rc = inode_setattr(inode, attr);
2726
2727         /* If inode_setattr's call to ext3_truncate failed to get a
2728          * transaction handle at all, we need to clean up the in-core
2729          * orphan list manually. */
2730         if (inode->i_nlink)
2731                 ext3_orphan_del(NULL, inode);
2732
2733         if (!rc && (ia_valid & ATTR_MODE))
2734                 rc = ext3_acl_chmod(inode);
2735
2736 err_out:
2737         ext3_std_error(inode->i_sb, error);
2738         if (!error)
2739                 error = rc;
2740         return error;
2741 }
2742
2743
2744 /*
2745  * akpm: how many blocks doth make a writepage()?
2746  *
2747  * With N blocks per page, it may be:
2748  * N data blocks
2749  * 2 indirect blocks
2750  * 2 dindirect blocks
2751  * 1 tindirect block
2752  * N+5 bitmap blocks (from the above)
2753  * N+5 group descriptor summary blocks
2754  * 1 inode block
2755  * 1 superblock.
2756  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
2757  *
2758  * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
2759  *
2760  * With ordered or writeback data it's the same, less the N data blocks.
2761  *
2762  * If the inode's direct blocks can hold an integral number of pages then a
2763  * page cannot straddle two indirect blocks, and we can only touch one indirect
2764  * and dindirect block, and the "5" above becomes "3".
2765  *
2766  * This still overestimates under most circumstances.  If we were to pass the
2767  * start and end offsets in here as well we could do block_to_path() on each
2768  * block and work out the exact number of indirects which are touched.  Pah.
2769  */
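/*
 * Worked example (added for illustration, assuming 4K pages with 1K
 * blocks, so N = bpp = 4; EXT3_NDIR_BLOCKS = 12 is then a multiple of
 * bpp and the function below uses indirects = 3):
 *
 *      data=journal:            3 * (4 + 3) + 2 = 23 blocks
 *      data=ordered/writeback:  2 * (4 + 3) + 2 = 16 blocks
 *
 * plus 2 * EXT3_QUOTA_TRANS_BLOCKS when CONFIG_QUOTA is set.
 */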
2770
2771 int ext3_writepage_trans_blocks(struct inode *inode)
2772 {
2773         int bpp = ext3_journal_blocks_per_page(inode);
2774         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2775         int ret;
2776
2777         if (ext3_should_journal_data(inode))
2778                 ret = 3 * (bpp + indirects) + 2;
2779         else
2780                 ret = 2 * (bpp + indirects) + 2;
2781
2782 #ifdef CONFIG_QUOTA
2783         /* We know that the structure was already allocated during DQUOT_INIT,
2784          * so we will be updating only the data blocks + inodes */
2785         ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
2786 #endif
2787
2788         return ret;
2789 }
2790
2791 /*
2792  * The caller must have previously called ext3_reserve_inode_write().
2793  * Given this, we know that the caller already has write access to iloc->bh.
2794  */
2795 int ext3_mark_iloc_dirty(handle_t *handle,
2796                 struct inode *inode, struct ext3_iloc *iloc)
2797 {
2798         int err = 0;
2799
2800         /* ext3_do_update_inode() consumes one bh->b_count reference */
2801         get_bh(iloc->bh);
2802
2803         /* ext3_do_update_inode() does journal_dirty_metadata */
2804         err = ext3_do_update_inode(handle, inode, iloc);
2805         put_bh(iloc->bh);
2806         return err;
2807 }
2808
2809 /* 
2810  * On success, we end up with an outstanding reference count against
2811  * iloc->bh.  This _must_ be cleaned up later.
2812  */
2813
2814 int
2815 ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
2816                          struct ext3_iloc *iloc)
2817 {
2818         int err = 0;
2819         if (handle) {
2820                 err = ext3_get_inode_loc(inode, iloc, 1);
2821                 if (!err) {
2822                         BUFFER_TRACE(iloc->bh, "get_write_access");
2823                         err = ext3_journal_get_write_access(handle, iloc->bh);
2824                         if (err) {
2825                                 brelse(iloc->bh);
2826                                 iloc->bh = NULL;
2827                         }
2828                 }
2829         }
2830         ext3_std_error(inode->i_sb, err);
2831         return err;
2832 }
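
/*
 * A minimal usage sketch of the reserve/dirty pairing (added for
 * illustration; ext3_mark_inode_dirty() below is the real instance of
 * this pattern):
 *
 *      struct ext3_iloc iloc;
 *      int err;
 *
 *      err = ext3_reserve_inode_write(handle, inode, &iloc);
 *      if (!err) {
 *              ... modify the in-core inode ...
 *              err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *      }
 *
 * ext3_mark_iloc_dirty() consumes the bh reference which
 * ext3_reserve_inode_write() took, so no explicit brelse() is needed
 * on this path.
 */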
2833
2834 /*
2835  * akpm: What we do here is to mark the in-core inode as clean
2836  * with respect to inode dirtiness (it may still be data-dirty).
2837  * This means that the in-core inode may be reaped by prune_icache
2838  * without having to perform any I/O.  This is a very good thing,
2839  * because *any* task may call prune_icache - even ones which
2840  * have a transaction open against a different journal.
2841  *
2842  * Is this cheating?  Not really.  Sure, we haven't written the
2843  * inode out, but prune_icache isn't a user-visible syncing function.
2844  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
2845  * we start and wait on commits.
2846  *
2847  * Is this efficient/effective?  Well, we're being nice to the system
2848  * by cleaning up our inodes proactively so they can be reaped
2849  * without I/O.  But we are potentially leaving up to five seconds'
2850  * worth of inodes floating about which prune_icache wants us to
2851  * write out.  One way to fix that would be to get prune_icache()
2852  * to do a write_super() to free up some memory.  It has the desired
2853  * effect.
2854  */
2855 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
2856 {
2857         struct ext3_iloc iloc;
2858         int err;
2859
2860         err = ext3_reserve_inode_write(handle, inode, &iloc);
2861         if (!err)
2862                 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2863         return err;
2864 }
2865
2866 /*
2867  * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
2868  *
2869  * We're really interested in the case where a file is being extended.
2870  * i_size has been changed by generic_commit_write() and we thus need
2871  * to include the updated inode in the current transaction.
2872  *
2873  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
2874  * are allocated to the file.
2875  *
2876  * If the inode is marked synchronous, we don't honour that here - doing
2877  * so would cause a commit on atime updates, which we don't bother doing.
2878  * We handle synchronous inodes at the highest possible level.
2879  */
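/*
 * Illustrative call chain (an added sketch of the extending-write case
 * described above):
 *
 *      generic_commit_write()
 *          updates inode->i_size
 *          mark_inode_dirty(inode)
 *              __mark_inode_dirty()
 *                  ext3_dirty_inode()    <- we start a handle here
 */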
2880 void ext3_dirty_inode(struct inode *inode)
2881 {
2882         handle_t *current_handle = ext3_journal_current_handle();
2883         handle_t *handle;
2884
2885         handle = ext3_journal_start(inode, 2);
2886         if (IS_ERR(handle))
2887                 goto out;
2888         if (current_handle &&
2889                 current_handle->h_transaction != handle->h_transaction) {
2890                 /* This task has a transaction open against a different fs */
2891                 printk(KERN_EMERG "%s: transactions do not match!\n",
2892                        __FUNCTION__);
2893         } else {
2894                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
2895                                 current_handle);
2896                 ext3_mark_inode_dirty(handle, inode);
2897         }
2898         ext3_journal_stop(handle);
2899 out:
2900         return;
2901 }
2902
2903 #ifdef AKPM
2904 /* 
2905  * Bind an inode's backing buffer_head into this transaction, to prevent
2906  * it from being flushed to disk early.  Unlike
2907  * ext3_reserve_inode_write, this leaves behind no bh reference and
2908  * returns no iloc structure, so the caller needs to repeat the iloc
2909  * lookup to mark the inode dirty later.
2910  */
2911 static inline int
2912 ext3_pin_inode(handle_t *handle, struct inode *inode)
2913 {
2914         struct ext3_iloc iloc;
2915
2916         int err = 0;
2917         if (handle) {
2918                 err = ext3_get_inode_loc(inode, &iloc, 1);
2919                 if (!err) {
2920                         BUFFER_TRACE(iloc.bh, "get_write_access");
2921                         err = journal_get_write_access(handle, iloc.bh);
2922                         if (!err)
2923                                 err = ext3_journal_dirty_metadata(handle, 
2924                                                                   iloc.bh);
2925                         brelse(iloc.bh);
2926                 }
2927         }
2928         ext3_std_error(inode->i_sb, err);
2929         return err;
2930 }
2931 #endif
2932
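/*
 * A note on callers (an assumption added here, not from the original
 * source): the expected path into ext3_change_inode_journal_flag() is
 * the EXT3_IOC_SETFLAGS ioctl, i.e. userspace toggling per-inode data
 * journaling with something like:
 *
 *      chattr +j file          (set EXT3_JOURNAL_DATA_FL)
 *      chattr -j file          (clear it)
 */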
2933 int ext3_change_inode_journal_flag(struct inode *inode, int val)
2934 {
2935         journal_t *journal;
2936         handle_t *handle;
2937         int err;
2938
2939         /*
2940          * We have to be very careful here: changing a data block's
2941          * journaling status dynamically is dangerous.  If we write a
2942          * data block to the journal, change the status and then delete
2943          * that block, we risk forgetting to revoke the old log record
2944          * from the journal and so a subsequent replay can corrupt data.
2945          * So, first we make sure that the journal is empty and that
2946          * nobody is changing anything.
2947          */
2948
2949         journal = EXT3_JOURNAL(inode);
2950         if (is_journal_aborted(journal) || IS_RDONLY(inode))
2951                 return -EROFS;
2952
2953         journal_lock_updates(journal);
2954         journal_flush(journal);
2955
2956         /*
2957          * OK, there are no updates running now, and all cached data is
2958          * synced to disk.  We are now in a completely consistent state
2959          * which doesn't have anything in the journal, and we know that
2960          * no filesystem updates are running, so it is safe to modify
2961          * the inode's in-core data-journaling state flag now.
2962          */
2963
2964         if (val)
2965                 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
2966         else
2967                 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
2968         ext3_set_aops(inode);
2969
2970         journal_unlock_updates(journal);
2971
2972         /* Finally we can mark the inode as dirty. */
2973
2974         handle = ext3_journal_start(inode, 1);
2975         if (IS_ERR(handle))
2976                 return PTR_ERR(handle);
2977
2978         err = ext3_mark_inode_dirty(handle, inode);
2979         handle->h_sync = 1;
2980         ext3_journal_stop(handle);
2981         ext3_std_error(inode->i_sb, err);
2982
2983         return err;
2984 }