/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/vserver/xid.h>
#include "xattr.h"
#include "acl.h"

/*
 * Test whether an inode is a fast symlink.
 */
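/*
 * A fast symlink stores its target directly in inode->i_data, so it owns
 * no data blocks of its own.  i_blocks is counted in 512-byte sectors,
 * which is why the xattr block (i_file_acl), if present, contributes
 * blocksize >> 9 sectors that have to be ignored by the check below.
 */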
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}

/* The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */

int ext3_forget(handle_t *handle, int is_metadata,
                       struct inode *inode, struct buffer_head *bh,
                       int blocknr)
{
        int err;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext3_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call journal_forget");
                        ext3_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
                ext3_abort(inode->i_sb, __FUNCTION__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */

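/*
 * i_blocks is expressed in 512-byte sectors, so shifting right by
 * (s_blocksize_bits - 9) converts it to filesystem blocks: with 4KB
 * blocks, for instance, s_blocksize_bits is 12 and the shift is 3
 * (eight sectors per block).
 */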
static unsigned long blocks_for_truncate(struct inode *inode)
{
        unsigned long needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext3 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;

        return EXT3_DATA_TRANS_BLOCKS + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */

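/*
 * The truncate loop uses these helpers roughly as follows (a sketch of
 * the pattern described above, not a verbatim excerpt):
 *
 *	handle = start_transaction(inode);
 *	...
 *	if (try_to_extend_transaction(handle, inode)) {
 *		ext3_mark_inode_dirty(handle, inode);
 *		ext3_journal_test_restart(handle, inode);
 *	}
 */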
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext3_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext3_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext3_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at each iput()
 *
 * The inode may be "bad" if ext3_read_inode() saw an error from
 * ext3_get_inode(), so we need to check that to avoid freeing random disk
 * blocks.
 */
void ext3_put_inode(struct inode *inode)
{
        if (!is_bad_inode(inode))
                ext3_discard_prealloc(inode);
}

static void ext3_truncate_nocheck (struct inode *inode);

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
        handle_t *handle;

        if (is_bad_inode(inode))
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /* If we're going to skip the normal cleanup, we still
                 * need to make sure that the in-core orphan linked list
                 * is properly cleaned up. */
                ext3_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext3_truncate_nocheck(inode);
        /*
         * Kill off the orphan record which ext3_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext3_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext3_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext3_free_inode(handle, inode);
        ext3_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

void ext3_discard_prealloc (struct inode * inode)
{
#ifdef EXT3_PREALLOCATE
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_prealloc* */
        if (ei->i_prealloc_count) {
                unsigned short total = ei->i_prealloc_count;
                unsigned long block = ei->i_prealloc_block;
                ei->i_prealloc_count = 0;
                ei->i_prealloc_block = 0;
                /* Writer: end */
                ext3_free_blocks (inode, block, total);
        }
#endif
}

static int ext3_alloc_block (handle_t *handle,
                        struct inode * inode, unsigned long goal, int *err)
{
        unsigned long result;

#ifdef EXT3_PREALLOCATE
#ifdef EXT3FS_DEBUG
        static unsigned long alloc_hits, alloc_attempts;
#endif
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_prealloc* */
        if (ei->i_prealloc_count &&
            (goal == ei->i_prealloc_block ||
             goal + 1 == ei->i_prealloc_block))
        {
                result = ei->i_prealloc_block++;
                ei->i_prealloc_count--;
                /* Writer: end */
                ext3_debug ("preallocation hit (%lu/%lu).\n",
                            ++alloc_hits, ++alloc_attempts);
        } else {
                ext3_discard_prealloc (inode);
                ext3_debug ("preallocation miss (%lu/%lu).\n",
                            alloc_hits, ++alloc_attempts);
                if (S_ISREG(inode->i_mode))
                        result = ext3_new_block (inode, goal,
                                 &ei->i_prealloc_count,
                                 &ei->i_prealloc_block, err);
                else
                        result = ext3_new_block(inode, goal, NULL, NULL, err);
                /*
                 * AKPM: this is somewhat sticky.  I'm not surprised it was
                 * disabled in 2.2's ext3.  Need to integrate b_committed_data
                 * guarding with preallocation, if indeed preallocation is
                 * effective.
                 */
        }
#else
        result = ext3_new_block(handle, inode, goal, NULL, NULL, err);
#endif
        return result;
}

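/*
 * An Indirect describes one step in the chain from inode to data block:
 * ->p points at the pointer slot (in inode->i_data for the first step,
 * in an indirect block's bh->b_data otherwise), ->key caches the
 * little-endian block number read from that slot, and ->bh pins the
 * buffer_head hosting the slot (NULL for the inode step).
 */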
typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 *      ext3_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data ext3 uses a data structure
 *      common for UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at leaves and indirect blocks in intermediate
 *      nodes.  This function translates the block number into a path in that
 *      tree - the return value is the path length and @offsets[n] is the
 *      offset of the pointer to the (n+1)th node in the nth one. If @block
 *      is out of range (negative or too large) a warning is printed and zero
 *      is returned.
 *
 *      Note: function doesn't find node addresses, so no IO is needed. All
 *      we need to know is the capacity of indirect blocks (taken from the
 *      inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

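/*
 * Illustrative mapping, assuming 4KB blocks (so ptrs = 1024) and the
 * usual EXT3_NDIR_BLOCKS = 12:
 *
 *	i_block 11        -> { 11 }                    depth 1 (direct)
 *	i_block 12        -> { EXT3_IND_BLOCK, 0 }     depth 2 (indirect)
 *	i_block 12 + 1024 -> { EXT3_DIND_BLOCK, 0, 0 } depth 3 (double)
 */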
static int ext3_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT3_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ( (i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT3_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT3_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT3_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
                *boundary = (i_block & (ptrs - 1)) == (final - 1);
        return n;
}

/**
 *      ext3_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it notices that chain had been changed while it was reading
 *              (ditto, *@err == -EAGAIN)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext3_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */

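/*
 * For example, with the common 4KB block size a group holds 32768 blocks,
 * so the PID colouring below spreads concurrent allocators across sixteen
 * 2048-block windows within the group.
 */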
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
        __le32 *p;
        unsigned long bg_start;
        unsigned long colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * Is it going to be referred to from the inode itself? OK, just
         * put it into the same cylinder group then.
         */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}

/**
 *      ext3_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @chain:  chain of indirect blocks
 *      @partial: pointer to the last triple within a chain
 *      @goal:  place to store the result.
 *
 *      Normally this function finds the preferred place for block allocation,
 *      stores it in *@goal and returns zero. If the branch had been changed
 *      under us we return -EAGAIN.
 */

static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
                          Indirect *partial, unsigned long *goal)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        /* Writer: ->i_next_alloc* */
        if (block == ei->i_next_alloc_block + 1) {
                ei->i_next_alloc_block++;
                ei->i_next_alloc_goal++;
        }
        /* Writer: end */
        /* Reader: pointers, ->i_next_alloc* */
        if (verify_chain(chain, partial)) {
                /*
                 * try the heuristic for sequential allocation,
                 * failing that at least try to get decent locality.
                 */
                if (block == ei->i_next_alloc_block)
                        *goal = ei->i_next_alloc_goal;
                if (!*goal)
                        *goal = ext3_find_near(inode, partial);
                return 0;
        }
        /* Reader: end */
        return -EAGAIN;
}

/**
 *      ext3_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @num: depth of the chain (number of blocks to allocate)
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates @num blocks, zeroes out all but the last one,
 *      links them into chain and (if we are synchronous) writes them to disk.
 *      In other words, it prepares a branch that can be spliced onto the
 *      inode. It stores the information about that chain in the branch[], in
 *      the same format as ext3_get_branch() would do. We are calling it after
 *      we had read the existing part of chain and partial points to the last
 *      triple of that (one with zero ->key). Upon the exit we have the same
 *      picture as after the successful ext3_get_block(), except that in one
 *      place chain is disconnected - *branch->p is still zero (we did not
 *      set the last link), but branch->key contains the number that should
 *      be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */

static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                             int num,
                             unsigned long goal,
                             int *offsets,
                             Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int n = 0, keys = 0;
        int err = 0;
        int i;
        int parent = ext3_alloc_block(handle, inode, goal, &err);

        branch[0].key = cpu_to_le32(parent);
        if (parent) {
                for (n = 1; n < num; n++) {
                        struct buffer_head *bh;
                        /* Allocate the next block */
                        int nr = ext3_alloc_block(handle, inode, parent, &err);
                        if (!nr)
                                break;
                        branch[n].key = cpu_to_le32(nr);
                        keys = n+1;

                        /*
                         * Get buffer_head for parent block, zero it out
                         * and set the pointer to new one, then send
                         * parent to disk.
                         */
                        bh = sb_getblk(inode->i_sb, parent);
                        branch[n].bh = bh;
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        err = ext3_journal_get_create_access(handle, bh);
                        if (err) {
                                unlock_buffer(bh);
                                brelse(bh);
                                break;
                        }

                        memset(bh->b_data, 0, blocksize);
                        branch[n].p = (__le32*) bh->b_data + offsets[n];
                        *branch[n].p = branch[n].key;
                        BUFFER_TRACE(bh, "marking uptodate");
                        set_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (err)
                                break;

                        parent = nr;
                }
        }
        if (n == num)
                return 0;

        /* Allocation failed, free what we already allocated */
        for (i = 1; i < keys; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < keys; i++)
                ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
        return err;
}

/**
 *      ext3_splice_branch - splice the allocated branch onto inode.
 *      @inode: owner
 *      @block: (logical) number of block we are adding
 *      @chain: chain of indirect blocks (with a missing link - see
 *              ext3_alloc_branch)
 *      @where: location of missing link
 *      @num:   number of blocks we are adding
 *
 *      This function verifies that chain (up to the missing link) had not
 *      changed, fills the missing link and does all housekeeping needed in
 *      inode (->i_blocks, etc.). In case of success we end up with the full
 *      chain to new block and return 0. Otherwise (== chain had been changed)
 *      we free the new blocks (forgetting their buffer_heads, indeed) and
 *      return -EAGAIN.
 */

static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                              Indirect chain[4], Indirect *where, int num)
{
        int i;
        int err = 0;
        struct ext3_inode_info *ei = EXT3_I(inode);

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* Verify that place we are splicing to is still there and vacant */

        /* Writer: pointers, ->i_next_alloc* */
        if (!verify_chain(chain, where-1) || *where->p)
                /* Writer: end */
                goto changed;

        /* That's it */

        *where->p = where->key;
        ei->i_next_alloc_block = block;
        ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
        /* Writer: end */

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * akpm: If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

changed:
        /*
         * AKPM: if where[i].bh isn't part of the current updating
         * transaction then we explode nastily.  Test this code path.
         */
        jbd_debug(1, "the chain changed: try again\n");
        err = -EAGAIN;

err_out:
        for (i = 1; i < num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
        }
        /* For the normal collision cleanup case, we free up the blocks.
         * On genuine filesystem errors we don't even think about doing
         * that. */
        if (err == -EAGAIN)
                for (i = 0; i < num; i++)
                        ext3_free_blocks(handle, inode,
                                         le32_to_cpu(where[i].key), 1);
        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create, int extend_disksize)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        unsigned long goal;
        int left;
        int boundary = 0;
        int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
        struct ext3_inode_info *ei = EXT3_I(inode);

        J_ASSERT(handle != NULL || create == 0);

        if (depth == 0)
                goto out;

reread:
        partial = ext3_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                clear_buffer_new(bh_result);
got_it:
                map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
                if (boundary)
                        set_buffer_boundary(bh_result);
                /* Clean up and exit */
                partial = chain+depth-1; /* the whole chain */
                goto cleanup;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO) {
cleanup:
                while (partial > chain) {
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
                        partial--;
                }
                BUFFER_TRACE(bh_result, "returned");
out:
                return err;
        }

        /*
         * Indirect block might be removed by truncate while we were
         * reading it. Handling of that case (forget what we've got and
         * reread) is taken out of the main path.
         */
        if (err == -EAGAIN)
                goto changed;

        goal = 0;
        down(&ei->truncate_sem);
        if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
                up(&ei->truncate_sem);
                goto changed;
        }

        left = (chain + depth) - partial;

        /*
         * Block out ext3_truncate while we alter the tree
         */
        err = ext3_alloc_branch(handle, inode, left, goal,
                                        offsets+(partial-chain), partial);

        /* The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock, chain,
                                         partial, left);
        /* i_disksize growing is protected by truncate_sem
         * don't forget to protect it if you're about to implement
         * concurrent ext3_get_block() -bzzz */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        up(&ei->truncate_sem);
        if (err == -EAGAIN)
                goto changed;
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
        goto got_it;

changed:
        while (partial > chain) {
                jbd_debug(1, "buffer chain changed, retrying\n");
                BUFFER_TRACE(partial->bh, "brelsing");
                brelse(partial->bh);
                partial--;
        }
        goto reread;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = NULL;
        int ret;

        if (create) {
                handle = ext3_journal_current_handle();
                J_ASSERT(handle != 0);
        }
        ret = ext3_get_block_handle(handle, inode, iblock,
                                bh_result, create, 1);
        return ret;
}

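/*
 * Credit budget for a direct-IO write: the standard reserve plus headroom
 * (the extra 32) for the block, bitmap and group-descriptor updates a
 * single get_block call may need along the way.
 */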
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh_result,
                int create)
{
        handle_t *handle = journal_current_handle();
        int ret = 0;

        if (!handle)
                goto get_block;         /* A read */

        if (handle->h_transaction->t_state == T_LOCKED) {
                /*
                 * Huge direct-io writes can hold off commits for long
                 * periods of time.  Let this commit run.
                 */
                ext3_journal_stop(handle);
                handle = ext3_journal_start(inode, DIO_CREDITS);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                goto get_block;
        }

        if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
                /*
                 * Getting low on buffer credits...
                 */
                ret = ext3_journal_extend(handle, DIO_CREDITS);
                if (ret > 0) {
                        /*
                         * Couldn't extend the transaction.  Start a new one.
                         */
                        ret = ext3_journal_restart(handle, DIO_CREDITS);
                }
        }

get_block:
        if (ret == 0)
                ret = ext3_get_block_handle(handle, inode, iblock,
                                        bh_result, create, 0);
        bh_result->b_size = (1 << inode->i_blkbits);
        return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
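/*
 * ext3_getblk() drives ext3_get_block_handle() with an on-stack dummy
 * buffer_head purely to collect the mapping (b_blocknr, BH_New); the real
 * buffer is then obtained with sb_getblk() and, if freshly allocated,
 * zeroed and journaled as metadata.
 */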
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
                                long block, int create, int * errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
        if (!*errp && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != 0);

                        /* Now that we do not always journal data, we
                           should keep in mind whether this should
                           always journal the new buffer as metadata.
                           For now, regular file writes use
                           ext3_get_block instead, so it's not a
                           problem. */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
        return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                               int block, int create, int *err)
{
        struct buffer_head * bh;
        int prev_blocks;

        prev_blocks = inode->i_blocks;

        bh = ext3_getblk (handle, inode, block, create, err);
        if (!bh)
                return bh;
#ifdef EXT3_PREALLOCATE
        /*
         * If the inode has grown, and this is a directory, then use a few
         * more of the preallocated blocks to keep directory fragmentation
         * down.  The preallocated blocks are guaranteed to be contiguous.
         */
        if (create &&
            S_ISDIR(inode->i_mode) &&
            inode->i_blocks > prev_blocks &&
            EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
                                    EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
                int i;
                struct buffer_head *tmp_bh;

                for (i = 1;
                     EXT3_I(inode)->i_prealloc_count &&
                     i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
                     i++) {
                        /*
                         * ext3_getblk will zero out the contents of the
                         * directory for us
                         */
                        tmp_bh = ext3_getblk(handle, inode,
                                                block+i, create, err);
                        if (!tmp_bh) {
                                brelse (bh);
                                return 0;
                        }
                        brelse (tmp_bh);
                }
        }
#endif
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block (READ, 1, &bh);
        wait_on_buffer (bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse (bh);
        *err = -EIO;
        return NULL;
}

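/*
 * Apply fn() to each buffer of the page that overlaps the byte range
 * [from, to); buffers wholly outside the range are skipped, and if any
 * skipped buffer is not uptodate *partial is set, telling the caller it
 * cannot simply mark the whole page uptodate.
 */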
1032 static int walk_page_buffers(   handle_t *handle,
1033                                 struct buffer_head *head,
1034                                 unsigned from,
1035                                 unsigned to,
1036                                 int *partial,
1037                                 int (*fn)(      handle_t *handle,
1038                                                 struct buffer_head *bh))
1039 {
1040         struct buffer_head *bh;
1041         unsigned block_start, block_end;
1042         unsigned blocksize = head->b_size;
1043         int err, ret = 0;
1044         struct buffer_head *next;
1045
1046         for (   bh = head, block_start = 0;
1047                 ret == 0 && (bh != head || !block_start);
1048                 block_start = block_end, bh = next)
1049         {
1050                 next = bh->b_this_page;
1051                 block_end = block_start + blocksize;
1052                 if (block_end <= from || block_start >= to) {
1053                         if (partial && !buffer_uptodate(bh))
1054                                 *partial = 1;
1055                         continue;
1056                 }
1057                 err = (*fn)(handle, bh);
1058                 if (!ret)
1059                         ret = err;
1060         }
1061         return ret;
1062 }
1063
1064 /*
1065  * To preserve ordering, it is essential that the hole instantiation and
1066  * the data write be encapsulated in a single transaction.  We cannot
1067  * close off a transaction and start a new one between the ext3_get_block()
1068  * and the commit_write().  So doing the journal_start at the start of
1069  * prepare_write() is the right place.
1070  *
1071  * Also, this function can nest inside ext3_writepage() ->
1072  * block_write_full_page(). In that case, we *know* that ext3_writepage()
1073  * has generated enough buffer credits to do the whole page.  So we won't
1074  * block on the journal in that case, which is good, because the caller may
1075  * be PF_MEMALLOC.
1076  *
1077  * By accident, ext3 can be reentered when a transaction is open via
1078  * quota file writes.  If we were to commit the transaction while thus
1079  * reentered, there can be a deadlock - we would be holding a quota
1080  * lock, and the commit would never complete if another thread had a
1081  * transaction open and was blocking on the quota lock - a ranking
1082  * violation.
1083  *
1084  * So what we do is to rely on the fact that journal_stop/journal_start
1085  * will _not_ run commit under these circumstances because handle->h_ref
1086  * is elevated.  We'll still have enough credits for the tiny quotafile
1087  * write.  
1088  */
1089
1090 static int do_journal_get_write_access(handle_t *handle, 
1091                                        struct buffer_head *bh)
1092 {
1093         if (!buffer_mapped(bh) || buffer_freed(bh))
1094                 return 0;
1095         return ext3_journal_get_write_access(handle, bh);
1096 }
1097
1098 static int ext3_prepare_write(struct file *file, struct page *page,
1099                               unsigned from, unsigned to)
1100 {
1101         struct inode *inode = page->mapping->host;
1102         int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
1103         handle_t *handle;
1104         int retries = 0;
1105
1106 retry:
1107         handle = ext3_journal_start(inode, needed_blocks);
1108         if (IS_ERR(handle)) {
1109                 ret = PTR_ERR(handle);
1110                 goto out;
1111         }
1112         ret = block_prepare_write(page, from, to, ext3_get_block);
1113         if (ret)
1114                 goto prepare_write_failed;
1115
1116         if (ext3_should_journal_data(inode)) {
1117                 ret = walk_page_buffers(handle, page_buffers(page),
1118                                 from, to, NULL, do_journal_get_write_access);
1119         }
1120 prepare_write_failed:
1121         if (ret)
1122                 ext3_journal_stop(handle);
1123         if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1124                 goto retry;
1125 out:
1126         return ret;
1127 }
1128
1129 static int
1130 ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1131 {
1132         int err = journal_dirty_data(handle, bh);
1133         if (err)
1134                 ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1135                                                 bh, handle,err);
1136         return err;
1137 }
1138
1139 /* For commit_write() in data=journal mode */
1140 static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
1141 {
1142         if (!buffer_mapped(bh) || buffer_freed(bh))
1143                 return 0;
1144         set_buffer_uptodate(bh);
1145         return ext3_journal_dirty_metadata(handle, bh);
1146 }
1147
1148 /*
1149  * We need to pick up the new inode size which generic_commit_write gave us
1150  * `file' can be NULL - eg, when called from page_symlink().
1151  *
1152  * ext3 never places buffers on inode->i_mapping->private_list.  metadata
1153  * buffers are managed internally.
1154  */
1155
1156 static int ext3_ordered_commit_write(struct file *file, struct page *page,
1157                              unsigned from, unsigned to)
1158 {
1159         handle_t *handle = ext3_journal_current_handle();
1160         struct inode *inode = page->mapping->host;
1161         int ret = 0, ret2;
1162
1163         ret = walk_page_buffers(handle, page_buffers(page),
1164                 from, to, NULL, ext3_journal_dirty_data);
1165
1166         if (ret == 0) {
1167                 /*
1168                  * generic_commit_write() will run mark_inode_dirty() if i_size
1169                  * changes.  So let's piggyback the i_disksize mark_inode_dirty
1170                  * into that.
1171                  */
1172                 loff_t new_i_size;
1173
1174                 new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1175                 if (new_i_size > EXT3_I(inode)->i_disksize)
1176                         EXT3_I(inode)->i_disksize = new_i_size;
1177                 ret = generic_commit_write(file, page, from, to);
1178         }
1179         ret2 = ext3_journal_stop(handle);
1180         if (!ret)
1181                 ret = ret2;
1182         return ret;
1183 }
1184
1185 static int ext3_writeback_commit_write(struct file *file, struct page *page,
1186                              unsigned from, unsigned to)
1187 {
1188         handle_t *handle = ext3_journal_current_handle();
1189         struct inode *inode = page->mapping->host;
1190         int ret = 0, ret2;
1191         loff_t new_i_size;
1192
1193         new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1194         if (new_i_size > EXT3_I(inode)->i_disksize)
1195                 EXT3_I(inode)->i_disksize = new_i_size;
1196         ret = generic_commit_write(file, page, from, to);
1197         ret2 = ext3_journal_stop(handle);
1198         if (!ret)
1199                 ret = ret2;
1200         return ret;
1201 }
1202
1203 static int ext3_journalled_commit_write(struct file *file,
1204                         struct page *page, unsigned from, unsigned to)
1205 {
1206         handle_t *handle = ext3_journal_current_handle();
1207         struct inode *inode = page->mapping->host;
1208         int ret = 0, ret2;
1209         int partial = 0;
1210         loff_t pos;
1211
1212         /*
1213          * Here we duplicate the generic_commit_write() functionality
1214          */
1215         pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1216
1217         ret = walk_page_buffers(handle, page_buffers(page), from,
1218                                 to, &partial, commit_write_fn);
1219         if (!partial)
1220                 SetPageUptodate(page);
1221         if (pos > inode->i_size)
1222                 i_size_write(inode, pos);
1223         EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1224         if (inode->i_size > EXT3_I(inode)->i_disksize) {
1225                 EXT3_I(inode)->i_disksize = inode->i_size;
1226                 ret2 = ext3_mark_inode_dirty(handle, inode);
1227                 if (!ret) 
1228                         ret = ret2;
1229         }
1230         ret2 = ext3_journal_stop(handle);
1231         if (!ret)
1232                 ret = ret2;
1233         return ret;
1234 }
1235
1236 /* 
1237  * bmap() is special.  It gets used by applications such as lilo and by
1238  * the swapper to find the on-disk block of a specific piece of data.
1239  *
1240  * Naturally, this is dangerous if the block concerned is still in the
1241  * journal.  If somebody makes a swapfile on an ext3 data-journaling
1242  * filesystem and enables swap, then they may get a nasty shock when the
1243  * data getting swapped to that swapfile suddenly gets overwritten by
1244  * the original zero's written out previously to the journal and
1245  * awaiting writeback in the kernel's buffer cache. 
1246  *
1247  * So, if we see any bmap calls here on a modified, data-journaled file,
1248  * take extra steps to flush any blocks which might be in the cache. 
1249  */
1250 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1251 {
1252         struct inode *inode = mapping->host;
1253         journal_t *journal;
1254         int err;
1255
1256         if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
1257                 /* 
1258                  * This is a REALLY heavyweight approach, but the use of
1259                  * bmap on dirty files is expected to be extremely rare:
1260                  * only if we run lilo or swapon on a freshly made file
1261                  * do we expect this to happen. 
1262                  *
1263                  * (bmap requires CAP_SYS_RAWIO so this does not
1264                  * represent an unprivileged user DOS attack --- we'd be
1265                  * in trouble if mortal users could trigger this path at
1266                  * will.) 
1267                  *
1268                  * NB. EXT3_STATE_JDATA is not set on files other than
1269                  * regular files.  If somebody wants to bmap a directory
1270                  * or symlink and gets confused because the buffer
1271                  * hasn't yet been flushed to disk, they deserve
1272                  * everything they get.
1273                  */
1274
1275                 EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
1276                 journal = EXT3_JOURNAL(inode);
1277                 journal_lock_updates(journal);
1278                 err = journal_flush(journal);
1279                 journal_unlock_updates(journal);
1280
1281                 if (err)
1282                         return 0;
1283         }
1284
1285         return generic_block_bmap(mapping,block,ext3_get_block);
1286 }
1287
1288 static int bget_one(handle_t *handle, struct buffer_head *bh)
1289 {
1290         get_bh(bh);
1291         return 0;
1292 }
1293
1294 static int bput_one(handle_t *handle, struct buffer_head *bh)
1295 {
1296         put_bh(bh);
1297         return 0;
1298 }
1299
1300 static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1301 {
1302         if (buffer_mapped(bh))
1303                 return ext3_journal_dirty_data(handle, bh);
1304         return 0;
1305 }
1306
1307 /*
1308  * Note that we always start a transaction even if we're not journalling
1309  * data.  This is to preserve ordering: any hole instantiation within
1310  * __block_write_full_page -> ext3_get_block() should be journalled
1311  * along with the data so we don't crash and then get metadata which
1312  * refers to old data.
1313  *
1314  * In all journalling modes block_write_full_page() will start the I/O.
1315  *
1316  * Problem:
1317  *
1318  *      ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1319  *              ext3_writepage()
1320  *
1321  * Similar for:
1322  *
1323  *      ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1324  *
1325  * Same applies to ext3_get_block().  We will deadlock on various things like
1326  * lock_journal and i_truncate_sem.
1327  *
1328  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1329  * allocations fail.
1330  *
1331  * 16May01: If we're reentered then journal_current_handle() will be
1332  *          non-zero. We simply *return*.
1333  *
1334  * 1 July 2001: @@@ FIXME:
1335  *   In journalled data mode, a data buffer may be metadata against the
1336  *   current transaction.  But the same file is part of a shared mapping
1337  *   and someone does a writepage() on it.
1338  *
1339  *   We will move the buffer onto the async_data list, but *after* it has
1340  *   been dirtied. So there's a small window where we have dirty data on
1341  *   BJ_Metadata.
1342  *
1343  *   Note that this only applies to the last partial page in the file.  The
1344  *   bit which block_write_full_page() uses prepare/commit for.  (That's
1345  *   broken code anyway: it's wrong for msync()).
1346  *
1347  *   It's a rare case: affects the final partial page, for journalled data
1348  *   where the file is subject to bith write() and writepage() in the same
1349  *   transction.  To fix it we'll need a custom block_write_full_page().
1350  *   We'll probably need that anyway for journalling writepage() output.
1351  *
1352  * We don't honour synchronous mounts for writepage().  That would be
1353  * disastrous.  Any write() or metadata operation will sync the fs for
1354  * us.
1355  *
1356  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1357  * we don't need to open a transaction here.
1358  */
1359 static int ext3_ordered_writepage(struct page *page,
1360                         struct writeback_control *wbc)
1361 {
1362         struct inode *inode = page->mapping->host;
1363         struct buffer_head *page_bufs;
1364         handle_t *handle = NULL;
1365         int ret = 0;
1366         int err;
1367
1368         J_ASSERT(PageLocked(page));
1369
1370         /*
1371          * We give up here if we're reentered, because it might be for a
1372          * different filesystem.
1373          */
1374         if (ext3_journal_current_handle())
1375                 goto out_fail;
1376
1377         handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1378
1379         if (IS_ERR(handle)) {
1380                 ret = PTR_ERR(handle);
1381                 goto out_fail;
1382         }
1383
1384         if (!page_has_buffers(page)) {
1385                 create_empty_buffers(page, inode->i_sb->s_blocksize,
1386                                 (1 << BH_Dirty)|(1 << BH_Uptodate));
1387         }
1388         page_bufs = page_buffers(page);
1389         walk_page_buffers(handle, page_bufs, 0,
1390                         PAGE_CACHE_SIZE, NULL, bget_one);
1391
1392         ret = block_write_full_page(page, ext3_get_block, wbc);
1393
1394         /*
1395          * The page can become unlocked at any point now, and
1396          * truncate can then come in and change things.  So we
1397          * can't touch *page from now on.  But *page_bufs is
1398          * safe due to elevated refcount.
1399          */
1400
1401         /*
1402          * And attach them to the current transaction.  But only if 
1403          * block_write_full_page() succeeded.  Otherwise they are unmapped,
1404          * and generally junk.
1405          */
1406         if (ret == 0) {
1407                 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1408                                         NULL, journal_dirty_data_fn);
1409                 ret = err;
1411         }
1412         walk_page_buffers(handle, page_bufs, 0,
1413                         PAGE_CACHE_SIZE, NULL, bput_one);
1414         err = ext3_journal_stop(handle);
1415         if (!ret)
1416                 ret = err;
1417         return ret;
1418
1419 out_fail:
1420         redirty_page_for_writepage(wbc, page);
1421         unlock_page(page);
1422         return ret;
1423 }
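
/*
 * A sketch of the ordered-mode sequence above (error handling elided):
 *
 *	walk_page_buffers(..., bget_one)	pin the buffer_heads
 *	block_write_full_page()			map blocks, start the I/O
 *	walk_page_buffers(..., journal_dirty_data_fn)
 *						attach the data buffers to
 *						the current transaction
 *	walk_page_buffers(..., bput_one)	drop the pins
 *
 * The pins are what make it safe to keep using *page_bufs after
 * block_write_full_page() may have unlocked the page.
 */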
1424
1425 static int ext3_writeback_writepage(struct page *page,
1426                                 struct writeback_control *wbc)
1427 {
1428         struct inode *inode = page->mapping->host;
1429         handle_t *handle = NULL;
1430         int ret = 0;
1431         int err;
1432
1433         if (ext3_journal_current_handle())
1434                 goto out_fail;
1435
1436         handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1437         if (IS_ERR(handle)) {
1438                 ret = PTR_ERR(handle);
1439                 goto out_fail;
1440         }
1441
1442         ret = block_write_full_page(page, ext3_get_block, wbc);
1443         err = ext3_journal_stop(handle);
1444         if (!ret)
1445                 ret = err;
1446         return ret;
1447
1448 out_fail:
1449         redirty_page_for_writepage(wbc, page);
1450         unlock_page(page);
1451         return ret;
1452 }
1453
1454 static int ext3_journalled_writepage(struct page *page,
1455                                 struct writeback_control *wbc)
1456 {
1457         struct inode *inode = page->mapping->host;
1458         handle_t *handle = NULL;
1459         int ret = 0;
1460         int err;
1461
1462         if (ext3_journal_current_handle())
1463                 goto no_write;
1464
1465         handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1466         if (IS_ERR(handle)) {
1467                 ret = PTR_ERR(handle);
1468                 goto no_write;
1469         }
1470
1471         if (!page_has_buffers(page) || PageChecked(page)) {
1472                 /*
1473                  * It's mmapped pagecache.  Add buffers and journal it.  There
1474                  * doesn't seem much point in redirtying the page here.
1475                  */
1476                 ClearPageChecked(page);
1477                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1478                                         ext3_get_block);
1479                 if (ret != 0)
1480                         goto out_unlock;
1481                 ret = walk_page_buffers(handle, page_buffers(page), 0,
1482                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1483
1484                 err = walk_page_buffers(handle, page_buffers(page), 0,
1485                                 PAGE_CACHE_SIZE, NULL, commit_write_fn);
1486                 if (ret == 0)
1487                         ret = err;
1488                 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1489                 unlock_page(page);
1490         } else {
1491                 /*
1492                  * It may be a page full of checkpoint-mode buffers.  We don't
1493                  * really know unless we go poke around in the buffer_heads.
1494                  * But block_write_full_page will do the right thing.
1495                  */
1496                 ret = block_write_full_page(page, ext3_get_block, wbc);
1497         }
1498         err = ext3_journal_stop(handle);
1499         if (!ret)
1500                 ret = err;
1501 out:
1502         return ret;
1503
1504 no_write:
1505         redirty_page_for_writepage(wbc, page);
1506 out_unlock:
1507         unlock_page(page);
1508         goto out;
1509 }
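
/*
 * In outline: a page that went through ext3_journalled_set_page_dirty()
 * arrives here with PageChecked set and its buffers never journalled, so
 * we run the prepare-write/commit-write pair over the whole page to get
 * the buffers into the journal now.  Any other page can safely go through
 * block_write_full_page(), as in the other journalling modes.
 */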
1510
1511 static int ext3_readpage(struct file *file, struct page *page)
1512 {
1513         return mpage_readpage(page, ext3_get_block);
1514 }
1515
1516 static int
1517 ext3_readpages(struct file *file, struct address_space *mapping,
1518                 struct list_head *pages, unsigned nr_pages)
1519 {
1520         return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1521 }
1522
1523 static int ext3_invalidatepage(struct page *page, unsigned long offset)
1524 {
1525         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1526
1527         /*
1528          * If it's a full truncate we just forget about the pending dirtying
1529          */
1530         if (offset == 0)
1531                 ClearPageChecked(page);
1532
1533         return journal_invalidatepage(journal, page, offset);
1534 }
1535
1536 static int ext3_releasepage(struct page *page, int wait)
1537 {
1538         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1539
1540         WARN_ON(PageChecked(page));
1541         return journal_try_to_free_buffers(journal, page, wait);
1542 }
1543
1544 /*
1545  * If the O_DIRECT write will extend the file then add this inode to the
1546  * orphan list.  So recovery will truncate it back to the original size
1547  * if the machine crashes during the write.
1548  *
1549  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1550  * crashes then stale disk data _may_ be exposed inside the file.
1551  */
1552 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1553                         const struct iovec *iov, loff_t offset,
1554                         unsigned long nr_segs)
1555 {
1556         struct file *file = iocb->ki_filp;
1557         struct inode *inode = file->f_mapping->host;
1558         struct ext3_inode_info *ei = EXT3_I(inode);
1559         handle_t *handle = NULL;
1560         ssize_t ret;
1561         int orphan = 0;
1562         size_t count = iov_length(iov, nr_segs);
1563
1564         if (rw == WRITE) {
1565                 loff_t final_size = offset + count;
1566
1567                 handle = ext3_journal_start(inode, DIO_CREDITS);
1568                 if (IS_ERR(handle)) {
1569                         ret = PTR_ERR(handle);
1570                         goto out;
1571                 }
1572                 if (final_size > inode->i_size) {
1573                         ret = ext3_orphan_add(handle, inode);
1574                         if (ret)
1575                                 goto out_stop;
1576                         orphan = 1;
1577                         ei->i_disksize = inode->i_size;
1578                 }
1579         }
1580
1581         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
1582                                  offset, nr_segs,
1583                                  ext3_direct_io_get_blocks, NULL);
1584
1585         /*
1586          * Reacquire the handle: ext3_direct_io_get_block() can restart the
1587          * transaction
1588          */
1589         handle = journal_current_handle();
1590
1591 out_stop:
1592         if (handle) {
1593                 int err;
1594
1595                 if (orphan) 
1596                         ext3_orphan_del(handle, inode);
1597                 if (orphan && ret > 0) {
1598                         loff_t end = offset + ret;
1599                         if (end > inode->i_size) {
1600                                 ei->i_disksize = end;
1601                                 i_size_write(inode, end);
1602                                 /* ret > 0 here (bytes written), so an error
1603                                  * from this call cannot be returned. */
1604                                 ext3_mark_inode_dirty(handle, inode);
1605                         }
1606                 }
1607                 err = ext3_journal_stop(handle);
1608                 if (ret == 0)
1609                         ret = err;
1610         }
1611 out:
1612         return ret;
1613 }
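
/*
 * For example (illustrative): an O_DIRECT write that starts at i_size is
 * first recorded on the orphan list; if the machine crashes mid-write,
 * recovery truncates the file back to its old size rather than leaving a
 * partially-written tail.  Only when blockdev_direct_IO() returns a
 * positive byte count do we advance i_size/i_disksize, and only then is
 * the orphan record dropped.
 */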
1614
1615 /*
1616  * Pages can be marked dirty completely asynchronously from ext3's journalling
1617  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1618  * much here because ->set_page_dirty is called under VFS locks.  The page is
1619  * not necessarily locked.
1620  *
1621  * We cannot just dirty the page and leave attached buffers clean, because the
1622  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1623  * or jbddirty because all the journalling code will explode.
1624  *
1625  * So what we do is to mark the page "pending dirty" and next time writepage
1626  * is called, propagate that into the buffers appropriately.
1627  */
1628 static int ext3_journalled_set_page_dirty(struct page *page)
1629 {
1630         SetPageChecked(page);
1631         return __set_page_dirty_nobuffers(page);
1632 }
1633
1634 static struct address_space_operations ext3_ordered_aops = {
1635         .readpage       = ext3_readpage,
1636         .readpages      = ext3_readpages,
1637         .writepage      = ext3_ordered_writepage,
1638         .sync_page      = block_sync_page,
1639         .prepare_write  = ext3_prepare_write,
1640         .commit_write   = ext3_ordered_commit_write,
1641         .bmap           = ext3_bmap,
1642         .invalidatepage = ext3_invalidatepage,
1643         .releasepage    = ext3_releasepage,
1644         .direct_IO      = ext3_direct_IO,
1645 };
1646
1647 static struct address_space_operations ext3_writeback_aops = {
1648         .readpage       = ext3_readpage,
1649         .readpages      = ext3_readpages,
1650         .writepage      = ext3_writeback_writepage,
1651         .sync_page      = block_sync_page,
1652         .prepare_write  = ext3_prepare_write,
1653         .commit_write   = ext3_writeback_commit_write,
1654         .bmap           = ext3_bmap,
1655         .invalidatepage = ext3_invalidatepage,
1656         .releasepage    = ext3_releasepage,
1657         .direct_IO      = ext3_direct_IO,
1658 };
1659
1660 static struct address_space_operations ext3_journalled_aops = {
1661         .readpage       = ext3_readpage,
1662         .readpages      = ext3_readpages,
1663         .writepage      = ext3_journalled_writepage,
1664         .sync_page      = block_sync_page,
1665         .prepare_write  = ext3_prepare_write,
1666         .commit_write   = ext3_journalled_commit_write,
1667         .set_page_dirty = ext3_journalled_set_page_dirty,
1668         .bmap           = ext3_bmap,
1669         .invalidatepage = ext3_invalidatepage,
1670         .releasepage    = ext3_releasepage,
1671 };
1672
1673 void ext3_set_aops(struct inode *inode)
1674 {
1675         if (ext3_should_order_data(inode))
1676                 inode->i_mapping->a_ops = &ext3_ordered_aops;
1677         else if (ext3_should_writeback_data(inode))
1678                 inode->i_mapping->a_ops = &ext3_writeback_aops;
1679         else
1680                 inode->i_mapping->a_ops = &ext3_journalled_aops;
1681 }
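
/*
 * For example (illustrative): a regular file on a filesystem mounted with
 * -o data=writeback makes ext3_should_writeback_data() true, so its
 * mapping gets ext3_writeback_aops and writepage() never files the data
 * buffers with the journal.
 */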
1682
1683 /*
1684  * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1685  * up to the end of the block which corresponds to `from'.
1686  * This is required during truncate. We need to physically zero the tail end
1687  * of that block so it doesn't yield old data if the file is later grown.
1688  */
1689 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1690                 struct address_space *mapping, loff_t from)
1691 {
1692         unsigned long index = from >> PAGE_CACHE_SHIFT;
1693         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1694         unsigned blocksize, iblock, length, pos;
1695         struct inode *inode = mapping->host;
1696         struct buffer_head *bh;
1697         int err;
1698         void *kaddr;
1699
1700         blocksize = inode->i_sb->s_blocksize;
1701         length = blocksize - (offset & (blocksize - 1));
1702         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1703
1704         if (!page_has_buffers(page))
1705                 create_empty_buffers(page, blocksize, 0);
1706
1707         /* Find the buffer that contains "offset" */
1708         bh = page_buffers(page);
1709         pos = blocksize;
1710         while (offset >= pos) {
1711                 bh = bh->b_this_page;
1712                 iblock++;
1713                 pos += blocksize;
1714         }
1715
1716         err = 0;
1717         if (buffer_freed(bh)) {
1718                 BUFFER_TRACE(bh, "freed: skip");
1719                 goto unlock;
1720         }
1721
1722         if (!buffer_mapped(bh)) {
1723                 BUFFER_TRACE(bh, "unmapped");
1724                 ext3_get_block(inode, iblock, bh, 0);
1725                 /* unmapped? It's a hole - nothing to do */
1726                 if (!buffer_mapped(bh)) {
1727                         BUFFER_TRACE(bh, "still unmapped");
1728                         goto unlock;
1729                 }
1730         }
1731
1732         /* Ok, it's mapped. Make sure it's up-to-date */
1733         if (PageUptodate(page))
1734                 set_buffer_uptodate(bh);
1735
1736         if (!buffer_uptodate(bh)) {
1737                 err = -EIO;
1738                 ll_rw_block(READ, 1, &bh);
1739                 wait_on_buffer(bh);
1740                 /* Uhhuh. Read error. Complain and punt. */
1741                 if (!buffer_uptodate(bh))
1742                         goto unlock;
1743         }
1744
1745         if (ext3_should_journal_data(inode)) {
1746                 BUFFER_TRACE(bh, "get write access");
1747                 err = ext3_journal_get_write_access(handle, bh);
1748                 if (err)
1749                         goto unlock;
1750         }
1751
1752         kaddr = kmap_atomic(page, KM_USER0);
1753         memset(kaddr + offset, 0, length);
1754         flush_dcache_page(page);
1755         kunmap_atomic(kaddr, KM_USER0);
1756
1757         BUFFER_TRACE(bh, "zeroed end of block");
1758
1759         err = 0;
1760         if (ext3_should_journal_data(inode)) {
1761                 err = ext3_journal_dirty_metadata(handle, bh);
1762         } else {
1763                 if (ext3_should_order_data(inode))
1764                         err = ext3_journal_dirty_data(handle, bh);
1765                 mark_buffer_dirty(bh);
1766         }
1767
1768 unlock:
1769         unlock_page(page);
1770         page_cache_release(page);
1771         return err;
1772 }
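
/*
 * Worked example (illustrative): with 4096-byte pages, a 1024-byte
 * blocksize and from == 10000, we get index == 2, offset == 1808 and a
 * starting iblock of 8; the loop above walks to the second buffer in the
 * page (iblock 9, covering file bytes 9216..10239) and zeroes its final
 * length == 1024 - (1808 & 1023) == 240 bytes.
 */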
1773
1774 /*
1775  * Probably it should be a library function... search for first non-zero word
1776  * or memcmp with zero_page, whatever is better for particular architecture.
1777  * Linus?
1778  */
1779 static inline int all_zeroes(__le32 *p, __le32 *q)
1780 {
1781         while (p < q)
1782                 if (*p++)
1783                         return 0;
1784         return 1;
1785 }
1786
1787 /**
1788  *      ext3_find_shared - find the indirect blocks for partial truncation.
1789  *      @inode:   inode in question
1790  *      @depth:   depth of the affected branch
1791  *      @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1792  *      @chain:   place to store the pointers to partial indirect blocks
1793  *      @top:     place to the (detached) top of branch
1794  *
1795  *      This is a helper function used by ext3_truncate().
1796  *
1797  *      When we do truncate() we may have to clean the ends of several
1798  *      indirect blocks but leave the blocks themselves alive. Block is
1799  *      partially truncated if some data below the new i_size is referenced
1800  *      from it (and it is on the path to the first completely truncated
1801  *      data block, indeed).  We have to free the top of that path along
1802  *      with everything to the right of the path. Since no allocation
1803  *      past the truncation point is possible until ext3_truncate()
1804  *      finishes, we may safely do the latter, but top of branch may
1805  *      require special attention - pageout below the truncation point
1806  *      might try to populate it.
1807  *
1808  *      We atomically detach the top of branch from the tree, store the
1809  *      block number of its root in *@top, pointers to buffer_heads of
1810  *      partially truncated blocks - in @chain[].bh and pointers to
1811  *      their last elements that should not be removed - in
1812  *      @chain[].p. Return value is the pointer to last filled element
1813  *      of @chain.
1814  *
1815  *      The work of actually freeing the subtrees is left to the caller:
1816  *              a) free the subtree starting from *@top
1817  *              b) free the subtrees whose roots are stored in
1818  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1819  *              c) free the subtrees growing from the inode past the @chain[0].
1820  *                      (no partially truncated stuff there).  */
1821
1822 static Indirect *ext3_find_shared(struct inode *inode,
1823                                 int depth,
1824                                 int offsets[4],
1825                                 Indirect chain[4],
1826                                 __le32 *top)
1827 {
1828         Indirect *partial, *p;
1829         int k, err;
1830
1831         *top = 0;
1832         /* Make k index the deepest non-null offset + 1 */
1833         for (k = depth; k > 1 && !offsets[k-1]; k--)
1834                 ;
1835         partial = ext3_get_branch(inode, k, offsets, chain, &err);
1836         /* Writer: pointers */
1837         if (!partial)
1838                 partial = chain + k-1;
1839         /*
1840          * If the branch acquired continuation since we've looked at it -
1841          * fine, it should all survive and (new) top doesn't belong to us.
1842          */
1843         if (!partial->key && *partial->p)
1844                 /* Writer: end */
1845                 goto no_top;
1846         for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
1847                 ;
1848         /*
1849          * OK, we've found the last block that must survive. The rest of our
1850          * branch should be detached before unlocking. However, if that rest
1851          * of branch is all ours and does not grow immediately from the inode
1852          * it's easier to cheat and just decrement partial->p.
1853          */
1854         if (p == chain + k - 1 && p > chain) {
1855                 p->p--;
1856         } else {
1857                 *top = *p->p;
1858                 /* Nope, don't do this in ext3.  Must leave the tree intact */
1859 #if 0
1860                 *p->p = 0;
1861 #endif
1862         }
1863         /* Writer: end */
1864
1865         while (partial > p) {
1866                 brelse(partial->bh);
1867                 partial--;
1868         }
1870 no_top:
1871         return partial;
1872 }
1873
1874 /*
1875  * Zero a number of block pointers in either an inode or an indirect block.
1876  * If we restart the transaction we must again get write access to the
1877  * indirect block for further modification.
1878  *
1879  * We release `count' blocks on disk, but (last - first) may be greater
1880  * than `count' because there can be holes in there.
1881  */
1882 static void
1883 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
1884                 unsigned long block_to_free, unsigned long count,
1885                 __le32 *first, __le32 *last)
1886 {
1887         __le32 *p;
1888         if (try_to_extend_transaction(handle, inode)) {
1889                 if (bh) {
1890                         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1891                         ext3_journal_dirty_metadata(handle, bh);
1892                 }
1893                 ext3_mark_inode_dirty(handle, inode);
1894                 ext3_journal_test_restart(handle, inode);
1895                 if (bh) {
1896                         BUFFER_TRACE(bh, "retaking write access");
1897                         ext3_journal_get_write_access(handle, bh);
1898                 }
1899         }
1900
1901         /*
1902          * Any buffers which are on the journal will be in memory. We find
1903          * them on the hash table so journal_revoke() will run journal_forget()
1904          * on them.  We've already detached each block from the file, so
1905          * bforget() in journal_forget() should be safe.
1906          *
1907          * AKPM: turn on bforget in journal_forget()!!!
1908          */
1909         for (p = first; p < last; p++) {
1910                 u32 nr = le32_to_cpu(*p);
1911                 if (nr) {
1912                         struct buffer_head *bh;
1913
1914                         *p = 0;
1915                         bh = sb_find_get_block(inode->i_sb, nr);
1916                         ext3_forget(handle, 0, inode, bh, nr);
1917                 }
1918         }
1919
1920         ext3_free_blocks(handle, inode, block_to_free, count);
1921 }
1922
1923 /**
1924  * ext3_free_data - free a list of data blocks
1925  * @handle:     handle for this transaction
1926  * @inode:      inode we are dealing with
1927  * @this_bh:    indirect buffer_head which contains *@first and *@last
1928  * @first:      array of block numbers
1929  * @last:       points immediately past the end of array
1930  *
1931  * We are freeing all blocks referenced from that array (numbers are stored as
1932  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
1933  *
1934  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
1935  * blocks are contiguous then releasing them at one time will only affect one
1936  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
1937  * actually use a lot of journal space.
1938  *
1939  * @this_bh will be %NULL if @first and @last point into the inode's direct
1940  * block pointers.
1941  */
1942 static void ext3_free_data(handle_t *handle, struct inode *inode,
1943                            struct buffer_head *this_bh,
1944                            __le32 *first, __le32 *last)
1945 {
1946         unsigned long block_to_free = 0;    /* Starting block # of a run */
1947         unsigned long count = 0;            /* Number of blocks in the run */ 
1948         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
1949                                                corresponding to
1950                                                block_to_free */
1951         unsigned long nr;                   /* Current block # */
1952         __le32 *p;                          /* Pointer into inode/ind
1953                                                for current block */
1954         int err;
1955
1956         if (this_bh) {                          /* For indirect block */
1957                 BUFFER_TRACE(this_bh, "get_write_access");
1958                 err = ext3_journal_get_write_access(handle, this_bh);
1959                 /* Important: if we can't update the indirect pointers
1960                  * to the blocks, we can't free them. */
1961                 if (err)
1962                         return;
1963         }
1964
1965         for (p = first; p < last; p++) {
1966                 nr = le32_to_cpu(*p);
1967                 if (nr) {
1968                         /* accumulate blocks to free if they're contiguous */
1969                         if (count == 0) {
1970                                 block_to_free = nr;
1971                                 block_to_free_p = p;
1972                                 count = 1;
1973                         } else if (nr == block_to_free + count) {
1974                                 count++;
1975                         } else {
1976                                 ext3_clear_blocks(handle, inode, this_bh, 
1977                                                   block_to_free,
1978                                                   count, block_to_free_p, p);
1979                                 block_to_free = nr;
1980                                 block_to_free_p = p;
1981                                 count = 1;
1982                         }
1983                 }
1984         }
1985
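        /*
         * For example (illustrative): if the array held blocks
         * 100 101 102 0 200, the loop above flushed the run {100, 3} on
         * reaching 200 (the hole is simply skipped), and the call below
         * frees the final run {200, 1}.
         */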
1986         if (count > 0)
1987                 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
1988                                   count, block_to_free_p, p);
1989
1990         if (this_bh) {
1991                 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
1992                 ext3_journal_dirty_metadata(handle, this_bh);
1993         }
1994 }
1995
1996 /**
1997  *      ext3_free_branches - free an array of branches
1998  *      @handle: JBD handle for this transaction
1999  *      @inode: inode we are dealing with
2000  *      @parent_bh: the buffer_head which contains *@first and *@last
2001  *      @first: array of block numbers
2002  *      @last:  pointer immediately past the end of array
2003  *      @depth: depth of the branches to free
2004  *
2005  *      We are freeing all blocks referenced from these branches (numbers are
2006  *      stored as little-endian 32-bit) and updating @inode->i_blocks
2007  *      appropriately.
2008  */
2009 static void ext3_free_branches(handle_t *handle, struct inode *inode,
2010                                struct buffer_head *parent_bh,
2011                                __le32 *first, __le32 *last, int depth)
2012 {
2013         unsigned long nr;
2014         __le32 *p;
2015
2016         if (is_handle_aborted(handle))
2017                 return;
2018
2019         if (depth--) {
2020                 struct buffer_head *bh;
2021                 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2022                 p = last;
2023                 while (--p >= first) {
2024                         nr = le32_to_cpu(*p);
2025                         if (!nr)
2026                                 continue;               /* A hole */
2027
2028                         /* Go read the buffer for the next level down */
2029                         bh = sb_bread(inode->i_sb, nr);
2030
2031                         /*
2032                          * A read failure? Report error and clear slot
2033                          * (should be rare).
2034                          */
2035                         if (!bh) {
2036                                 ext3_error(inode->i_sb, "ext3_free_branches",
2037                                            "Read failure, inode=%lu, block=%lu",
2038                                            inode->i_ino, nr);
2039                                 continue;
2040                         }
2041
2042                         /* This zaps the entire block.  Bottom up. */
2043                         BUFFER_TRACE(bh, "free child branches");
2044                         ext3_free_branches(handle, inode, bh,
2045                                            (__le32*)bh->b_data,
2046                                            (__le32*)bh->b_data + addr_per_block,
2047                                            depth);
2048
2049                         /*
2050                          * We've probably journalled the indirect block several
2051                          * times during the truncate.  But it's no longer
2052                          * needed and we now drop it from the transaction via
2053                          * journal_revoke().
2054                          *
2055                          * That's easy if it's exclusively part of this
2056                          * transaction.  But if it's part of the committing
2057                          * transaction then journal_forget() will simply
2058                          * brelse() it.  That means that if the underlying
2059                          * block is reallocated in ext3_get_block(),
2060                          * unmap_underlying_metadata() will find this block
2061                          * and will try to get rid of it.  damn, damn.
2062                          *
2063                          * If this block has already been committed to the
2064                          * journal, a revoke record will be written.  And
2065                          * revoke records must be emitted *before* clearing
2066                          * this block's bit in the bitmaps.
2067                          */
2068                         ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2069
2070                         /*
2071                          * Everything below this pointer has been
2072                          * released.  Now let this top-of-subtree go.
2073                          *
2074                          * We want the freeing of this indirect block to be
2075                          * atomic in the journal with the updating of the
2076                          * bitmap block which owns it.  So make some room in
2077                          * the journal.
2078                          *
2079                          * We zero the parent pointer *after* freeing its
2080                          * pointee in the bitmaps, so if extend_transaction()
2081                          * for some reason fails to put the bitmap changes and
2082                          * the release into the same transaction, recovery
2083                          * will merely complain about releasing a free block,
2084                          * rather than leaking blocks.
2085                          */
2086                         if (is_handle_aborted(handle))
2087                                 return;
2088                         if (try_to_extend_transaction(handle, inode)) {
2089                                 ext3_mark_inode_dirty(handle, inode);
2090                                 ext3_journal_test_restart(handle, inode);
2091                         }
2092
2093                         ext3_free_blocks(handle, inode, nr, 1);
2094
2095                         if (parent_bh) {
2096                                 /*
2097                                  * The block which we have just freed is
2098                                  * pointed to by an indirect block: journal it
2099                                  */
2100                                 BUFFER_TRACE(parent_bh, "get_write_access");
2101                                 if (!ext3_journal_get_write_access(handle,
2102                                                                    parent_bh)){
2103                                         *p = 0;
2104                                         BUFFER_TRACE(parent_bh,
2105                                         "call ext3_journal_dirty_metadata");
2106                                         ext3_journal_dirty_metadata(handle, 
2107                                                                     parent_bh);
2108                                 }
2109                         }
2110                 }
2111         } else {
2112                 /* We have reached the bottom of the tree. */
2113                 BUFFER_TRACE(parent_bh, "free data blocks");
2114                 ext3_free_data(handle, inode, parent_bh, first, last);
2115         }
2116 }
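
/*
 * Note that the recursion above is bounded by @depth, which is at most 3
 * (a triply-indirect branch), so the stack usage stays small.
 */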
2117
2118 /*
2119  * ext3_truncate()
2120  *
2121  * We block out ext3_get_block() block instantiations across the entire
2122  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2123  * simultaneously on behalf of the same inode.
2124  *
2125  * As we work through the truncate and commit bits of it to the journal there
2126  * is one core, guiding principle: the file's tree must always be consistent on
2127  * disk.  We must be able to restart the truncate after a crash.
2128  *
2129  * The file's tree may be transiently inconsistent in memory (although it
2130  * probably isn't), but whenever we close off and commit a journal transaction,
2131  * the contents of (the filesystem + the journal) must be consistent and
2132  * restartable.  It's pretty simple, really: bottom up, right to left (although
2133  * left-to-right works OK too).
2134  *
2135  * Note that at recovery time, journal replay occurs *before* the restart of
2136  * truncate against the orphan inode list.
2137  *
2138  * The committed inode has the new, desired i_size (which is the same as
2139  * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2140  * that this inode's truncate did not complete and it will again call
2141  * ext3_truncate() to have another go.  So there will be instantiated blocks
2142  * to the right of the truncation point in a crashed ext3 filesystem.  But
2143  * that's fine - as long as they are linked from the inode, the post-crash
2144  * ext3_truncate() run will find them and release them.
2145  */
2146
2147 void ext3_truncate_nocheck(struct inode * inode)
2148 {
2149         handle_t *handle;
2150         struct ext3_inode_info *ei = EXT3_I(inode);
2151         __le32 *i_data = ei->i_data;
2152         int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2153         struct address_space *mapping = inode->i_mapping;
2154         int offsets[4];
2155         Indirect chain[4];
2156         Indirect *partial;
2157         __le32 nr = 0;
2158         int n;
2159         long last_block;
2160         unsigned blocksize = inode->i_sb->s_blocksize;
2161         struct page *page;
2162
2163         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2164             S_ISLNK(inode->i_mode)))
2165                 return;
2166         if (ext3_inode_is_fast_symlink(inode))
2167                 return;
2168
2169         ext3_discard_prealloc(inode);
2170
2171         /*
2172          * We have to lock the EOF page here, because lock_page() nests
2173          * outside journal_start().
2174          */
2175         if ((inode->i_size & (blocksize - 1)) == 0) {
2176                 /* Block boundary? Nothing to do */
2177                 page = NULL;
2178         } else {
2179                 page = grab_cache_page(mapping,
2180                                 inode->i_size >> PAGE_CACHE_SHIFT);
2181                 if (!page)
2182                         return;
2183         }
2184
2185         handle = start_transaction(inode);
2186         if (IS_ERR(handle)) {
2187                 if (page) {
2188                         clear_highpage(page);
2189                         flush_dcache_page(page);
2190                         unlock_page(page);
2191                         page_cache_release(page);
2192                 }
2193                 return;         /* AKPM: return what? */
2194         }
2195
2196         last_block = (inode->i_size + blocksize-1)
2197                                         >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2198
2199         if (page)
2200                 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2201
2202         n = ext3_block_to_path(inode, last_block, offsets, NULL);
2203         if (n == 0)
2204                 goto out_stop;  /* error */
2205
2206         /*
2207          * OK.  This truncate is going to happen.  We add the inode to the
2208          * orphan list, so that if this truncate spans multiple transactions,
2209          * and we crash, we will resume the truncate when the filesystem
2210          * recovers.  It also marks the inode dirty, to catch the new size.
2211          *
2212          * Implication: the file must always be in a sane, consistent
2213          * truncatable state while each transaction commits.
2214          */
2215         if (ext3_orphan_add(handle, inode))
2216                 goto out_stop;
2217
2218         /*
2219          * The orphan list entry will now protect us from any crash which
2220          * occurs before the truncate completes, so it is now safe to propagate
2221          * the new, shorter inode size (held for now in i_size) into the
2222          * on-disk inode. We do this via i_disksize, which is the value which
2223          * ext3 *really* writes onto the disk inode.
2224          */
2225         ei->i_disksize = inode->i_size;
2226
2227         /*
2228          * From here we block out all ext3_get_block() callers who want to
2229          * modify the block allocation tree.
2230          */
2231         down(&ei->truncate_sem);
2232
2233         if (n == 1) {           /* direct blocks */
2234                 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2235                                i_data + EXT3_NDIR_BLOCKS);
2236                 goto do_indirects;
2237         }
2238
2239         partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2240         /* Kill the top of shared branch (not detached) */
2241         if (nr) {
2242                 if (partial == chain) {
2243                         /* Shared branch grows from the inode */
2244                         ext3_free_branches(handle, inode, NULL,
2245                                            &nr, &nr+1, (chain+n-1) - partial);
2246                         *partial->p = 0;
2247                         /*
2248                          * We mark the inode dirty prior to restart,
2249                          * and prior to stop.  No need for it here.
2250                          */
2251                 } else {
2252                         /* Shared branch grows from an indirect block */
2253                         BUFFER_TRACE(partial->bh, "get_write_access");
2254                         ext3_free_branches(handle, inode, partial->bh,
2255                                         partial->p,
2256                                         partial->p+1, (chain+n-1) - partial);
2257                 }
2258         }
2259         /* Clear the ends of indirect blocks on the shared branch */
2260         while (partial > chain) {
2261                 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2262                                    (__le32*)partial->bh->b_data+addr_per_block,
2263                                    (chain+n-1) - partial);
2264                 BUFFER_TRACE(partial->bh, "call brelse");
2265                 brelse (partial->bh);
2266                 partial--;
2267         }
2268 do_indirects:
2269         /* Kill the remaining (whole) subtrees */
2270         switch (offsets[0]) {
2271                 default:
2272                         nr = i_data[EXT3_IND_BLOCK];
2273                         if (nr) {
2274                                 ext3_free_branches(handle, inode, NULL,
2275                                                    &nr, &nr+1, 1);
2276                                 i_data[EXT3_IND_BLOCK] = 0;
2277                         }
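                        /* fall through: the double and triple indirect trees go too */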
2278                 case EXT3_IND_BLOCK:
2279                         nr = i_data[EXT3_DIND_BLOCK];
2280                         if (nr) {
2281                                 ext3_free_branches(handle, inode, NULL,
2282                                                    &nr, &nr+1, 2);
2283                                 i_data[EXT3_DIND_BLOCK] = 0;
2284                         }
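                        /* fall through */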
2285                 case EXT3_DIND_BLOCK:
2286                         nr = i_data[EXT3_TIND_BLOCK];
2287                         if (nr) {
2288                                 ext3_free_branches(handle, inode, NULL,
2289                                                    &nr, &nr+1, 3);
2290                                 i_data[EXT3_TIND_BLOCK] = 0;
2291                         }
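                        /* fall through */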
2292                 case EXT3_TIND_BLOCK:
2293                         ;
2294         }
2295         up(&ei->truncate_sem);
2296         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2297         ext3_mark_inode_dirty(handle, inode);
2298
2299         /* In a multi-transaction truncate, we only make the final
2300          * transaction synchronous */
2301         if (IS_SYNC(inode))
2302                 handle->h_sync = 1;
2303 out_stop:
2304         /*
2305          * If this was a simple ftruncate(), and the file will remain alive
2306          * then we need to clear up the orphan record which we created above.
2307          * However, if this was a real unlink then we were called by
2308          * ext3_delete_inode(), and we allow that function to clean up the
2309          * orphan info for us.
2310          */
2311         if (inode->i_nlink)
2312                 ext3_orphan_del(handle, inode);
2313
2314         ext3_journal_stop(handle);
2315 }
2316
2317 static unsigned long ext3_get_inode_block(struct super_block *sb,
2318                 unsigned long ino, struct ext3_iloc *iloc)
2319 {
2320         unsigned long desc, group_desc, block_group;
2321         unsigned long offset, block;
2322         struct buffer_head *bh;
2323         struct ext3_group_desc * gdp;
2324
2325         if ((ino != EXT3_ROOT_INO &&
2326                 ino != EXT3_JOURNAL_INO &&
2327                 ino < EXT3_FIRST_INO(sb)) ||
2328                 ino > le32_to_cpu(
2329                         EXT3_SB(sb)->s_es->s_inodes_count)) {
2330                 ext3_error (sb, "ext3_get_inode_block",
2331                             "bad inode number: %lu", ino);
2332                 return 0;
2333         }
2334         block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2335         if (block_group >= EXT3_SB(sb)->s_groups_count) {
2336                 ext3_error (sb, "ext3_get_inode_block",
2337                             "group >= groups count");
2338                 return 0;
2339         }
2340         group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
2341         desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
2342         bh = EXT3_SB(sb)->s_group_desc[group_desc];
2343         if (!bh) {
2344                 ext3_error (sb, "ext3_get_inode_block",
2345                             "Descriptor not loaded");
2346                 return 0;
2347         }
2348
2349         gdp = (struct ext3_group_desc *) bh->b_data;
2350         /*
2351          * Figure out the offset within the block group inode table
2352          */
2353         offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2354                 EXT3_INODE_SIZE(sb);
2355         block = le32_to_cpu(gdp[desc].bg_inode_table) +
2356                 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2357
2358         iloc->block_group = block_group;
2359         iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2360         return block;
2361 }
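
/*
 * Worked example (illustrative): with, say, 8192 inodes per group and
 * 128-byte on-disk inodes, ino 8195 falls in block_group 1 with
 * offset == ((8195 - 1) % 8192) * 128 == 256 bytes into that group's
 * inode table; at a 1024-byte blocksize that is bg_inode_table + 0,
 * and iloc->offset == 256.
 */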
2362
2363 /* 
2364  * ext3_get_inode_loc returns with an extra refcount against the inode's
2365  * underlying buffer_head on success.  If `in_mem' is true then the inode's
2366  * fields are already up to date in memory, so we may be able to skip
2367  * reading the inode block from disk (see below).
2368  */
2369 static int ext3_get_inode_loc(struct inode *inode,
2370                                 struct ext3_iloc *iloc, int in_mem)
2371 {
2372         unsigned long block;
2373         struct buffer_head *bh;
2374
2375         block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2376         if (!block)
2377                 return -EIO;
2378
2379         bh = sb_getblk(inode->i_sb, block);
2380         if (!bh) {
2381                 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2382                                 "unable to read inode block - "
2383                                 "inode=%lu, block=%lu", inode->i_ino, block);
2384                 return -EIO;
2385         }
2386         if (!buffer_uptodate(bh)) {
2387                 lock_buffer(bh);
2388                 if (buffer_uptodate(bh)) {
2389                         /* someone brought it uptodate while we waited */
2390                         unlock_buffer(bh);
2391                         goto has_buffer;
2392                 }
2393
2394                 /* we can only skip the read if the in-core inode is authoritative */
2395                 if (in_mem) {
2396                         struct buffer_head *bitmap_bh;
2397                         struct ext3_group_desc *desc;
2398                         int inodes_per_buffer;
2399                         int inode_offset, i;
2400                         int block_group;
2401                         int start;
2402
2403                         /*
2404                          * If this is the only valid inode in the block we
2405                          * need not read the block.
2406                          */
2407                         block_group = (inode->i_ino - 1) /
2408                                         EXT3_INODES_PER_GROUP(inode->i_sb);
2409                         inodes_per_buffer = bh->b_size /
2410                                 EXT3_INODE_SIZE(inode->i_sb);
2411                         inode_offset = ((inode->i_ino - 1) %
2412                                         EXT3_INODES_PER_GROUP(inode->i_sb));
2413                         start = inode_offset & ~(inodes_per_buffer - 1);
2414
2415                         /* Is the inode bitmap in cache? */
2416                         desc = ext3_get_group_desc(inode->i_sb,
2417                                                 block_group, NULL);
2418                         if (!desc)
2419                                 goto make_io;
2420
2421                         bitmap_bh = sb_getblk(inode->i_sb,
2422                                         le32_to_cpu(desc->bg_inode_bitmap));
2423                         if (!bitmap_bh)
2424                                 goto make_io;
2425
2426                         /*
2427                          * If the inode bitmap isn't in cache then the
2428                          * optimisation may end up performing two reads instead
2429                          * of one, so skip it.
2430                          */
2431                         if (!buffer_uptodate(bitmap_bh)) {
2432                                 brelse(bitmap_bh);
2433                                 goto make_io;
2434                         }
2435                         for (i = start; i < start + inodes_per_buffer; i++) {
2436                                 if (i == inode_offset)
2437                                         continue;
2438                                 if (ext3_test_bit(i, bitmap_bh->b_data))
2439                                         break;
2440                         }
2441                         brelse(bitmap_bh);
2442                         if (i == start + inodes_per_buffer) {
2443                                 /* all other inodes are free, so skip I/O */
2444                                 memset(bh->b_data, 0, bh->b_size);
2445                                 set_buffer_uptodate(bh);
2446                                 unlock_buffer(bh);
2447                                 goto has_buffer;
2448                         }
2449                 }
2450
2451 make_io:
2452                 /*
2453                  * There are other valid inodes in the buffer, so we must
2454                  * read the block from disk.
2455                  */
2456                 get_bh(bh);
2457                 bh->b_end_io = end_buffer_read_sync;
2458                 submit_bh(READ, bh);
2459                 wait_on_buffer(bh);
2460                 if (!buffer_uptodate(bh)) {
2461                         ext3_error(inode->i_sb, "ext3_get_inode_loc",
2462                                         "unable to read inode block - "
2463                                         "inode=%lu, block=%lu",
2464                                         inode->i_ino, block);
2465                         brelse(bh);
2466                         return -EIO;
2467                 }
2468         }
2469 has_buffer:
2470         iloc->bh = bh;
2471         return 0;
2472 }
2473
2474 void ext3_truncate(struct inode * inode)
2475 {
2476         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2477                 return;
2478         ext3_truncate_nocheck(inode);
2479 }
2480
2481 void ext3_set_inode_flags(struct inode *inode)
2482 {
2483         unsigned int flags = EXT3_I(inode)->i_flags;
2484
2485         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2486         if (flags & EXT3_SYNC_FL)
2487                 inode->i_flags |= S_SYNC;
2488         if (flags & EXT3_APPEND_FL)
2489                 inode->i_flags |= S_APPEND;
2490         if (flags & EXT3_IMMUTABLE_FL)
2491                 inode->i_flags |= S_IMMUTABLE;
2492         if (flags & EXT3_IUNLINK_FL)
2493                 inode->i_flags |= S_IUNLINK;
2494         if (flags & EXT3_BARRIER_FL)
2495                 inode->i_flags |= S_BARRIER;
2496         if (flags & EXT3_NOATIME_FL)
2497                 inode->i_flags |= S_NOATIME;
2498         if (flags & EXT3_DIRSYNC_FL)
2499                 inode->i_flags |= S_DIRSYNC;
2500 }
2501
2502 void ext3_read_inode(struct inode * inode)
2503 {
2504         struct ext3_iloc iloc;
2505         struct ext3_inode *raw_inode;
2506         struct ext3_inode_info *ei = EXT3_I(inode);
2507         struct buffer_head *bh;
2508         int block;
2509         uid_t uid;
2510         gid_t gid;
2511
2512 #ifdef CONFIG_EXT3_FS_POSIX_ACL
2513         ei->i_acl = EXT3_ACL_NOT_CACHED;
2514         ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2515 #endif
2516         if (ext3_get_inode_loc(inode, &iloc, 0))
2517                 goto bad_inode;
2518         bh = iloc.bh;
2519         raw_inode = ext3_raw_inode(&iloc);
2520         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2521         uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2522         gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2523         if(!(test_opt (inode->i_sb, NO_UID32))) {
2524                 uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2525                 gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2526         }
2527         inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
2528         inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
2529         inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
2530                 le16_to_cpu(raw_inode->i_raw_xid));
2531
2532         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2533         inode->i_size = le32_to_cpu(raw_inode->i_size);
2534         inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
2535         inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
2536         inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
2537         inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2538
2539         ei->i_state = 0;
2540         ei->i_next_alloc_block = 0;
2541         ei->i_next_alloc_goal = 0;
2542         ei->i_dir_start_lookup = 0;
2543         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2544         /* We now have enough fields to check if the inode was active or not.
2545          * This is needed because nfsd might try to access dead inodes;
2546          * the test is the same one that e2fsck uses.
2547          * NeilBrown 1999oct15
2548          */
2549         if (inode->i_nlink == 0) {
2550                 if (inode->i_mode == 0 ||
2551                     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2552                         /* this inode is deleted */
2553                         brelse (bh);
2554                         goto bad_inode;
2555                 }
2556                 /* The only unlinked inodes we let through here have
2557                  * valid i_mode and are being read by the orphan
2558                  * recovery code: that's fine, we're about to complete
2559                  * the process of deleting those. */
2560         }
2561         inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
2562                                          * (for stat), not the fs block
2563                                          * size */  
2564         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2565         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2566 #ifdef EXT3_FRAGMENTS
2567         ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2568         ei->i_frag_no = raw_inode->i_frag;
2569         ei->i_frag_size = raw_inode->i_fsize;
2570 #endif
2571         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2572         if (!S_ISREG(inode->i_mode)) {
2573                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2574         } else {
2575                 inode->i_size |=
2576                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2577         }
2578         ei->i_disksize = inode->i_size;
2579         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2580 #ifdef EXT3_PREALLOCATE
2581         ei->i_prealloc_count = 0;
2582 #endif
2583         ei->i_block_group = iloc.block_group;
2584
2585         /*
2586          * NOTE! The in-memory inode i_data array is in little-endian order
2587          * even on big-endian machines: we do NOT byteswap the block numbers!
2588          */
2589         for (block = 0; block < EXT3_N_BLOCKS; block++)
2590                 ei->i_data[block] = raw_inode->i_block[block];
2591         INIT_LIST_HEAD(&ei->i_orphan);
2592
2593         if (S_ISREG(inode->i_mode)) {
2594                 inode->i_op = &ext3_file_inode_operations;
2595                 inode->i_fop = &ext3_file_operations;
2596                 ext3_set_aops(inode);
2597         } else if (S_ISDIR(inode->i_mode)) {
2598                 inode->i_op = &ext3_dir_inode_operations;
2599                 inode->i_fop = &ext3_dir_operations;
2600         } else if (S_ISLNK(inode->i_mode)) {
2601                 if (ext3_inode_is_fast_symlink(inode))
2602                         inode->i_op = &ext3_fast_symlink_inode_operations;
2603                 else {
2604                         inode->i_op = &ext3_symlink_inode_operations;
2605                         ext3_set_aops(inode);
2606                 }
2607         } else {
2608                 inode->i_op = &ext3_special_inode_operations;
2609                 if (raw_inode->i_block[0])
2610                         init_special_inode(inode, inode->i_mode,
2611                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2612                 else 
2613                         init_special_inode(inode, inode->i_mode,
2614                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2615         }
2616         brelse (iloc.bh);
2617         ext3_set_inode_flags(inode);
2618         return;
2619
2620 bad_inode:
2621         make_bad_inode(inode);
2622         return;
2623 }
2624
2625 /*
2626  * Post the struct inode info into an on-disk inode location in the
2627  * buffer-cache.  This gobbles the caller's reference to the
2628  * buffer_head in the inode location struct.
2629  *
2630  * The caller must have write access to iloc->bh.
2631  */
2632 static int ext3_do_update_inode(handle_t *handle, 
2633                                 struct inode *inode, 
2634                                 struct ext3_iloc *iloc)
2635 {
2636         struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2637         struct ext3_inode_info *ei = EXT3_I(inode);
2638         struct buffer_head *bh = iloc->bh;
2639         uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
2640         gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
2641         int err = 0, rc, block;
2642
2643         /* For fields not tracked in the in-memory inode,
2644          * initialise them to zero for new inodes. */
2645         if (ei->i_state & EXT3_STATE_NEW)
2646                 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2647
2648         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2649         if(!(test_opt(inode->i_sb, NO_UID32))) {
2650                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
2651                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
2652 /*
2653  * Fix up interoperability with old kernels. Otherwise, old inodes get
2654  * re-used with the upper 16 bits of the uid/gid intact
2655  */
2656                 if(!ei->i_dtime) {
2657                         raw_inode->i_uid_high =
2658                                 cpu_to_le16(high_16_bits(uid));
2659                         raw_inode->i_gid_high =
2660                                 cpu_to_le16(high_16_bits(gid));
2661                 } else {
2662                         raw_inode->i_uid_high = 0;
2663                         raw_inode->i_gid_high = 0;
2664                 }
2665         } else {
2666                 raw_inode->i_uid_low =
2667                         cpu_to_le16(fs_high2lowuid(uid));
2668                 raw_inode->i_gid_low =
2669                         cpu_to_le16(fs_high2lowgid(gid));
2670                 raw_inode->i_uid_high = 0;
2671                 raw_inode->i_gid_high = 0;
2672         }
2673 #ifdef CONFIG_INOXID_GID32
2674         raw_inode->i_raw_xid = cpu_to_le16(inode->i_xid);
2675 #endif
2676         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2677         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2678         raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2679         raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2680         raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2681         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2682         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2683         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2684 #ifdef EXT3_FRAGMENTS
2685         raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2686         raw_inode->i_frag = ei->i_frag_no;
2687         raw_inode->i_fsize = ei->i_frag_size;
2688 #endif
2689         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2690         if (!S_ISREG(inode->i_mode)) {
2691                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2692         } else {
2693                 raw_inode->i_size_high =
2694                         cpu_to_le32(ei->i_disksize >> 32);
2695                 if (ei->i_disksize > 0x7fffffffULL) {
2696                         struct super_block *sb = inode->i_sb;
2697                         if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2698                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2699                             EXT3_SB(sb)->s_es->s_rev_level ==
2700                                         cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2701                                /* If this is the first large file
2702                                 * created, add a flag to the superblock.
2703                                 */
2704                                 err = ext3_journal_get_write_access(handle,
2705                                                 EXT3_SB(sb)->s_sbh);
2706                                 if (err)
2707                                         goto out_brelse;
2708                                 ext3_update_dynamic_rev(sb);
2709                                 EXT3_SET_RO_COMPAT_FEATURE(sb,
2710                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2711                                 sb->s_dirt = 1;
2712                                 handle->h_sync = 1;
2713                                 err = ext3_journal_dirty_metadata(handle,
2714                                                 EXT3_SB(sb)->s_sbh);
2715                         }
2716                 }
2717         }
2718         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2719         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2720                 if (old_valid_dev(inode->i_rdev)) {
2721                         raw_inode->i_block[0] =
2722                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2723                         raw_inode->i_block[1] = 0;
2724                 } else {
2725                         raw_inode->i_block[0] = 0;
2726                         raw_inode->i_block[1] =
2727                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2728                         raw_inode->i_block[2] = 0;
2729                 }
2730         } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2731                 raw_inode->i_block[block] = ei->i_data[block];
2732
2733         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2734         rc = ext3_journal_dirty_metadata(handle, bh);
2735         if (!err)
2736                 err = rc;
2737         ei->i_state &= ~EXT3_STATE_NEW;
2738
2739 out_brelse:
2740         brelse(bh);
2741         ext3_std_error(inode->i_sb, err);
2742         return err;
2743 }
2744
2745 /*
2746  * ext3_write_inode()
2747  *
2748  * We are called from a few places:
2749  *
2750  * - Within generic_file_write() for O_SYNC files.
2751  *   Here, there will be no transaction running. We wait for any running
2752  *   transaction to commit.
2753  *
2754  * - Within sys_sync(), kupdate and such.
2755  *   We wait on commit, if told to.
2756  *
2757  * - Within prune_icache() (PF_MEMALLOC == true)
2758  *   Here we simply return.  We can't afford to block kswapd on the
2759  *   journal commit.
2760  *
2761  * In all cases it is actually safe for us to return without doing anything,
2762  * because the inode has been copied into a raw inode buffer in
2763  * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2764  * knfsd.
2765  *
2766  * Note that we are absolutely dependent upon all inode dirtiers doing the
2767  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2768  * which we are interested.
2769  *
2770  * It would be a bug for them to not do this.  The code:
2771  *
2772  *      mark_inode_dirty(inode)
2773  *      stuff();
2774  *      inode->i_size = expr;
2775  *
2776  * is in error because a kswapd-driven write_inode() could occur while
2777  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2778  * will no longer be on the superblock's dirty inode list.
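 *
 * A correct version, following the rule above, dirties the inode only
 * after all in-core updates are complete:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);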
2779  */
2780 int ext3_write_inode(struct inode *inode, int wait)
2781 {
2782         if (current->flags & PF_MEMALLOC)
2783                 return 0;
2784
2785         if (ext3_journal_current_handle()) {
2786                 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2787                 dump_stack();
2788                 return -EIO;
2789         }
2790
2791         if (!wait)
2792                 return 0;
2793
2794         return ext3_force_commit(inode->i_sb);
2795 }
2796
2797 int ext3_setattr_flags(struct inode *inode, unsigned int flags)
2798 {
2799         unsigned int oldflags, newflags;
2800         int err = 0;
2801
2802         oldflags = EXT3_I(inode)->i_flags;
2803         newflags = oldflags &
2804                 ~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
2805         if (flags & ATTR_FLAG_IMMUTABLE)
2806                 newflags |= EXT3_IMMUTABLE_FL;
2807         if (flags & ATTR_FLAG_IUNLINK)
2808                 newflags |= EXT3_IUNLINK_FL;
2809         if (flags & ATTR_FLAG_BARRIER)
2810                 newflags |= EXT3_BARRIER_FL;
2811
2812         if (oldflags ^ newflags) {
2813                 handle_t *handle;
2814                 struct ext3_iloc iloc;
2815
2816                 handle = ext3_journal_start(inode, 1);
2817                 if (IS_ERR(handle))
2818                         return PTR_ERR(handle);
2819                 if (IS_SYNC(inode))
2820                         handle->h_sync = 1;
2821                 err = ext3_reserve_inode_write(handle, inode, &iloc);
2822                 if (err)
2823                         goto flags_err;
2824
2825                 EXT3_I(inode)->i_flags = newflags;
2826                 inode->i_ctime = CURRENT_TIME;
2827
2828                 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
2829         flags_err:
2830                 ext3_journal_stop(handle);
2831         }
2832         return err;
2833 }
2834
2835 /*
2836  * ext3_setattr()
2837  *
2838  * Called from notify_change.
2839  *
2840  * We want to trap VFS attempts to truncate the file as soon as
2841  * possible.  In particular, we want to make sure that when the VFS
2842  * shrinks i_size, we put the inode on the orphan list and modify
2843  * i_disksize immediately, so that during the subsequent flushing of
2844  * dirty pages and freeing of disk blocks, we can guarantee that any
2845  * commit will leave the blocks being flushed in an unused state on
2846  * disk.  (On recovery, the inode will get truncated and the blocks will
2847  * be freed, so we have a strong guarantee that no future commit will
2848  * leave these blocks visible to the user.)  
2849  *
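 * Concretely, the shrink path below does, inside a single transaction:
 *
 *      ext3_orphan_add(handle, inode);
 *      EXT3_I(inode)->i_disksize = attr->ia_size;
 *      ext3_mark_inode_dirty(handle, inode);
 *
 * and only afterwards lets the actual truncate run via inode_setattr().
 *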
2850  * Called with inode->sem down.
2851  */
2852 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2853 {
2854         struct inode *inode = dentry->d_inode;
2855         int error, rc = 0;
2856         const unsigned int ia_valid = attr->ia_valid;
2857
2858         error = inode_change_ok(inode, attr);
2859         if (error)
2860                 return error;
2861
2862         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2863                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
2864                 (ia_valid & ATTR_XID && attr->ia_xid != inode->i_xid)) {
2865                 handle_t *handle;
2866
2867                 /* (user+group)*(old+new) structure, inode write (sb,
2868                  * inode block, ? - but truncate inode update has it) */
2869                 handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
2870                 if (IS_ERR(handle)) {
2871                         error = PTR_ERR(handle);
2872                         goto err_out;
2873                 }
2874                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2875                 if (error) {
2876                         ext3_journal_stop(handle);
2877                         return error;
2878                 }
2879                 /* Update corresponding info in inode so that everything is in
2880                  * one transaction */
2881                 if (attr->ia_valid & ATTR_UID)
2882                         inode->i_uid = attr->ia_uid;
2883                 if (attr->ia_valid & ATTR_GID)
2884                         inode->i_gid = attr->ia_gid;
2885                 if ((attr->ia_valid & ATTR_XID)
2886                         && inode->i_sb
2887                         && (inode->i_sb->s_flags & MS_TAGXID))
2888                         inode->i_xid = attr->ia_xid;
2889                 error = ext3_mark_inode_dirty(handle, inode);
2890                 ext3_journal_stop(handle);
2891         }
2892
2893         if (S_ISREG(inode->i_mode) &&
2894             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2895                 handle_t *handle;
2896
2897                 handle = ext3_journal_start(inode, 3);
2898                 if (IS_ERR(handle)) {
2899                         error = PTR_ERR(handle);
2900                         goto err_out;
2901                 }
2902
2903                 error = ext3_orphan_add(handle, inode);
2904                 EXT3_I(inode)->i_disksize = attr->ia_size;
2905                 rc = ext3_mark_inode_dirty(handle, inode);
2906                 if (!error)
2907                         error = rc;
2908                 ext3_journal_stop(handle);
2909         }
2910
2911         if (ia_valid & ATTR_ATTR_FLAG) {
2912                 rc = ext3_setattr_flags(inode, attr->ia_attr_flags);
2913                 if (!error)
2914                         error = rc;
2915         }
2916
2917         rc = inode_setattr(inode, attr);
2918
2919         /* If inode_setattr's call to ext3_truncate failed to get a
2920          * transaction handle at all, we need to clean up the in-core
2921          * orphan list manually. */
2922         if (inode->i_nlink)
2923                 ext3_orphan_del(NULL, inode);
2924
2925         if (!rc && (ia_valid & ATTR_MODE))
2926                 rc = ext3_acl_chmod(inode);
2927
2928 err_out:
2929         ext3_std_error(inode->i_sb, error);
2930         if (!error)
2931                 error = rc;
2932         return error;
2933 }
2934
2935
2936 /*
2937  * akpm: how many blocks doth make a writepage()?
2938  *
2939  * With N blocks per page, it may be:
2940  * N data blocks
2941  * 2 indirect blocks
2942  * 2 dindirect blocks
2943  * 1 tindirect block
2944  * N+5 bitmap blocks (from the above)
2945  * N+5 group descriptor summary blocks
2946  * 1 inode block
2947  * 1 superblock.
2948  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
2949  *
2950  * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
2951  *
2952  * With ordered or writeback data it's the same, less the N data blocks.
2953  *
2954  * If the inode's direct blocks can hold an integral number of pages then a
2955  * page cannot straddle two indirect blocks, and we can only touch one indirect
2956  * and dindirect block, and the "5" above becomes "3".
2957  *
2958  * This still overestimates under most circumstances.  If we were to pass the
2959  * start and end offsets in here as well we could do block_to_path() on each
2960  * block and work out the exact number of indirects which are touched.  Pah.
2961  */
2962
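/*
 * Worked example (illustrative only): with 1K blocks and 4K pages,
 * bpp is 4 and EXT3_NDIR_BLOCKS (12) is a multiple of bpp, so
 * "indirects" below is 3; a data-journalling writepage then reserves
 * 3 * (4 + 3) + 2 = 23 credits, plus the quota blocks when
 * CONFIG_QUOTA is set.
 */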
2963 int ext3_writepage_trans_blocks(struct inode *inode)
2964 {
2965         int bpp = ext3_journal_blocks_per_page(inode);
2966         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2967         int ret;
2968
2969         if (ext3_should_journal_data(inode))
2970                 ret = 3 * (bpp + indirects) + 2;
2971         else
2972                 ret = 2 * (bpp + indirects) + 2;
2973
2974 #ifdef CONFIG_QUOTA
2975         /* We know that the quota structure was already allocated during
2976          * DQUOT_INIT so we will be updating only the data blocks + inodes */
2977         ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
2978 #endif
2979
2980         return ret;
2981 }
2982
2983 /*
2984  * The caller must have previously called ext3_reserve_inode_write().
2985  * Given this, we know that the caller already has write access to iloc->bh.
2986  */
2987 int ext3_mark_iloc_dirty(handle_t *handle,
2988                 struct inode *inode, struct ext3_iloc *iloc)
2989 {
2990         int err = 0;
2991
2992         /* the do_update_inode consumes one bh->b_count */
2993         get_bh(iloc->bh);
2994
2995         /* ext3_do_update_inode() does journal_dirty_metadata */
2996         err = ext3_do_update_inode(handle, inode, iloc);
2997         put_bh(iloc->bh);
2998         return err;
2999 }
3000
3001 /* 
3002  * On success, we end up with an outstanding reference count against
3003  * iloc->bh.  This _must_ be cleaned up later. 
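 * (Typically that happens via ext3_mark_iloc_dirty(), which lets
 * ext3_do_update_inode() consume the reference with brelse().)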
3004  */
3005
3006 int
3007 ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
3008                          struct ext3_iloc *iloc)
3009 {
3010         int err = 0;
3011         if (handle) {
3012                 err = ext3_get_inode_loc(inode, iloc, 1);
3013                 if (!err) {
3014                         BUFFER_TRACE(iloc->bh, "get_write_access");
3015                         err = ext3_journal_get_write_access(handle, iloc->bh);
3016                         if (err) {
3017                                 brelse(iloc->bh);
3018                                 iloc->bh = NULL;
3019                         }
3020                 }
3021         }
3022         ext3_std_error(inode->i_sb, err);
3023         return err;
3024 }
3025
3026 /*
3027  * akpm: What we do here is to mark the in-core inode as clean
3028  * with respect to inode dirtiness (it may still be data-dirty).
3029  * This means that the in-core inode may be reaped by prune_icache
3030  * without having to perform any I/O.  This is a very good thing,
3031  * because *any* task may call prune_icache - even ones which
3032  * have a transaction open against a different journal.
3033  *
3034  * Is this cheating?  Not really.  Sure, we haven't written the
3035  * inode out, but prune_icache isn't a user-visible syncing function.
3036  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3037  * we start and wait on commits.
3038  *
3039  * Is this efficient/effective?  Well, we're being nice to the system
3040  * by cleaning up our inodes proactively so they can be reaped
3041  * without I/O.  But we are potentially leaving up to five seconds'
3042  * worth of inodes floating about which prune_icache wants us to
3043  * write out.  One way to fix that would be to get prune_icache()
3044  * to do a write_super() to free up some memory.  It has the desired
3045  * effect.
3046  */
3047 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3048 {
3049         struct ext3_iloc iloc;
3050         int err;
3051
3052         might_sleep();
3053         err = ext3_reserve_inode_write(handle, inode, &iloc);
3054         if (!err)
3055                 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3056         return err;
3057 }
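
/*
 * Minimal caller sketch (illustrative; ext3_dirty_inode() below is the
 * real in-tree example, reserving two credits):
 *
 *      handle_t *handle = ext3_journal_start(inode, 1);
 *      if (!IS_ERR(handle)) {
 *              ext3_mark_inode_dirty(handle, inode);
 *              ext3_journal_stop(handle);
 *      }
 */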
3058
3059 /*
3060  * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
3061  *
3062  * We're really interested in the case where a file is being extended.
3063  * i_size has been changed by generic_commit_write() and we thus need
3064  * to include the updated inode in the current transaction.
3065  *
3066  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3067  * are allocated to the file.
3068  *
3069  * If the inode is marked synchronous, we don't honour that here - doing
3070  * so would cause a commit on atime updates, which we don't bother doing.
3071  * We handle synchronous inodes at the highest possible level.
3072  */
3073 void ext3_dirty_inode(struct inode *inode)
3074 {
3075         handle_t *current_handle = ext3_journal_current_handle();
3076         handle_t *handle;
3077
3078         handle = ext3_journal_start(inode, 2);
3079         if (IS_ERR(handle))
3080                 goto out;
3081         if (current_handle &&
3082                 current_handle->h_transaction != handle->h_transaction) {
3083                 /* This task has a transaction open against a different fs */
3084                 printk(KERN_EMERG "%s: transactions do not match!\n",
3085                        __FUNCTION__);
3086         } else {
3087                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3088                                 current_handle);
3089                 ext3_mark_inode_dirty(handle, inode);
3090         }
3091         ext3_journal_stop(handle);
3092 out:
3093         return;
3094 }
3095
3096 #ifdef AKPM
3097 /* 
3098  * Bind an inode's backing buffer_head into this transaction, to prevent
3099  * it from being flushed to disk early.  Unlike
3100  * ext3_reserve_inode_write, this leaves behind no bh reference and
3101  * returns no iloc structure, so the caller needs to repeat the iloc
3102  * lookup to mark the inode dirty later.
3103  */
3104 static inline int
3105 ext3_pin_inode(handle_t *handle, struct inode *inode)
3106 {
3107         struct ext3_iloc iloc;
3108
3109         int err = 0;
3110         if (handle) {
3111                 err = ext3_get_inode_loc(inode, &iloc, 1);
3112                 if (!err) {
3113                         BUFFER_TRACE(iloc.bh, "get_write_access");
3114                         err = journal_get_write_access(handle, iloc.bh);
3115                         if (!err)
3116                                 err = ext3_journal_dirty_metadata(handle, 
3117                                                                   iloc.bh);
3118                         brelse(iloc.bh);
3119                 }
3120         }
3121         ext3_std_error(inode->i_sb, err);
3122         return err;
3123 }
3124 #endif
3125
3126 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3127 {
3128         journal_t *journal;
3129         handle_t *handle;
3130         int err;
3131
3132         /*
3133          * We have to be very careful here: changing a data block's
3134          * journaling status dynamically is dangerous.  If we write a
3135          * data block to the journal, change the status and then delete
3136          * that block, we risk forgetting to revoke the old log record
3137          * from the journal and so a subsequent replay can corrupt data.
3138          * So, first we make sure that the journal is empty and that
3139          * nobody is changing anything.
3140          */
3141
3142         journal = EXT3_JOURNAL(inode);
3143         if (is_journal_aborted(journal) || IS_RDONLY(inode))
3144                 return -EROFS;
3145
3146         journal_lock_updates(journal);
3147         journal_flush(journal);
3148
3149         /*
3150          * OK, there are no updates running now, and all cached data is
3151          * synced to disk.  We are now in a completely consistent state
3152          * which doesn't have anything in the journal, and we know that
3153          * no filesystem updates are running, so it is safe to modify
3154          * the inode's in-core data-journaling state flag now.
3155          */
3156
3157         if (val)
3158                 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3159         else
3160                 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3161         ext3_set_aops(inode);
3162
3163         journal_unlock_updates(journal);
3164
3165         /* Finally we can mark the inode as dirty. */
3166
3167         handle = ext3_journal_start(inode, 1);
3168         if (IS_ERR(handle))
3169                 return PTR_ERR(handle);
3170
3171         err = ext3_mark_inode_dirty(handle, inode);
3172         handle->h_sync = 1;
3173         ext3_journal_stop(handle);
3174         ext3_std_error(inode->i_sb, err);
3175
3176         return err;
3177 }