/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/vserver/xid.h>

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT3_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

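/*
 * A "fast" symlink keeps its target directly in the inode's i_data area
 * and so owns no data blocks at all.  i_blocks is counted in 512-byte
 * sectors, which is why the one possible extended-attribute block is
 * converted with "s_blocksize >> 9" above before being subtracted.
 */
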
/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext3_forget(handle_t *handle, int is_metadata,
		struct inode *inode, struct buffer_head *bh,
		int blocknr)
{
	int err;

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext3 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS + needed;
}

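/*
 * Example: i_blocks is in 512-byte sectors, so with 4KB blocks the
 * shift above is (12 - 9) = 3, and a 1MB file (2048 sectors) yields
 * needed = 256, which is then clamped to EXT3_MAX_TRANS_DATA before
 * the fixed EXT3_DATA_TRANS_BLOCKS overhead is added.
 */
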
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext3_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext3_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at each iput().
 *
 * The inode may be "bad" if ext3_read_inode() saw an error from
 * ext3_get_inode(), so we need to check that to avoid freeing random disk
 * blocks.
 */
void ext3_put_inode(struct inode *inode)
{
	if (!is_bad_inode(inode))
		ext3_discard_prealloc(inode);
}

static void ext3_truncate_nocheck (struct inode *inode);

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/* If we're going to skip the normal cleanup, we still
		 * need to make sure that the in-core orphan linked list
		 * is properly cleaned up. */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate_nocheck(inode);
	/*
	 * Kill off the orphan record which ext3_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext3_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

void ext3_discard_prealloc (struct inode * inode)
{
#ifdef EXT3_PREALLOCATE
	struct ext3_inode_info *ei = EXT3_I(inode);
	/* Writer: ->i_prealloc* */
	if (ei->i_prealloc_count) {
		unsigned short total = ei->i_prealloc_count;
		unsigned long block = ei->i_prealloc_block;
		ei->i_prealloc_count = 0;
		ei->i_prealloc_block = 0;
		/* Writer: end */
		ext3_free_blocks (inode, block, total);
	}
#endif
}

static int ext3_alloc_block (handle_t *handle,
			struct inode * inode, unsigned long goal, int *err)
{
	unsigned long result;

#ifdef EXT3_PREALLOCATE
	static unsigned long alloc_hits, alloc_attempts;

	struct ext3_inode_info *ei = EXT3_I(inode);
	/* Writer: ->i_prealloc* */
	if (ei->i_prealloc_count &&
	    (goal == ei->i_prealloc_block ||
	     goal + 1 == ei->i_prealloc_block))
	{
		result = ei->i_prealloc_block++;
		ei->i_prealloc_count--;
		/* Writer: end */
		ext3_debug ("preallocation hit (%lu/%lu).\n",
			    ++alloc_hits, ++alloc_attempts);
	} else {
		ext3_discard_prealloc (inode);
		ext3_debug ("preallocation miss (%lu/%lu).\n",
			    alloc_hits, ++alloc_attempts);
		if (S_ISREG(inode->i_mode))
			result = ext3_new_block (inode, goal,
				 &ei->i_prealloc_count,
				 &ei->i_prealloc_block, err);
		else
			result = ext3_new_block (inode, goal, 0, 0, err);
		/*
		 * AKPM: this is somewhat sticky.  I'm not surprised it was
		 * disabled in 2.2's ext3.  Need to integrate b_committed_data
		 * guarding with preallocation, if indeed preallocation is
		 * effective.
		 */
	}
#else
	result = ext3_new_block (handle, inode, goal, 0, 0, err);
#endif
	return result;
}

typedef struct {
	u32	*p;
	u32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

/**
 *	ext3_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext3 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = (i_block & (ptrs - 1)) == (final - 1);
	return n;
}

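/*
 * Example: on a 1KB-block filesystem (ptrs = 256, ptrs_bits = 8),
 * logical block 300 resolves as 300 - 12 = 288, then 288 - 256 = 32,
 * landing in the double-indirect range; the path is
 * {EXT3_DIND_BLOCK, 32 >> 8 = 0, 32 & 255 = 32} and the returned
 * depth is 3.
 */
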
/**
 *	ext3_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext3_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the callers PID to
 *	prevent it from clashing with concurrent allocations for a different inode
 *	in the same block group.   The PID is used here so that functionally related
 *	files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
	u32 *p;
	unsigned long bg_start;
	unsigned long colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it into
	 * the same cylinder group then.
	 */
	bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}

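/*
 * With the common 4KB block size a block group holds 32768 blocks, so
 * the PID colouring above spreads concurrent allocators across 16 slots
 * of 2048 blocks each within the group.
 */
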
/**
 *	ext3_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@chain: chain of indirect blocks
 *	@partial: pointer to the last triple within a chain
 *	@goal: place to store the result.
 *
 *	Normally this function finds the preferred place for block allocation,
 *	stores it in *@goal and returns zero. If the branch had been changed
 *	under us we return -EAGAIN.
 */
static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
			  Indirect *partial, unsigned long *goal)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	/* Writer: ->i_next_alloc* */
	if (block == ei->i_next_alloc_block + 1) {
		ei->i_next_alloc_block++;
		ei->i_next_alloc_goal++;
	}
	/* Writer: end */
	/* Reader: pointers, ->i_next_alloc* */
	if (verify_chain(chain, partial)) {
		/*
		 * try the heuristic for sequential allocation,
		 * failing that at least try to get decent locality.
		 */
		if (block == ei->i_next_alloc_block)
			*goal = ei->i_next_alloc_goal;
		if (!*goal)
			*goal = ext3_find_near(inode, partial);
		/* Reader: end */
		return 0;
	}
	return -EAGAIN;
}

/**
 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@num: depth of the chain (number of blocks to allocate)
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates @num blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext3_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext3_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			     int num,
			     unsigned long goal,
			     int *offsets,
			     Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0, keys = 0;
	int err = 0;
	int i;
	int parent = ext3_alloc_block(handle, inode, goal, &err);

	branch[0].key = cpu_to_le32(parent);
	if (parent)
		for (n = 1; n < num; n++) {
			struct buffer_head *bh;
			/* Allocate the next block */
			int nr = ext3_alloc_block(handle, inode, parent, &err);
			if (!nr)
				break;
			branch[n].key = cpu_to_le32(nr);
			keys = n+1;

			/*
			 * Get buffer_head for parent block, zero it out
			 * and set the pointer to new one, then send
			 * parent to disk.
			 */
			bh = sb_getblk(inode->i_sb, parent);
			branch[n].bh = bh;
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			err = ext3_journal_get_create_access(handle, bh);
			if (err) {
				unlock_buffer(bh);
				brelse(bh);
				break;
			}

			memset(bh->b_data, 0, blocksize);
			branch[n].p = (u32*) bh->b_data + offsets[n];
			*branch[n].p = branch[n].key;
			BUFFER_TRACE(bh, "marking uptodate");
			set_buffer_uptodate(bh);
			unlock_buffer(bh);

			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (err)
				break;

			parent = nr;
		}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < keys; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < keys; i++)
		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
	return err;
}

/**
 *	ext3_splice_branch - splice the allocated branch onto inode.
 *	@inode: owner
 *	@block: (logical) number of block we are adding
 *	@chain: chain of indirect blocks (with a missing link - see
 *		ext3_alloc_branch)
 *	@where: location of missing link
 *	@num: number of blocks we are adding
 *
 *	This function verifies that chain (up to the missing link) had not
 *	changed, fills the missing link and does all housekeeping needed in
 *	inode (->i_blocks, etc.). In case of success we end up with the full
 *	chain to new block and return 0. Otherwise (== chain had been changed)
 *	we free the new blocks (forgetting their buffer_heads, indeed) and
 *	return -EAGAIN.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
			      Indirect chain[4], Indirect *where, int num)
{
	int i;
	int err = 0;
	struct ext3_inode_info *ei = EXT3_I(inode);

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* Verify that place we are splicing to is still there and vacant */

	/* Writer: pointers, ->i_next_alloc* */
	if (!verify_chain(chain, where-1) || *where->p)
		/* Writer: end */
		goto changed;

	/* That's it */

	*where->p = where->key;
	ei->i_next_alloc_block = block;
	ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
	/* Writer: end */

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * akpm: If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

changed:
	/*
	 * AKPM: if where[i].bh isn't part of the current updating
	 * transaction then we explode nastily.  Test this code path.
	 */
	jbd_debug(1, "the chain changed: try again\n");
	err = -EAGAIN;

err_out:
	for (i = 1; i < num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
	}
	/* For the normal collision cleanup case, we free up the blocks.
	 * On genuine filesystem errors we don't even think about doing
	 * that. */
	for (i = 0; i < num; i++)
		ext3_free_blocks(handle, inode,
				 le32_to_cpu(where[i].key), 1);
	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create, int extend_disksize)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int boundary = 0;
	int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
	struct ext3_inode_info *ei = EXT3_I(inode);

	J_ASSERT(handle != NULL || create == 0);

	if (depth == 0)
		goto out;

reread:
	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		clear_buffer_new(bh_result);
got_it:
		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
		if (boundary)
			set_buffer_boundary(bh_result);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		BUFFER_TRACE(bh_result, "returned");
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	goal = 0;
	down(&ei->truncate_sem);
	if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
		up(&ei->truncate_sem);
		goto changed;
	}

	left = (chain + depth) - partial;

	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	err = ext3_alloc_branch(handle, inode, left, goal,
				offsets+(partial-chain), partial);

	/* The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock, chain,
					 partial, left);
	/* i_disksize growing is protected by truncate_sem
	 * don't forget to protect it if you're about to implement
	 * concurrent ext3_get_block() -bzzz */
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	up(&ei->truncate_sem);
	if (err == -EAGAIN)
		goto changed;
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		jbd_debug(1, "buffer chain changed, retrying\n");
		BUFFER_TRACE(partial->bh, "brelsing");
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = NULL;
	int ret;

	if (create) {
		handle = ext3_journal_current_handle();
		J_ASSERT(handle != NULL);
	}
	ret = ext3_get_block_handle(handle, inode, iblock,
				bh_result, create, 1);
	return ret;
}

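/*
 * DIO_CREDITS below is a worst-case guess at the journal credits one
 * O_DIRECT block mapping may consume: the reserved transaction blocks
 * plus a generous cushion for bitmap, group descriptor and
 * indirect-block updates.  It is an estimate, not a derived bound.
 */
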
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
		unsigned long max_blocks, struct buffer_head *bh_result,
		int create)
{
	handle_t *handle = journal_current_handle();
	int ret = 0;

	if (handle && handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
		/*
		 * Getting low on buffer credits...
		 */
		if (ext3_journal_extend(handle, DIO_CREDITS) != 0) {
			/*
			 * Couldn't extend the transaction.  Start a new one.
			 * (ext3_journal_extend() returns 0 on success, so a
			 * non-zero return means the extend was refused.)
			 */
			ret = ext3_journal_restart(handle, DIO_CREDITS);
		}
	}
	if (ret == 0)
		ret = ext3_get_block_handle(handle, inode, iblock,
					bh_result, create, 0);
	if (ret == 0)
		bh_result->b_size = (1 << inode->i_blkbits);
	return ret;
}

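/*
 * Note that the get_blocks interface would allow mapping up to
 * max_blocks contiguous blocks in one call.  ext3 maps only a single
 * block at a time, so it reports exactly one blocksize via b_size and
 * ignores max_blocks.
 */
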
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
				long block, int create, int * errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
	if (!*errp && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/* Now that we do not always journal data, we
			   should keep in mind whether this should
			   always journal the new buffer as metadata.
			   For now, regular file writes use
			   ext3_get_block instead, so it's not a
			   problem. */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
	return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;
	int prev_blocks;

	prev_blocks = inode->i_blocks;

	bh = ext3_getblk (handle, inode, block, create, err);
	if (!bh)
		return bh;
#ifdef EXT3_PREALLOCATE
	/*
	 * If the inode has grown, and this is a directory, then use a few
	 * more of the preallocated blocks to keep directory fragmentation
	 * down.  The preallocated blocks are guaranteed to be contiguous.
	 */
	if (create &&
	    S_ISDIR(inode->i_mode) &&
	    inode->i_blocks > prev_blocks &&
	    EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
		int i;
		struct buffer_head *tmp_bh;

		for (i = 1;
		     EXT3_I(inode)->i_prealloc_count &&
		     i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
		     i++) {
			/*
			 * ext3_getblk will zero out the contents of the
			 * directory for us
			 */
			tmp_bh = ext3_getblk(handle, inode,
						block+i, create, err);
			if (!tmp_bh) {
				brelse (bh);
				return 0;
			}
			brelse (tmp_bh);
		}
	}
#endif
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

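/*
 * walk_page_buffers() applies @fn to every buffer_head of the page that
 * overlaps [@from, @to) and reports the first error.  Typical use, as
 * seen later in this file:
 *
 *	walk_page_buffers(handle, page_buffers(page), 0,
 *			PAGE_CACHE_SIZE, NULL, bget_one);
 *
 * @partial, when non-NULL, is set if any buffer outside the range is
 * not uptodate, telling commit paths that the page as a whole cannot be
 * marked uptodate.
 */
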
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */

static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;
	int tried_commit = 0;

retry:
	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = block_prepare_write(page, from, to, ext3_get_block);
	if (ret) {
		if (ret != -ENOSPC || tried_commit)
			goto prepare_write_failed;
		/*
		 * It could be that there _is_ free space, but it's all tied up
		 * in uncommitted bitmaps.  So force a commit here, which makes
		 * those blocks allocatable and try again.
		 */
		tried_commit = 1;
		handle->h_sync = 1;
		ext3_journal_stop(handle);
		goto retry;
	}

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
prepare_write_failed:
	if (ret)
		ext3_journal_stop(handle);
out:
	return ret;
}

int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
						bh, handle, err);
	return err;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */

static int ext3_ordered_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext3_journal_dirty_data);

	if (ret == 0) {
		/*
		 * generic_commit_write() will run mark_inode_dirty() if i_size
		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
		 * into that.
		 */
		loff_t new_i_size;

		new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
		if (new_i_size > EXT3_I(inode)->i_disksize)
			EXT3_I(inode)->i_disksize = new_i_size;
		ret = generic_commit_write(file, page, from, to);
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

static int ext3_writeback_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	if (new_i_size > EXT3_I(inode)->i_disksize)
		EXT3_I(inode)->i_disksize = new_i_size;
	ret = generic_commit_write(file, page, from, to);
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

static int ext3_journalled_commit_write(struct file *file,
			struct page *page, unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	loff_t pos;

	/*
	 * Here we duplicate the generic_commit_write() functionality
	 */
	pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, commit_write_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

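/*
 * The three commit_write variants above differ only in how the freshly
 * written buffers reach disk: data=ordered queues them on the
 * transaction's ordered-data list before the size update, data=writeback
 * updates sizes and leaves writeout entirely to the VM, and data=journal
 * copies the buffers into the journal as if they were metadata.  All
 * three push i_disksize forward before (or along with) i_size, so the
 * on-disk inode size never runs ahead of journalled allocations.
 */
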
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
	if (buffer_mapped(bh))
		return ext3_journal_dirty_data(handle, bh);
	return 0;
}

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext3_writepage()
 *
 * Similar for:
 *
 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	ret = block_write_full_page(page, ext3_get_block, wbc);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0)
			goto out_unlock;
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, commit_write_fn);
		if (ret == 0)
			ret = err;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}

static int ext3_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext3_get_block);
}

static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}

static int ext3_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	return journal_invalidatepage(journal, page, offset);
}

static int ext3_releasepage(struct page *page, int wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	return journal_try_to_free_buffers(journal, page, wait);
}

/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle = NULL;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		if (final_size > inode->i_size) {
			ret = ext3_orphan_add(handle, inode);
			if (ret)
				goto out;
			orphan = 1;
			ei->i_disksize = inode->i_size;
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_direct_io_get_blocks, NULL);

	if (handle) {
		int err;

		if (orphan)
			ext3_orphan_del(handle, inode);
		if (orphan && ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				err = ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Pages can be marked dirty completely asynchronously from ext3's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static struct address_space_operations ext3_ordered_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_ordered_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_ordered_commit_write,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
};

static struct address_space_operations ext3_writeback_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_writeback_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_writeback_commit_write,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
};

static struct address_space_operations ext3_journalled_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_journalled_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_journalled_commit_write,
	.set_page_dirty	= ext3_journalled_set_page_dirty,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
};

void ext3_set_aops(struct inode *inode)
{
	if (ext3_should_order_data(inode))
		inode->i_mapping->a_ops = &ext3_ordered_aops;
	else if (ext3_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext3_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext3_journalled_aops;
}

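/*
 * The three tables above correspond directly to the data=ordered,
 * data=writeback and data=journal mount options.  They share everything
 * except the writepage/commit_write pair, and only data=journal needs
 * the "pending dirty" set_page_dirty hook.
 */
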
/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;
	void *kaddr;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext3_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 */
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext3_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext3_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. Block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext3_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).  */

static Indirect *ext3_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void
ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
		unsigned long block_to_free, unsigned long count,
		u32 *first, u32 *last)
{
	u32 *p;

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We find
	 * them on the hash table so journal_revoke() will run journal_forget()
	 * on them.  We've already detached each block from the file, so
	 * bforget() in journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_find_get_block(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}

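/*
 * The extend-or-restart dance at the top of ext3_clear_blocks() is the
 * pattern ext3_journal_test_restart() demands: the indirect block and
 * the inode are dirtied against the old handle *before* the restart
 * commits it, so every transaction boundary leaves a consistent tree
 * on disk.
 */
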
/**
 * ext3_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * blocks.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh, u32 *first, u32 *last)
{
	unsigned long block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	u32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	unsigned long nr;		    /* Current block # */
	u32 *p;				    /* Pointer into inode/ind
					       for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
		ext3_journal_dirty_metadata(handle, this_bh);
	}
}

/**
 *	ext3_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       u32 *first, u32 *last, int depth)
{
	unsigned long nr;
	u32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext3_error(inode->i_sb, "ext3_free_branches",
					   "Read failure, inode=%ld, block=%ld",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
					   (u32*)bh->b_data + addr_per_block,
					   depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate.  But it's no longer
			 * needed and we now drop it from the transaction via
			 * journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction.  But if it's part of the committing
			 * transaction then journal_forget() will simply
			 * brelse() it.  That means that if the underlying
			 * block is reallocated in ext3_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it.  damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written.  And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext3_mark_inode_dirty(handle, inode);
				ext3_journal_test_restart(handle, inode);
			}

			ext3_free_blocks(handle, inode, nr, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext3_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					"call ext3_journal_dirty_metadata");
					ext3_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext3_free_data(handle, inode, parent_bh, first, last);
	}
}

/*
 * ext3_truncate()
 *
 * We block out ext3_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext3_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext3 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext3_truncate() run will find them and release them.
 */
void ext3_truncate_nocheck(struct inode * inode)
{
	handle_t *handle;
	struct ext3_inode_info *ei = EXT3_I(inode);
	u32 *i_data = ei->i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int nr = 0;
	int n;
	long last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;
	struct page *page;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext3_inode_is_fast_symlink(inode))
		return;

	ext3_discard_prealloc(inode);

	/*
	 * We have to lock the EOF page here, because lock_page() nests
	 * outside journal_start().
	 */
	if ((inode->i_size & (blocksize - 1)) == 0) {
		/* Block boundary? Nothing to do */
		page = NULL;
	} else {
		page = grab_cache_page(mapping,
				inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;		/* AKPM: return what? */
	}

	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	if (page)
		ext3_block_truncate_page(handle, page, mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext3 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	/*
	 * From here we block out all ext3_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down(&ei->truncate_sem);

	if (n == 1) {		/* direct blocks */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext3_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (u32*)partial->bh->b_data + addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT3_IND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, 1);
			i_data[EXT3_IND_BLOCK] = 0;
		}
	case EXT3_IND_BLOCK:
		nr = i_data[EXT3_DIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, 2);
			i_data[EXT3_DIND_BLOCK] = 0;
		}
	case EXT3_DIND_BLOCK:
		nr = i_data[EXT3_TIND_BLOCK];
		if (nr) {
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, 3);
			i_data[EXT3_TIND_BLOCK] = 0;
		}
	case EXT3_TIND_BLOCK:
		;
	}
	up(&ei->truncate_sem);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext3_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle);
}
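
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * the multi-transaction shape of the truncate above, reduced to a
 * skeleton.  Each pass frees some blocks and, when the handle runs low
 * on credits, restarts the transaction; the orphan-list entry added up
 * front is what lets ext3_orphan_cleanup() re-run the truncate after a
 * crash between those transactions.  more_blocks_to_free and
 * free_some_blocks() are assumed placeholders, not ext3 names.
 */
#if 0
	if (ext3_orphan_add(handle, inode))	/* survive a crash */
		goto out_stop;
	ei->i_disksize = inode->i_size;		/* new size goes on disk */
	while (more_blocks_to_free) {
		free_some_blocks();
		if (try_to_extend_transaction(handle, inode)) {
			ext3_mark_inode_dirty(handle, inode);
			ext3_journal_test_restart(handle, inode);
		}
	}
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);	/* truncate completed */
#endif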
static unsigned long ext3_get_inode_block(struct super_block *sb,
		unsigned long ino, struct ext3_iloc *iloc)
{
	unsigned long desc, group_desc, block_group;
	unsigned long offset, block;
	struct buffer_head *bh;
	struct ext3_group_desc * gdp;

	if ((ino != EXT3_ROOT_INO &&
		ino != EXT3_JOURNAL_INO &&
		ino < EXT3_FIRST_INO(sb)) ||
	    (ino > le32_to_cpu(
		EXT3_SB(sb)->s_es->s_inodes_count))) {
		ext3_error (sb, "ext3_get_inode_block",
			    "bad inode number: %lu", ino);
		return 0;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	if (block_group >= EXT3_SB(sb)->s_groups_count) {
		ext3_error (sb, "ext3_get_inode_block",
			    "group >= groups count");
		return 0;
	}
	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
	desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
	bh = EXT3_SB(sb)->s_group_desc[group_desc];
	if (!bh) {
		ext3_error (sb, "ext3_get_inode_block",
			    "Descriptor not loaded");
		return 0;
	}

	gdp = (struct ext3_group_desc *) bh->b_data;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
		EXT3_INODE_SIZE(sb);
	block = le32_to_cpu(gdp[desc].bg_inode_table) +
		(offset >> EXT3_BLOCK_SIZE_BITS(sb));

	iloc->block_group = block_group;
	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
	return block;
}
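
/*
 * Worked example (illustrative numbers, not from the original source):
 * with 8192 inodes per group, 128-byte inodes and 4K blocks, ino 12345
 * gives block_group = 12344 / 8192 = 1, offset = (12344 % 8192) * 128 =
 * 531456 bytes into that group's inode table, i.e. table block
 * bg_inode_table + (531456 >> 12) = +129, and iloc->offset =
 * 531456 & 4095 = 3072 bytes into that block.
 */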
 * ext3_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If `in_mem' is false then we're purely
 * trying to determine the inode's location on-disk and no read need be
 * performed.
 */
static int ext3_get_inode_loc(struct inode *inode,
				struct ext3_iloc *iloc, int in_mem)
{
	unsigned long block;
	struct buffer_head *bh;

	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
				"unable to read inode block - "
				"inode=%lu, block=%lu", inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}
		/* we can't skip I/O if the inode is only on disk */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext3_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			int block_group;
			int start;
			/*
			 * If this is the only valid inode in the block we
			 * need not read the block.
			 */
			block_group = (inode->i_ino - 1) /
					EXT3_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT3_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT3_INODES_PER_GROUP(inode->i_sb));
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext3_get_group_desc(inode->i_sb,
						block_group, NULL);
			if (!desc)
				goto make_io;
			bitmap_bh = sb_getblk(inode->i_sb,
					le32_to_cpu(desc->bg_inode_bitmap));
			if (!bitmap_bh)
				goto make_io;
			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext3_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}
make_io:
		/*
		 * There are other valid inodes in the buffer, so we must
		 * read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext3_error(inode->i_sb, "ext3_get_inode_loc",
					"unable to read inode block - "
					"inode=%lu, block=%lu",
					inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
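
/*
 * Illustrative note (not in the original source): with 4K blocks and
 * 128-byte inodes, inodes_per_buffer == 32, so for the ino used in the
 * example above (inode_offset == 4152) the scan covers bitmap bits
 * 4128..4159 (start == 4152 & ~31 == 4128); only if every bit except
 * our own is clear may the read be skipped and the block zero-filled.
 */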
void ext3_truncate(struct inode * inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	ext3_truncate_nocheck(inode);
}
void ext3_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT3_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT3_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT3_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT3_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT3_IUNLINK_FL)
		inode->i_flags |= S_IUNLINK;
	if (flags & EXT3_BARRIER_FL)
		inode->i_flags |= S_BARRIER;
	if (flags & EXT3_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT3_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}
void ext3_read_inode(struct inode * inode)
{
	struct ext3_iloc iloc;
	struct ext3_inode *raw_inode;
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh;
	int block;
	uid_t uid;
	gid_t gid;

#ifdef CONFIG_EXT3_FS_POSIX_ACL
	ei->i_acl = EXT3_ACL_NOT_CACHED;
	ei->i_default_acl = EXT3_ACL_NOT_CACHED;
#endif
	if (ext3_get_inode_loc(inode, &iloc, 0))
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext3_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if(!(test_opt (inode->i_sb, NO_UID32))) {
		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_uid = INOXID_UID(uid, gid);
	inode->i_gid = INOXID_GID(uid, gid);
	if (inode->i_sb->s_flags & MS_TAGXID)
		inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));

	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;

	ei->i_state = 0;
	ei->i_next_alloc_block = 0;
	ei->i_next_alloc_goal = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size
					 * (for stat), not the fs block
					 * size */
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
#endif
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
#ifdef EXT3_PREALLOCATE
	ei->i_prealloc_count = 0;
#endif
	ei->i_block_group = iloc.block_group;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT3_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		ext3_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext3_dir_inode_operations;
		inode->i_fop = &ext3_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext3_inode_is_fast_symlink(inode))
			inode->i_op = &ext3_fast_symlink_inode_operations;
		else {
			inode->i_op = &ext3_symlink_inode_operations;
			ext3_set_aops(inode);
		}
	} else {
		inode->i_op = &ext3_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse (iloc.bh);
	ext3_set_inode_flags(inode);
	return;

bad_inode:
	make_bad_inode(inode);
	return;
}
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct buffer_head *bh = iloc->bh;
	uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
	gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT3_STATE_NEW)
		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if(!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if(!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
#ifdef CONFIG_INOXID_GID32
	raw_inode->i_raw_xid = cpu_to_le16(inode->i_xid);
#endif
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
#ifdef EXT3_FRAGMENTS
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
#endif
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	} else {
		raw_inode->i_size_high =
			cpu_to_le32(ei->i_disksize >> 32);
		if (ei->i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT3_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				err = ext3_journal_get_write_access(handle,
						EXT3_SB(sb)->s_sbh);
				if (err)
					goto out_brelse;
				ext3_update_dynamic_rev(sb);
				EXT3_SET_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
				sb->s_dirt = 1;
				handle->h_sync = 1;
				err = ext3_journal_dirty_metadata(handle,
						EXT3_SB(sb)->s_sbh);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	rc = ext3_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT3_STATE_NEW;

out_brelse:
	brelse (bh);
	ext3_std_error(inode->i_sb, err);
	return err;
}
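
/*
 * Illustrative note (not in the original source): a 32-bit uid is split
 * across the low/high halves, e.g. uid 100000 == 0x000186a0 is stored as
 * i_uid_low == 0x86a0 and i_uid_high == 0x0001.  The high halves are
 * zeroed for deleted inodes (i_dtime set) so that old 16-bit-uid kernels,
 * which never touch them, cannot leave stale high bits behind when the
 * on-disk inode is reused.
 */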
 * ext3_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
void ext3_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return;
	if (ext3_journal_current_handle()) {
		jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
		return;
	}
	if (!wait)
		return;
	ext3_force_commit(inode->i_sb);
}
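
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * the safe counterpart of the buggy ordering shown in the comment above.
 * All fields are updated *before* mark_inode_dirty(), so a concurrent
 * kswapd-driven write_inode() can never observe, and then lose, a
 * half-made change.
 */
#if 0
	inode->i_size = expr;		/* dirty the fields first... */
	stuff();
	mark_inode_dirty(inode);	/* ...then tell the VFS about it */
#endif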
int ext3_setattr_flags(struct inode *inode, unsigned int flags)
{
	unsigned int oldflags, newflags;
	int err = 0;

	oldflags = EXT3_I(inode)->i_flags;
	newflags = oldflags &
		~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
	if (flags & ATTR_FLAG_IMMUTABLE)
		newflags |= EXT3_IMMUTABLE_FL;
	if (flags & ATTR_FLAG_IUNLINK)
		newflags |= EXT3_IUNLINK_FL;
	if (flags & ATTR_FLAG_BARRIER)
		newflags |= EXT3_BARRIER_FL;

	if (oldflags ^ newflags) {
		handle_t *handle;
		struct ext3_iloc iloc;

		handle = ext3_journal_start(inode, 1);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		if (IS_SYNC(inode))
			handle->h_sync = 1;
		err = ext3_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		EXT3_I(inode)->i_flags = newflags;
		inode->i_ctime = CURRENT_TIME;

		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	flags_err:
		ext3_journal_stop(handle);
	}

	return err;
}
/*
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Called with inode->sem down.
 */
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
	    (ia_valid & ATTR_XID && attr->ia_xid != inode->i_xid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext3_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		if (attr->ia_valid & ATTR_XID)
			inode->i_xid = attr->ia_xid;
		error = ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext3_orphan_add(handle, inode);
		EXT3_I(inode)->i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle);
	}

	if (ia_valid & ATTR_ATTR_FLAG) {
		rc = ext3_setattr_flags(inode, attr->ia_attr_flags);
		if (!error)
			error = rc;
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext3_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext3_acl_chmod(inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
/*
 * akpm: how many blocks doth make a writepage()?
 *
 * With N blocks per page, it may be:
 * N data blocks
 * 2 indirect block
 * 2 dindirect
 * 1 tindirect
 * N+5 bitmap blocks (from the above)
 * N+5 group descriptor summary blocks
 * 1 inode block
 * 1 superblock.
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
 *
 * With ordered or writeback data it's the same, less the N data blocks.
 *
 * If the inode's direct blocks can hold an integral number of pages then a
 * page cannot straddle two indirect blocks, and we can only touch one indirect
 * and dindirect block, and the "5" above becomes "3".
 *
 * This still overestimates under most circumstances.  If we were to pass the
 * start and end offsets in here as well we could do block_to_path() on each
 * block and work out the exact number of indirects which are touched.  Pah.
 */
int ext3_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext3_journal_blocks_per_page(inode);
	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
	int ret;

	if (ext3_should_journal_data(inode))
		ret = 3 * (bpp + indirects) + 2;
	else
		ret = 2 * (bpp + indirects) + 2;
#ifdef CONFIG_QUOTA
	/* We know that structure was already allocated during DQUOT_INIT so
	 * we will be updating only the data blocks + inodes */
	ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
#endif
	return ret;
}
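
/*
 * Worked example (illustrative, not from the original source): with 4K
 * pages and 4K blocks, bpp == 1 and EXT3_NDIR_BLOCKS (12) divides into
 * pages evenly, so indirects == 3; a data-journalled page therefore
 * reserves 3 * (1 + 3) + 2 == 14 credits, ordered/writeback mode
 * reserves 2 * (1 + 3) + 2 == 10, plus the quota blocks when
 * CONFIG_QUOTA is set.
 */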
 * The caller must have previously called ext3_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext3_mark_iloc_dirty(handle_t *handle,
		struct inode *inode, struct ext3_iloc *iloc)
{
	int err;

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);
	/* ext3_do_update_inode() does journal_dirty_metadata */
	err = ext3_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
			     struct ext3_iloc *iloc)
{
	int err = ext3_get_inode_loc(inode, iloc, 1);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, iloc->bh);
		if (err)
			brelse(iloc->bh);
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
 * akpm: What we do here is to mark the in-core inode as clean
 * with respect to inode dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err;

	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (!err)
		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}
 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext3_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext3_journal_current_handle();
	handle_t *handle;

	handle = ext3_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
		current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __FUNCTION__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
				current_handle);
		ext3_mark_inode_dirty(handle, inode);
	}
	ext3_journal_stop(handle);
out:
	return;
}
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int
ext3_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err = 0;

	if (handle) {
		err = ext3_get_inode_loc(inode, &iloc, 1);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext3_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT3_JOURNAL(inode);
	if (is_journal_aborted(journal) || IS_RDONLY(inode))
		return -EROFS;

	journal_lock_updates(journal);
	journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
	else
		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
	ext3_set_aops(inode);

	journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext3_journal_stop(handle);
	ext3_std_error(inode->i_sb, err);

	return err;
}
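
/*
 * Illustrative note (not in the original source): the hazard the flush
 * above avoids, as an event ordering.  Without journal_flush():
 *
 *   1. data block D is written to the journal (data=journal mode);
 *   2. the inode is switched out of data-journaling mode;
 *   3. D is freed and reallocated as metadata;
 *   4. crash - replay re-writes the journalled copy of D over the new
 *      metadata, because no revoke record was ever emitted for D under
 *      the new mode.
 *
 * Flushing the journal first, under journal_lock_updates(), empties it,
 * so no stale copy of D can survive to be replayed.
 */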