vserver 2.0 rc7
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index da40fc7..b3194cb 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -40,6 +40,8 @@
 #include "xattr.h"
 #include "acl.h"
 
+static int ext3_writepage_trans_blocks(struct inode *inode);
+
 /*
  * Test whether an inode is a fast symlink.
  */
@@ -67,6 +69,8 @@ int ext3_forget(handle_t *handle, int is_metadata,
 {
        int err;
 
+       might_sleep();
+
        BUFFER_TRACE(bh, "enter");
 
        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
@@ -83,7 +87,7 @@ int ext3_forget(handle_t *handle, int is_metadata,
            (!is_metadata && !ext3_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call journal_forget");
-                       ext3_journal_forget(handle, bh);
+                       return ext3_journal_forget(handle, bh);
                }
                return 0;
        }
@@ -177,19 +181,6 @@ static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
        return ext3_journal_restart(handle, blocks_for_truncate(inode));
 }
 
-/*
- * Called at each iput()
- *
- * The inode may be "bad" if ext3_read_inode() saw an error from
- * ext3_get_inode(), so we need to check that to avoid freeing random disk
- * blocks.
- */
-void ext3_put_inode(struct inode *inode)
-{
-       if (!is_bad_inode(inode))
-               ext3_discard_prealloc(inode);
-}
-
 static void ext3_truncate_nocheck (struct inode *inode);
 
 /*
@@ -245,73 +236,23 @@ no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
 }
 
-void ext3_discard_prealloc (struct inode * inode)
-{
-#ifdef EXT3_PREALLOCATE
-       struct ext3_inode_info *ei = EXT3_I(inode);
-       /* Writer: ->i_prealloc* */
-       if (ei->i_prealloc_count) {
-               unsigned short total = ei->i_prealloc_count;
-               unsigned long block = ei->i_prealloc_block;
-               ei->i_prealloc_count = 0;
-               ei->i_prealloc_block = 0;
-               /* Writer: end */
-               ext3_free_blocks (inode, block, total);
-       }
-#endif
-}
-
 static int ext3_alloc_block (handle_t *handle,
                        struct inode * inode, unsigned long goal, int *err)
 {
        unsigned long result;
 
-#ifdef EXT3_PREALLOCATE
-#ifdef EXT3FS_DEBUG
-       static unsigned long alloc_hits, alloc_attempts;
-#endif
-       struct ext3_inode_info *ei = EXT3_I(inode);
-       /* Writer: ->i_prealloc* */
-       if (ei->i_prealloc_count &&
-           (goal == ei->i_prealloc_block ||
-            goal + 1 == ei->i_prealloc_block))
-       {
-               result = ei->i_prealloc_block++;
-               ei->i_prealloc_count--;
-               /* Writer: end */
-               ext3_debug ("preallocation hit (%lu/%lu).\n",
-                           ++alloc_hits, ++alloc_attempts);
-       } else {
-               ext3_discard_prealloc (inode);
-               ext3_debug ("preallocation miss (%lu/%lu).\n",
-                           alloc_hits, ++alloc_attempts);
-               if (S_ISREG(inode->i_mode))
-                       result = ext3_new_block (inode, goal, 
-                                &ei->i_prealloc_count,
-                                &ei->i_prealloc_block, err);
-               else
-                       result = ext3_new_block(inode, goal, NULL, NULL, err);
-               /*
-                * AKPM: this is somewhat sticky.  I'm not surprised it was
-                * disabled in 2.2's ext3.  Need to integrate b_committed_data
-                * guarding with preallocation, if indeed preallocation is
-                * effective.
-                */
-       }
-#else
-       result = ext3_new_block(handle, inode, goal, NULL, NULL, err);
-#endif
+       result = ext3_new_block(handle, inode, goal, err);
        return result;
 }
 
 
 typedef struct {
-       u32     *p;
-       u32     key;
+       __le32  *p;
+       __le32  key;
        struct buffer_head *bh;
 } Indirect;
 
-static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 {
        p->key = *(p->p = v);
        p->bh = bh;
@@ -442,7 +383,7 @@ static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
-               add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
+               add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
@@ -483,8 +424,8 @@ no_block:
 static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
 {
        struct ext3_inode_info *ei = EXT3_I(inode);
-       u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
-       u32 *p;
+       __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+       __le32 *p;
        unsigned long bg_start;
        unsigned long colour;
 
@@ -517,34 +458,24 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
  *     @goal:  place to store the result.
  *
  *     Normally this function find the prefered place for block allocation,
- *     stores it in *@goal and returns zero. If the branch had been changed
- *     under us we return -EAGAIN.
+ *     stores it in *@goal and returns zero.
  */
 
-static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
-                         Indirect *partial, unsigned long *goal)
+static unsigned long ext3_find_goal(struct inode *inode, long block,
+               Indirect chain[4], Indirect *partial)
 {
-       struct ext3_inode_info *ei = EXT3_I(inode);
-       /* Writer: ->i_next_alloc* */
-       if (block == ei->i_next_alloc_block + 1) {
-               ei->i_next_alloc_block++;
-               ei->i_next_alloc_goal++;
-       }
-       /* Writer: end */
-       /* Reader: pointers, ->i_next_alloc* */
-       if (verify_chain(chain, partial)) {
-               /*
-                * try the heuristic for sequential allocation,
-                * failing that at least try to get decent locality.
-                */
-               if (block == ei->i_next_alloc_block)
-                       *goal = ei->i_next_alloc_goal;
-               if (!*goal)
-                       *goal = ext3_find_near(inode, partial);
-               return 0;
+       struct ext3_block_alloc_info *block_i =  EXT3_I(inode)->i_block_alloc_info;
+
+       /*
+        * try the heuristic for sequential allocation,
+        * failing that at least try to get decent locality.
+        */
+       if (block_i && (block == block_i->last_alloc_logical_block + 1)
+               && (block_i->last_alloc_physical_block != 0)) {
+               return block_i->last_alloc_physical_block + 1;
        }
-       /* Reader: end */
-       return -EAGAIN;
+
+       return ext3_find_near(inode, partial);
 }
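
The hunk above swaps the old i_next_alloc_block/i_next_alloc_goal pair for the
per-inode ext3_block_alloc_info fields. As a rough illustration only (plain
user-space C, not kernel code; alloc_info, find_goal() and find_near_stub()
are made-up stand-ins for i_block_alloc_info, ext3_find_goal() and
ext3_find_near()), the goal selection boils down to "continue right after the
last allocated physical block when the write is sequential, otherwise fall
back to a locality guess":

#include <stdio.h>

struct alloc_info {
	unsigned long last_alloc_logical_block;
	unsigned long last_alloc_physical_block;
};

/* stand-in for ext3_find_near(): just return a fixed "nearby" block */
static unsigned long find_near_stub(void)
{
	return 1000;
}

/* mirrors the heuristic in the new ext3_find_goal() above */
static unsigned long find_goal(const struct alloc_info *bi, long block)
{
	if (bi && block == (long)(bi->last_alloc_logical_block + 1) &&
	    bi->last_alloc_physical_block != 0)
		return bi->last_alloc_physical_block + 1;
	return find_near_stub();
}

int main(void)
{
	struct alloc_info bi = {
		.last_alloc_logical_block = 9,
		.last_alloc_physical_block = 5120,
	};

	printf("sequential goal: %lu\n", find_goal(&bi, 10));	/* 5121 */
	printf("random-seek goal: %lu\n", find_goal(&bi, 42));	/* 1000 */
	return 0;
}
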
 
 /**
@@ -612,7 +543,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                        }
 
                        memset(bh->b_data, 0, blocksize);
-                       branch[n].p = (u32*) bh->b_data + offsets[n];
+                       branch[n].p = (__le32*) bh->b_data + offsets[n];
                        *branch[n].p = branch[n].key;
                        BUFFER_TRACE(bh, "marking uptodate");
                        set_buffer_uptodate(bh);
@@ -648,12 +579,9 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
  *     @where: location of missing link
  *     @num:   number of blocks we are adding
  *
- *     This function verifies that chain (up to the missing link) had not
- *     changed, fills the missing link and does all housekeeping needed in
+ *     This function fills the missing link and does all housekeeping needed in
  *     inode (->i_blocks, etc.). In case of success we end up with the full
- *     chain to new block and return 0. Otherwise (== chain had been changed)
- *     we free the new blocks (forgetting their buffer_heads, indeed) and
- *     return -EAGAIN.
+ *     chain to new block and return 0.
  */
 
 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
@@ -661,7 +589,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 {
        int i;
        int err = 0;
-       struct ext3_inode_info *ei = EXT3_I(inode);
+       struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
 
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
@@ -674,23 +602,23 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                if (err)
                        goto err_out;
        }
-       /* Verify that place we are splicing to is still there and vacant */
-
-       /* Writer: pointers, ->i_next_alloc* */
-       if (!verify_chain(chain, where-1) || *where->p)
-               /* Writer: end */
-               goto changed;
-
        /* That's it */
 
        *where->p = where->key;
-       ei->i_next_alloc_block = block;
-       ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
-       /* Writer: end */
+
+       /*
+        * update the most recently allocated logical & physical block
+        * in i_block_alloc_info, to assist find the proper goal block for next
+        * allocation
+        */
+       if (block_i) {
+               block_i->last_alloc_logical_block = block;
+               block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+       }
 
        /* We are done with atomic stuff, now do the rest of housekeeping */
 
-       inode->i_ctime = CURRENT_TIME;
+       inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
 
        /* had we spliced it onto indirect block? */
@@ -717,26 +645,11 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
        }
        return err;
 
-changed:
-       /*
-        * AKPM: if where[i].bh isn't part of the current updating
-        * transaction then we explode nastily.  Test this code path.
-        */
-       jbd_debug(1, "the chain changed: try again\n");
-       err = -EAGAIN;
-
 err_out:
        for (i = 1; i < num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
        }
-       /* For the normal collision cleanup case, we free up the blocks.
-        * On genuine filesystem errors we don't even think about doing
-        * that. */
-       if (err == -EAGAIN)
-               for (i = 0; i < num; i++)
-                       ext3_free_blocks(handle, inode, 
-                                        le32_to_cpu(where[i].key), 1);
        return err;
 }
 
@@ -768,7 +681,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
        unsigned long goal;
        int left;
        int boundary = 0;
-       int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+       const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
        struct ext3_inode_info *ei = EXT3_I(inode);
 
        J_ASSERT(handle != NULL || create == 0);
@@ -776,48 +689,55 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
        if (depth == 0)
                goto out;
 
-reread:
        partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 
        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                clear_buffer_new(bh_result);
-got_it:
-               map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-               if (boundary)
-                       set_buffer_boundary(bh_result);
-               /* Clean up and exit */
-               partial = chain+depth-1; /* the whole chain */
-               goto cleanup;
+               goto got_it;
        }
 
        /* Next simple case - plain lookup or failed read of indirect block */
-       if (!create || err == -EIO) {
-cleanup:
+       if (!create || err == -EIO)
+               goto cleanup;
+
+       down(&ei->truncate_sem);
+
+       /*
+        * If the indirect block is missing while we are reading
+        * the chain(ext3_get_branch() returns -EAGAIN err), or
+        * if the chain has been changed after we grab the semaphore,
+        * (either because another process truncated this branch, or
+        * another get_block allocated this branch) re-grab the chain to see if
+        * the request block has been allocated or not.
+        *
+        * Since we already block the truncate/other get_block
+        * at this point, we will have the current copy of the chain when we
+        * splice the branch into the tree.
+        */
+       if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
-                       BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
                        partial--;
                }
-               BUFFER_TRACE(bh_result, "returned");
-out:
-               return err;
+               partial = ext3_get_branch(inode, depth, offsets, chain, &err);
+               if (!partial) {
+                       up(&ei->truncate_sem);
+                       if (err)
+                               goto cleanup;
+                       clear_buffer_new(bh_result);
+                       goto got_it;
+               }
        }
 
        /*
-        * Indirect block might be removed by truncate while we were
-        * reading it. Handling of that case (forget what we've got and
-        * reread) is taken out of the main path.
-        */
-       if (err == -EAGAIN)
-               goto changed;
+        * Okay, we need to do block allocation.  Lazily initialize the block
+        * allocation info here if necessary
+       */
+       if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
+               ext3_init_block_alloc_info(inode);
 
-       goal = 0;
-       down(&ei->truncate_sem);
-       if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
-               up(&ei->truncate_sem);
-               goto changed;
-       }
+       goal = ext3_find_goal(inode, iblock, chain, partial);
 
        left = (chain + depth) - partial;
 
@@ -825,38 +745,45 @@ out:
         * Block out ext3_truncate while we alter the tree
         */
        err = ext3_alloc_branch(handle, inode, left, goal,
-                                       offsets+(partial-chain), partial);
+                               offsets + (partial - chain), partial);
 
-       /* The ext3_splice_branch call will free and forget any buffers
+       /*
+        * The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
-        * may need to return -EAGAIN upwards in the worst case.  --sct */
+        * may need to return -EAGAIN upwards in the worst case.  --sct
+        */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock, chain,
                                         partial, left);
-       /* i_disksize growing is protected by truncate_sem
-        * don't forget to protect it if you're about to implement
-        * concurrent ext3_get_block() -bzzz */
+       /*
+        * i_disksize growing is protected by truncate_sem.  Don't forget to
+        * protect it if you're about to implement concurrent
+        * ext3_get_block() -bzzz
+       */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        up(&ei->truncate_sem);
-       if (err == -EAGAIN)
-               goto changed;
        if (err)
                goto cleanup;
 
        set_buffer_new(bh_result);
-       goto got_it;
-
-changed:
+got_it:
+       map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+       if (boundary)
+               set_buffer_boundary(bh_result);
+       /* Clean up and exit */
+       partial = chain + depth - 1;    /* the whole chain */
+cleanup:
        while (partial > chain) {
-               jbd_debug(1, "buffer chain changed, retrying\n");
-               BUFFER_TRACE(partial->bh, "brelsing");
+               BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
-       goto reread;
+       BUFFER_TRACE(bh_result, "returned");
+out:
+       return err;
 }
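
The rewritten ext3_get_block_handle() above drops the old reread/changed retry
loop: the branch is looked up once without the lock, and only when the block is
missing does the code take truncate_sem, re-verify (or re-read) the chain in
case truncate or a racing get_block changed it, and allocate only if the block
is still absent. A minimal stand-alone sketch of that lock-and-recheck shape,
using a POSIX mutex instead of the kernel semaphore and hypothetical names
(slot, alloc_lock, get_block) rather than the ext3 functions themselves:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static long slot[16];		/* 0 means "no block mapped yet" */
static long next_block = 100;

static long lookup(int idx)
{
	return slot[idx];	/* unlocked lookup, result may be stale */
}

static long get_block(int idx)
{
	long blk = lookup(idx);

	if (blk)		/* simplest case: already mapped */
		return blk;

	pthread_mutex_lock(&alloc_lock);
	blk = lookup(idx);	/* re-check under the lock */
	if (!blk) {
		blk = next_block++;	/* "allocate" a new block */
		slot[idx] = blk;
	}
	pthread_mutex_unlock(&alloc_lock);
	return blk;
}

int main(void)
{
	printf("slot 3 -> block %ld\n", get_block(3));	/* allocates */
	printf("slot 3 -> block %ld\n", get_block(3));	/* found without lock */
	return 0;
}
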
 
 static int ext3_get_block(struct inode *inode, sector_t iblock,
@@ -977,52 +904,17 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                               int block, int create, int *err)
 {
        struct buffer_head * bh;
-       int prev_blocks;
-
-       prev_blocks = inode->i_blocks;
 
-       bh = ext3_getblk (handle, inode, block, create, err);
+       bh = ext3_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
-#ifdef EXT3_PREALLOCATE
-       /*
-        * If the inode has grown, and this is a directory, then use a few
-        * more of the preallocated blocks to keep directory fragmentation
-        * down.  The preallocated blocks are guaranteed to be contiguous.
-        */
-       if (create &&
-           S_ISDIR(inode->i_mode) &&
-           inode->i_blocks > prev_blocks &&
-           EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-                                   EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
-               int i;
-               struct buffer_head *tmp_bh;
-
-               for (i = 1;
-                    EXT3_I(inode)->i_prealloc_count &&
-                    i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
-                    i++) {
-                       /*
-                        * ext3_getblk will zero out the contents of the
-                        * directory for us
-                        */
-                       tmp_bh = ext3_getblk(handle, inode,
-                                               block+i, create, err);
-                       if (!tmp_bh) {
-                               brelse (bh);
-                               return 0;
-                       }
-                       brelse (tmp_bh);
-               }
-       }
-#endif
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block (READ, 1, &bh);
-       wait_on_buffer (bh);
+       ll_rw_block(READ, 1, &bh);
+       wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
-       brelse (bh);
+       put_bh(bh);
        *err = -EIO;
        return NULL;
 }
@@ -1107,7 +999,10 @@ retry:
                ret = PTR_ERR(handle);
                goto out;
        }
-       ret = block_prepare_write(page, from, to, ext3_get_block);
+       if (test_opt(inode->i_sb, NOBH))
+               ret = nobh_prepare_write(page, from, to, ext3_get_block);
+       else
+               ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret)
                goto prepare_write_failed;
 
@@ -1124,7 +1019,7 @@ out:
        return ret;
 }
 
-static int
+int
 ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
        int err = journal_dirty_data(handle, bh);
@@ -1191,7 +1086,12 @@ static int ext3_writeback_commit_write(struct file *file, struct page *page,
        new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        if (new_i_size > EXT3_I(inode)->i_disksize)
                EXT3_I(inode)->i_disksize = new_i_size;
-       ret = generic_commit_write(file, page, from, to);
+
+       if (test_opt(inode->i_sb, NOBH))
+               ret = nobh_commit_write(file, page, from, to);
+       else
+               ret = generic_commit_write(file, page, from, to);
+
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
@@ -1437,7 +1337,11 @@ static int ext3_writeback_writepage(struct page *page,
                goto out_fail;
        }
 
-       ret = block_write_full_page(page, ext3_get_block, wbc);
+       if (test_opt(inode->i_sb, NOBH))
+               ret = nobh_writepage(page, ext3_get_block, wbc);
+       else
+               ret = block_write_full_page(page, ext3_get_block, wbc);
+
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
@@ -1536,6 +1440,8 @@ static int ext3_releasepage(struct page *page, int wait)
        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
        WARN_ON(PageChecked(page));
+       if (!page_has_buffers(page))
+               return 0;
        return journal_try_to_free_buffers(journal, page, wait);
 }
 
@@ -1580,20 +1486,31 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
                                 offset, nr_segs,
                                 ext3_direct_io_get_blocks, NULL);
 
+       /*
+        * Reacquire the handle: ext3_direct_io_get_block() can restart the
+        * transaction
+        */
+       handle = journal_current_handle();
+
 out_stop:
        if (handle) {
                int err;
 
-               if (orphan)
+               if (orphan && inode->i_nlink)
                        ext3_orphan_del(handle, inode);
                if (orphan && ret > 0) {
                        loff_t end = offset + ret;
                        if (end > inode->i_size) {
                                ei->i_disksize = end;
                                i_size_write(inode, end);
-                               err = ext3_mark_inode_dirty(handle, inode);
-                               if (!ret) 
-                                       ret = err;
+                               /*
+                                * We're going to return a positive `ret'
+                                * here due to non-zero-length I/O, so there's
+                                * no way of reporting error returns from
+                                * ext3_mark_inode_dirty() to userspace.  So
+                                * ignore it.
+                                */
+                               ext3_mark_inode_dirty(handle, inode);
                        }
                }
                err = ext3_journal_stop(handle);
@@ -1686,13 +1603,28 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
        unsigned blocksize, iblock, length, pos;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
-       int err;
+       int err = 0;
        void *kaddr;
 
        blocksize = inode->i_sb->s_blocksize;
        length = blocksize - (offset & (blocksize - 1));
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
+       /*
+        * For "nobh" option,  we can only work if we don't need to
+        * read-in the page - otherwise we create buffers to do the IO.
+        */
+       if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH)) {
+               if (PageUptodate(page)) {
+                       kaddr = kmap_atomic(page, KM_USER0);
+                       memset(kaddr + offset, 0, length);
+                       flush_dcache_page(page);
+                       kunmap_atomic(kaddr, KM_USER0);
+                       set_page_dirty(page);
+                       goto unlock;
+               }
+       }
+
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
 
@@ -1768,7 +1700,7 @@ unlock:
  * or memcmp with zero_page, whatever is better for particular architecture.
  * Linus?
  */
-static inline int all_zeroes(u32 *p, u32 *q)
+static inline int all_zeroes(__le32 *p, __le32 *q)
 {
        while (p < q)
                if (*p++)
@@ -1815,7 +1747,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
                                int depth,
                                int offsets[4],
                                Indirect chain[4],
-                               u32 *top)
+                               __le32 *top)
 {
        Indirect *partial, *p;
        int k, err;
@@ -1835,7 +1767,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
        if (!partial->key && *partial->p)
                /* Writer: end */
                goto no_top;
-       for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
+       for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
                ;
        /*
         * OK, we've found the last block that must survive. The rest of our
@@ -1874,9 +1806,9 @@ no_top:
 static void
 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
                unsigned long block_to_free, unsigned long count,
-               u32 *first, u32 *last)
+               __le32 *first, __le32 *last)
 {
-       u32 *p;
+       __le32 *p;
        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -1932,15 +1864,16 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
  * block pointers.
  */
 static void ext3_free_data(handle_t *handle, struct inode *inode,
-                          struct buffer_head *this_bh, u32 *first, u32 *last)
+                          struct buffer_head *this_bh,
+                          __le32 *first, __le32 *last)
 {
        unsigned long block_to_free = 0;    /* Starting block # of a run */
        unsigned long count = 0;            /* Number of blocks in the run */ 
-       u32 *block_to_free_p = NULL;        /* Pointer into inode/ind
+       __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
                                               corresponding to
                                               block_to_free */
        unsigned long nr;                   /* Current block # */
-       u32 *p;                             /* Pointer into inode/ind
+       __le32 *p;                          /* Pointer into inode/ind
                                               for current block */
        int err;
 
@@ -1999,10 +1932,10 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
  */
 static void ext3_free_branches(handle_t *handle, struct inode *inode,
                               struct buffer_head *parent_bh,
-                              u32 *first, u32 *last, int depth)
+                              __le32 *first, __le32 *last, int depth)
 {
        unsigned long nr;
-       u32 *p;
+       __le32 *p;
 
        if (is_handle_aborted(handle))
                return;
@@ -2032,8 +1965,9 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
 
                        /* This zaps the entire block.  Bottom up. */
                        BUFFER_TRACE(bh, "free child branches");
-                       ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
-                                          (u32*)bh->b_data + addr_per_block,
+                       ext3_free_branches(handle, inode, bh,
+                                          (__le32*)bh->b_data,
+                                          (__le32*)bh->b_data + addr_per_block,
                                           depth);
 
                        /*
@@ -2138,13 +2072,13 @@ void ext3_truncate_nocheck(struct inode * inode)
 {
        handle_t *handle;
        struct ext3_inode_info *ei = EXT3_I(inode);
-       u32 *i_data = ei->i_data;
+       __le32 *i_data = ei->i_data;
        int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
-       int nr = 0;
+       __le32 nr = 0;
        int n;
        long last_block;
        unsigned blocksize = inode->i_sb->s_blocksize;
@@ -2156,8 +2090,6 @@ void ext3_truncate_nocheck(struct inode * inode)
        if (ext3_inode_is_fast_symlink(inode))
                return;
 
-       ext3_discard_prealloc(inode);
-
        /*
         * We have to lock the EOF page here, because lock_page() nests
         * outside journal_start().
@@ -2249,7 +2181,7 @@ void ext3_truncate_nocheck(struct inode * inode)
        /* Clear the ends of indirect blocks on the shared branch */
        while (partial > chain) {
                ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
-                                  (u32*)partial->bh->b_data + addr_per_block,
+                                  (__le32*)partial->bh->b_data+addr_per_block,
                                   (chain+n-1) - partial);
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse (partial->bh);
@@ -2282,8 +2214,11 @@ do_indirects:
                case EXT3_TIND_BLOCK:
                        ;
        }
+
+       ext3_discard_reservation(inode);
+
        up(&ei->truncate_sem);
-       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
 
        /* In a multi-transaction truncate, we only make the final
@@ -2312,8 +2247,10 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
        struct buffer_head *bh;
        struct ext3_group_desc * gdp;
 
+
        if ((ino != EXT3_ROOT_INO &&
                ino != EXT3_JOURNAL_INO &&
+               ino != EXT3_RESIZE_INO &&
                ino < EXT3_FIRST_INO(sb)) ||
                ino > le32_to_cpu(
                        EXT3_SB(sb)->s_es->s_inodes_count)) {
@@ -2327,6 +2264,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
                            "group >= groups count");
                return 0;
        }
+       smp_rmb();
        group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
        desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
        bh = EXT3_SB(sb)->s_group_desc[group_desc];
@@ -2350,13 +2288,13 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
        return block;
 }
 
-/* 
+/*
  * ext3_get_inode_loc returns with an extra refcount against the inode's
- * underlying buffer_head on success.  If `in_mem' is false then we're purely
- * trying to determine the inode's location on-disk and no read need be
- * performed.
+ * underlying buffer_head on success. If 'in_mem' is true, we have all
+ * data in memory that is needed to recreate the on-disk version of this
+ * inode.
  */
-static int ext3_get_inode_loc(struct inode *inode,
+static int __ext3_get_inode_loc(struct inode *inode,
                                struct ext3_iloc *iloc, int in_mem)
 {
        unsigned long block;
@@ -2381,7 +2319,11 @@ static int ext3_get_inode_loc(struct inode *inode,
                        goto has_buffer;
                }
 
-               /* we can't skip I/O if inode is on a disk only */
+               /*
+                * If we have all information of the inode in memory and this
+                * is the only valid inode in the block, we need not read the
+                * block.
+                */
                if (in_mem) {
                        struct buffer_head *bitmap_bh;
                        struct ext3_group_desc *desc;
@@ -2390,10 +2332,6 @@ static int ext3_get_inode_loc(struct inode *inode,
                        int block_group;
                        int start;
 
-                       /*
-                        * If this is the only valid inode in the block we
-                        * need not read the block.
-                        */
                        block_group = (inode->i_ino - 1) /
                                        EXT3_INODES_PER_GROUP(inode->i_sb);
                        inodes_per_buffer = bh->b_size /
@@ -2440,8 +2378,9 @@ static int ext3_get_inode_loc(struct inode *inode,
 
 make_io:
                /*
-                * There are another valid inodes in the buffer so we must
-                * read the block from disk
+                * There are other valid inodes in the buffer, this inode
+                * has in-inode xattrs, or we don't have this inode in memory.
+                * Read the block from disk.
                 */
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
@@ -2461,6 +2400,13 @@ has_buffer:
        return 0;
 }
 
+int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
+{
+       /* We have all inode data except xattrs in memory here. */
+       return __ext3_get_inode_loc(inode, iloc,
+               !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+}
+
 void ext3_truncate(struct inode * inode)
 {
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
@@ -2472,7 +2418,7 @@ void ext3_set_inode_flags(struct inode *inode)
 {
        unsigned int flags = EXT3_I(inode)->i_flags;
 
-       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_IUNLINK|S_BARRIER|S_NOATIME|S_DIRSYNC);
        if (flags & EXT3_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & EXT3_APPEND_FL)
@@ -2503,7 +2449,9 @@ void ext3_read_inode(struct inode * inode)
        ei->i_acl = EXT3_ACL_NOT_CACHED;
        ei->i_default_acl = EXT3_ACL_NOT_CACHED;
 #endif
-       if (ext3_get_inode_loc(inode, &iloc, 0))
+       ei->i_block_alloc_info = NULL;
+
+       if (__ext3_get_inode_loc(inode, &iloc, 0))
                goto bad_inode;
        bh = iloc.bh;
        raw_inode = ext3_raw_inode(&iloc);
@@ -2527,8 +2475,6 @@ void ext3_read_inode(struct inode * inode)
        inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
 
        ei->i_state = 0;
-       ei->i_next_alloc_block = 0;
-       ei->i_next_alloc_goal = 0;
        ei->i_dir_start_lookup = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
@@ -2567,11 +2513,7 @@ void ext3_read_inode(struct inode * inode)
        }
        ei->i_disksize = inode->i_size;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
-#ifdef EXT3_PREALLOCATE
-       ei->i_prealloc_count = 0;
-#endif
        ei->i_block_group = iloc.block_group;
-
        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
@@ -2580,6 +2522,31 @@ void ext3_read_inode(struct inode * inode)
                ei->i_data[block] = raw_inode->i_block[block];
        INIT_LIST_HEAD(&ei->i_orphan);
 
+       if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
+           EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
+               /*
+                * When mke2fs creates big inodes it does not zero out
+                * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
+                * so ignore those first few inodes.
+                */
+               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+               if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+                   EXT3_INODE_SIZE(inode->i_sb))
+                       goto bad_inode;
+               if (ei->i_extra_isize == 0) {
+                       /* The extra space is currently unused. Use it. */
+                       ei->i_extra_isize = sizeof(struct ext3_inode) -
+                                           EXT3_GOOD_OLD_INODE_SIZE;
+               } else {
+                       __le32 *magic = (void *)raw_inode +
+                                       EXT3_GOOD_OLD_INODE_SIZE +
+                                       ei->i_extra_isize;
+                       if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
+                                ei->i_state |= EXT3_STATE_XATTR;
+               }
+       } else
+               ei->i_extra_isize = 0;
+
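
The block above is what lets ext3 find in-inode extended attributes: a large
on-disk inode stores i_extra_isize right after the classic 128-byte inode, and
if the 32 bits following those extra fields hold the xattr magic number, an
in-inode xattr block is present. A self-contained sketch of that probe in
user-space C, with the constants redefined locally and has_in_inode_xattr() as
a hypothetical helper rather than an ext3 function (the kernel compares against
cpu_to_le32(EXT3_XATTR_MAGIC) because the value is little-endian on disk; the
toy example below stays in host byte order throughout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GOOD_OLD_INODE_SIZE	128		/* EXT3_GOOD_OLD_INODE_SIZE */
#define XATTR_MAGIC		0xEA020000u	/* EXT3_XATTR_MAGIC */

static int has_in_inode_xattr(const unsigned char *raw, size_t inode_size,
			      uint16_t extra_isize)
{
	uint32_t magic;

	if (GOOD_OLD_INODE_SIZE + extra_isize + sizeof(magic) > inode_size)
		return 0;
	memcpy(&magic, raw + GOOD_OLD_INODE_SIZE + extra_isize, sizeof(magic));
	return magic == XATTR_MAGIC;
}

int main(void)
{
	unsigned char raw[256] = { 0 };
	uint32_t magic = XATTR_MAGIC;

	/* pretend the inode uses 4 extra bytes, then an xattr header follows */
	memcpy(raw + GOOD_OLD_INODE_SIZE + 4, &magic, sizeof(magic));
	printf("has in-inode xattrs: %d\n",
	       has_in_inode_xattr(raw, sizeof(raw), 4));
	return 0;
}
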
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext3_file_inode_operations;
                inode->i_fop = &ext3_file_operations;
@@ -2660,7 +2627,7 @@ static int ext3_do_update_inode(handle_t *handle,
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
        }
-#ifdef CONFIG_INOXID_GID32
+#ifdef CONFIG_INOXID_INTERN
        raw_inode->i_raw_xid = cpu_to_le16(inode->i_xid);
 #endif
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
@@ -2720,6 +2687,9 @@ static int ext3_do_update_inode(handle_t *handle,
        } else for (block = 0; block < EXT3_N_BLOCKS; block++)
                raw_inode->i_block[block] = ei->i_data[block];
 
+       if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
+               raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
+
        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
        rc = ext3_journal_dirty_metadata(handle, bh);
        if (!err)
@@ -2767,21 +2737,21 @@ out_brelse:
  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
  * will no longer be on the superblock's dirty inode list.
  */
-void ext3_write_inode(struct inode *inode, int wait)
+int ext3_write_inode(struct inode *inode, int wait)
 {
        if (current->flags & PF_MEMALLOC)
-               return;
+               return 0;
 
        if (ext3_journal_current_handle()) {
                jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
                dump_stack();
-               return;
+               return -EIO;
        }
 
        if (!wait)
-               return;
+               return 0;
 
-       ext3_force_commit(inode->i_sb);
+       return ext3_force_commit(inode->i_sb);
 }
 
 int ext3_setattr_flags(struct inode *inode, unsigned int flags)
@@ -2791,7 +2761,7 @@ int ext3_setattr_flags(struct inode *inode, unsigned int flags)
 
        oldflags = EXT3_I(inode)->i_flags;
        newflags = oldflags &
-               ~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);       
+               ~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
        if (flags & ATTR_FLAG_IMMUTABLE)
                newflags |= EXT3_IMMUTABLE_FL;
        if (flags & ATTR_FLAG_IUNLINK)
@@ -2811,7 +2781,7 @@ int ext3_setattr_flags(struct inode *inode, unsigned int flags)
                err = ext3_reserve_inode_write(handle, inode, &iloc);
                if (err)
                        goto flags_err;
-               
+
                EXT3_I(inode)->i_flags = newflags;
                inode->i_ctime = CURRENT_TIME;
 
@@ -2950,7 +2920,7 @@ err_out:
  * block and work out the exact number of indirects which are touched.  Pah.
  */
 
-int ext3_writepage_trans_blocks(struct inode *inode)
+static int ext3_writepage_trans_blocks(struct inode *inode)
 {
        int bpp = ext3_journal_blocks_per_page(inode);
        int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
@@ -2999,7 +2969,7 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
 {
        int err = 0;
        if (handle) {
-               err = ext3_get_inode_loc(inode, iloc, 1);
+               err = ext3_get_inode_loc(inode, iloc);
                if (!err) {
                        BUFFER_TRACE(iloc->bh, "get_write_access");
                        err = ext3_journal_get_write_access(handle, iloc->bh);
@@ -3039,6 +3009,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
        struct ext3_iloc iloc;
        int err;
 
+       might_sleep();
        err = ext3_reserve_inode_write(handle, inode, &iloc);
        if (!err)
                err = ext3_mark_iloc_dirty(handle, inode, &iloc);
@@ -3097,7 +3068,7 @@ ext3_pin_inode(handle_t *handle, struct inode *inode)
 
        int err = 0;
        if (handle) {
-               err = ext3_get_inode_loc(inode, &iloc, 1);
+               err = ext3_get_inode_loc(inode, &iloc);
                if (!err) {
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = journal_get_write_access(handle, iloc.bh);