{
int err;
+ might_sleep();
+
BUFFER_TRACE(bh, "enter");
jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
* need to make sure that the in-core orphan linked list
* is properly cleaned up. */
ext3_orphan_del(NULL, inode);
-
- ext3_std_error(inode->i_sb, PTR_ERR(handle));
goto no_delete;
}
&ei->i_prealloc_count,
&ei->i_prealloc_block, err);
else
- result = ext3_new_block (inode, goal, 0, 0, err);
+ result = ext3_new_block(inode, goal, NULL, NULL, err);
/*
* AKPM: this is somewhat sticky. I'm not surprised it was
* disabled in 2.2's ext3. Need to integrate b_committed_data
*/
}
#else
- result = ext3_new_block (handle, inode, goal, 0, 0, err);
+ result = ext3_new_block(handle, inode, goal, NULL, NULL, err);
#endif
return result;
}
typedef struct {
- u32 *p;
- u32 key;
+ __le32 *p;
+ __le32 key;
struct buffer_head *bh;
} Indirect;
-static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
p->key = *(p->p = v);
p->bh = bh;
/* Reader: pointers */
if (!verify_chain(chain, p))
goto changed;
- add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
+ add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
/* Reader: end */
if (!p->key)
goto no_block;
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
struct ext3_inode_info *ei = EXT3_I(inode);
- u32 *start = ind->bh ? (u32*) ind->bh->b_data : ei->i_data;
- u32 *p;
+ __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+ __le32 *p;
unsigned long bg_start;
unsigned long colour;
}
memset(bh->b_data, 0, blocksize);
- branch[n].p = (u32*) bh->b_data + offsets[n];
+ branch[n].p = (__le32*) bh->b_data + offsets[n];
*branch[n].p = branch[n].key;
BUFFER_TRACE(bh, "marking uptodate");
set_buffer_uptodate(bh);
if (err == -EAGAIN)
goto changed;
+ goal = 0;
down(&ei->truncate_sem);
if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
up(&ei->truncate_sem);
static int ext3_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
- handle_t *handle = 0;
+ handle_t *handle = NULL;
int ret;
if (create) {
handle_t *handle = journal_current_handle();
int ret = 0;
- if (handle && handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
+ if (!handle)
+ goto get_block; /* A read */
+
+ if (handle->h_transaction->t_state == T_LOCKED) {
+ /*
+ * Huge direct-io writes can hold off commits for long
+ * periods of time. Let this commit run.
+ */
+ ext3_journal_stop(handle);
+ handle = ext3_journal_start(inode, DIO_CREDITS);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ goto get_block;
+ }
+
+ if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
/*
* Getting low on buffer credits...
*/
- if (!ext3_journal_extend(handle, DIO_CREDITS)) {
+ ret = ext3_journal_extend(handle, DIO_CREDITS);
+ if (ret > 0) {
/*
- * Couldn't extend the transaction. Start a new one
+ * Couldn't extend the transaction. Start a new one.
*/
ret = ext3_journal_restart(handle, DIO_CREDITS);
}
}
+
+get_block:
if (ret == 0)
ret = ext3_get_block_handle(handle, inode, iblock,
bh_result, create, 0);
- if (ret == 0)
- bh_result->b_size = (1 << inode->i_blkbits);
+ bh_result->b_size = (1 << inode->i_blkbits);
return ret;
}
-
/*
* `handle' can be NULL if create is zero
*/
struct inode *inode = page->mapping->host;
int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
handle_t *handle;
+ int retries = 0;
+retry:
handle = ext3_journal_start(inode, needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
ret = block_prepare_write(page, from, to, ext3_get_block);
- if (ret != 0)
+ if (ret)
goto prepare_write_failed;
if (ext3_should_journal_data(inode)) {
prepare_write_failed:
if (ret)
ext3_journal_stop(handle);
+ if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
out:
return ret;
}
offset, nr_segs,
ext3_direct_io_get_blocks, NULL);
+ /*
+ * Reacquire the handle: ext3_direct_io_get_block() can restart the
+ * transaction
+ */
+ handle = journal_current_handle();
+
out_stop:
if (handle) {
int err;
* or memcmp with zero_page, whatever is better for particular architecture.
* Linus?
*/
-static inline int all_zeroes(u32 *p, u32 *q)
+static inline int all_zeroes(__le32 *p, __le32 *q)
{
while (p < q)
if (*p++)
int depth,
int offsets[4],
Indirect chain[4],
- u32 *top)
+ __le32 *top)
{
Indirect *partial, *p;
int k, err;
if (!partial->key && *partial->p)
/* Writer: end */
goto no_top;
- for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
+ for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
;
/*
* OK, we've found the last block that must survive. The rest of our
static void
ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
unsigned long block_to_free, unsigned long count,
- u32 *first, u32 *last)
+ __le32 *first, __le32 *last)
{
- u32 *p;
+ __le32 *p;
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
* block pointers.
*/
static void ext3_free_data(handle_t *handle, struct inode *inode,
- struct buffer_head *this_bh, u32 *first, u32 *last)
+ struct buffer_head *this_bh,
+ __le32 *first, __le32 *last)
{
unsigned long block_to_free = 0; /* Starting block # of a run */
unsigned long count = 0; /* Number of blocks in the run */
- u32 *block_to_free_p = NULL; /* Pointer into inode/ind
+ __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
corresponding to
block_to_free */
unsigned long nr; /* Current block # */
- u32 *p; /* Pointer into inode/ind
+ __le32 *p; /* Pointer into inode/ind
for current block */
int err;
*/
static void ext3_free_branches(handle_t *handle, struct inode *inode,
struct buffer_head *parent_bh,
- u32 *first, u32 *last, int depth)
+ __le32 *first, __le32 *last, int depth)
{
unsigned long nr;
- u32 *p;
+ __le32 *p;
if (is_handle_aborted(handle))
return;
/* This zaps the entire block. Bottom up. */
BUFFER_TRACE(bh, "free child branches");
- ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
- (u32*)bh->b_data + addr_per_block,
+ ext3_free_branches(handle, inode, bh,
+ (__le32*)bh->b_data,
+ (__le32*)bh->b_data + addr_per_block,
depth);
/*
{
handle_t *handle;
struct ext3_inode_info *ei = EXT3_I(inode);
- u32 *i_data = ei->i_data;
+ __le32 *i_data = ei->i_data;
int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
struct address_space *mapping = inode->i_mapping;
int offsets[4];
Indirect chain[4];
Indirect *partial;
- int nr = 0;
+ __le32 nr = 0;
int n;
long last_block;
unsigned blocksize = inode->i_sb->s_blocksize;
/* Clear the ends of indirect blocks on the shared branch */
while (partial > chain) {
ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
- (u32*)partial->bh->b_data + addr_per_block,
+ (__le32*)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
BUFFER_TRACE(partial->bh, "call brelse");
brelse (partial->bh);
uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
- inode->i_uid = INOXID_UID(uid, gid);
- inode->i_gid = INOXID_GID(uid, gid);
- if (inode->i_sb->s_flags & MS_TAGXID)
- inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));
+ inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
+ inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
+ inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
+ le16_to_cpu(raw_inode->i_raw_xid));
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
struct ext3_inode_info *ei = EXT3_I(inode);
struct buffer_head *bh = iloc->bh;
- uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
int err = 0, rc, block;
/* For fields not tracked in the in-memory inode,
* `stuff()' is running, and the new i_size will be lost. Plus the inode
* will no longer be on the superblock's dirty inode list.
*/
-void ext3_write_inode(struct inode *inode, int wait)
+int ext3_write_inode(struct inode *inode, int wait)
{
if (current->flags & PF_MEMALLOC)
- return;
+ return 0;
if (ext3_journal_current_handle()) {
jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
dump_stack();
- return;
+ return -EIO;
}
if (!wait)
- return;
+ return 0;
- ext3_force_commit(inode->i_sb);
+ return ext3_force_commit(inode->i_sb);
}
int ext3_setattr_flags(struct inode *inode, unsigned int flags)
oldflags = EXT3_I(inode)->i_flags;
newflags = oldflags &
- ~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
+ ~(EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
if (flags & ATTR_FLAG_IMMUTABLE)
newflags |= EXT3_IMMUTABLE_FL;
if (flags & ATTR_FLAG_IUNLINK)
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err)
goto flags_err;
-
+
EXT3_I(inode)->i_flags = newflags;
inode->i_ctime = CURRENT_TIME;
return error;
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+ (ia_valid & ATTR_XID && attr->ia_xid != inode->i_xid)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
+ if ((attr->ia_valid & ATTR_XID)
+ && inode->i_sb
+ && (inode->i_sb->s_flags & MS_TAGXID))
+ inode->i_xid = attr->ia_xid;
error = ext3_mark_inode_dirty(handle, inode);
ext3_journal_stop(handle);
}
struct ext3_iloc iloc;
int err;
+ might_sleep();
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (!err)
err = ext3_mark_iloc_dirty(handle, inode, &iloc);