X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fext3%2Fballoc.c;h=9de5e42ca2087c372ea29a572cf87f07bcba404f;hb=1bb1c71785da5d4c8a916d87ab68bc48e5103bd3;hp=75369a95cceece50065dafe602e2476c13a5782c;hpb=bc77d24c47b89f1e0efed0b8e4be5f8aad102883;p=linux-2.6.git

diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 75369a95c..9de5e42ca 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -11,14 +11,16 @@
  * David S. Miller (davem@caip.rutgers.edu), 1995
  */

-#include <linux/config.h>
 #include <linux/time.h>
+#include <linux/capability.h>
 #include <linux/fs.h>
 #include <linux/jbd.h>
 #include <linux/ext3_fs.h>
 #include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>

 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -43,33 +45,34 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
 {
	unsigned long group_desc;
-	unsigned long desc;
-	struct ext3_group_desc * gdp;
+	unsigned long offset;
+	struct ext3_group_desc * desc;
+	struct ext3_sb_info *sbi = EXT3_SB(sb);

-	if (block_group >= EXT3_SB(sb)->s_groups_count) {
+	if (block_group >= sbi->s_groups_count) {
		ext3_error (sb, "ext3_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
-			    block_group, EXT3_SB(sb)->s_groups_count);
+			    block_group, sbi->s_groups_count);

		return NULL;
	}
+	smp_rmb();

-	group_desc = block_group / EXT3_DESC_PER_BLOCK(sb);
-	desc = block_group % EXT3_DESC_PER_BLOCK(sb);
-	if (!EXT3_SB(sb)->s_group_desc[group_desc]) {
+	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
+	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
+	if (!sbi->s_group_desc[group_desc]) {
		ext3_error (sb, "ext3_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
-			     block_group, group_desc, desc);
+			     block_group, group_desc, offset);
		return NULL;
	}

-	gdp = (struct ext3_group_desc *)
-	      EXT3_SB(sb)->s_group_desc[group_desc]->b_data;
+	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
	if (bh)
-		*bh = EXT3_SB(sb)->s_group_desc[group_desc];
-	return gdp + desc;
+		*bh = sbi->s_group_desc[group_desc];
+	return desc + offset;
 }

 /*
@@ -91,8 +94,8 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
	if (!bh)
		ext3_error (sb, "read_block_bitmap",
			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %lu",
-			    block_group, (unsigned long) desc->bg_block_bitmap);
+			    "block_group = %d, block_bitmap = %u",
+			    block_group, le32_to_cpu(desc->bg_block_bitmap));
 error_out:
	return bh;
 }

 /*
 * balloc.c contains the blocks allocation and deallocation routines
@@ -110,105 +113,231 @@ error_out:
 * we could easily switch to that without changing too much
 * code.
*/ -static inline void rsv_window_dump(struct reserve_window *head, char *fn) +#if 0 +static void __rsv_window_dump(struct rb_root *root, int verbose, + const char *fn) { - struct reserve_window *rsv; + struct rb_node *n; + struct ext3_reserve_window_node *rsv, *prev; + int bad; + +restart: + n = rb_first(root); + bad = 0; + prev = NULL; printk("Block Allocation Reservation Windows Map (%s):\n", fn); - list_for_each_entry(rsv, &head->rsv_list, rsv_list) { - printk("reservation window 0x%p start: %d, end: %d\n", - rsv, rsv->rsv_start, rsv->rsv_end); + while (n) { + rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); + if (verbose) + printk("reservation window 0x%p " + "start: %d, end: %d\n", + rsv, rsv->rsv_start, rsv->rsv_end); + if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { + printk("Bad reservation %p (start >= end)\n", + rsv); + bad = 1; + } + if (prev && prev->rsv_end >= rsv->rsv_start) { + printk("Bad reservation %p (prev->end >= start)\n", + rsv); + bad = 1; + } + if (bad) { + if (!verbose) { + printk("Restarting reservation walk in verbose mode\n"); + verbose = 1; + goto restart; + } + } + n = rb_next(n); + prev = rsv; } + printk("Window map complete.\n"); + if (bad) + BUG(); } +#define rsv_window_dump(root, verbose) \ + __rsv_window_dump((root), (verbose), __FUNCTION__) +#else +#define rsv_window_dump(root, verbose) do {} while (0) +#endif static int -goal_in_my_reservation(struct reserve_window *rsv, int goal, +goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, unsigned int group, struct super_block * sb) { - unsigned long group_first_block, group_last_block; + ext3_fsblk_t group_first_block, group_last_block; - group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + - group * EXT3_BLOCKS_PER_GROUP(sb); - group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; + group_first_block = ext3_group_first_block_no(sb, group); + group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); - if ((rsv->rsv_start > group_last_block) || - (rsv->rsv_end < group_first_block)) + if ((rsv->_rsv_start > group_last_block) || + (rsv->_rsv_end < group_first_block)) return 0; - if ((goal >= 0) && ((goal + group_first_block < rsv->rsv_start) - || (goal + group_first_block > rsv->rsv_end))) + if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start) + || (grp_goal + group_first_block > rsv->_rsv_end))) return 0; return 1; } -static inline void rsv_window_add(struct reserve_window *rsv, - struct reserve_window *prev) +/* + * Find the reserved window which includes the goal, or the previous one + * if the goal is not in any window. + * Returns NULL if there are no windows or if all windows start after the goal. + */ +static struct ext3_reserve_window_node * +search_reserve_window(struct rb_root *root, ext3_fsblk_t goal) { - /* insert the new reservation window after the head */ - list_add(&rsv->rsv_list, &prev->rsv_list); + struct rb_node *n = root->rb_node; + struct ext3_reserve_window_node *rsv; + + if (!n) + return NULL; + + do { + rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); + + if (goal < rsv->rsv_start) + n = n->rb_left; + else if (goal > rsv->rsv_end) + n = n->rb_right; + else + return rsv; + } while (n); + /* + * We've fallen off the end of the tree: the goal wasn't inside + * any particular node. OK, the previous node must be to one + * side of the interval containing the goal. If it's the RHS, + * we need to back up one. 
+ */ + if (rsv->rsv_start > goal) { + n = rb_prev(&rsv->rsv_node); + rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); + } + return rsv; } -static inline void rsv_window_remove(struct reserve_window *rsv) +void ext3_rsv_window_add(struct super_block *sb, + struct ext3_reserve_window_node *rsv) { - rsv->rsv_start = 0; - rsv->rsv_end = 0; - rsv->rsv_alloc_hit = 0; - list_del(&rsv->rsv_list); - INIT_LIST_HEAD(&rsv->rsv_list); + struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root; + struct rb_node *node = &rsv->rsv_node; + ext3_fsblk_t start = rsv->rsv_start; + + struct rb_node ** p = &root->rb_node; + struct rb_node * parent = NULL; + struct ext3_reserve_window_node *this; + + while (*p) + { + parent = *p; + this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node); + + if (start < this->rsv_start) + p = &(*p)->rb_left; + else if (start > this->rsv_end) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); +} + +static void rsv_window_remove(struct super_block *sb, + struct ext3_reserve_window_node *rsv) +{ + rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; + rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; + rsv->rsv_alloc_hit = 0; + rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); } -static inline int rsv_is_empty(struct reserve_window *rsv) +static inline int rsv_is_empty(struct ext3_reserve_window *rsv) { /* a valid reservation end block could not be 0 */ - return (rsv->rsv_end == 0); + return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED); +} +void ext3_init_block_alloc_info(struct inode *inode) +{ + struct ext3_inode_info *ei = EXT3_I(inode); + struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; + struct super_block *sb = inode->i_sb; + + block_i = kmalloc(sizeof(*block_i), GFP_NOFS); + if (block_i) { + struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node; + + rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; + rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; + + /* + * if filesystem is mounted with NORESERVATION, the goal + * reservation window size is set to zero to indicate + * block reservation is off + */ + if (!test_opt(sb, RESERVATION)) + rsv->rsv_goal_size = 0; + else + rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS; + rsv->rsv_alloc_hit = 0; + block_i->last_alloc_logical_block = 0; + block_i->last_alloc_physical_block = 0; + } + ei->i_block_alloc_info = block_i; } void ext3_discard_reservation(struct inode *inode) { struct ext3_inode_info *ei = EXT3_I(inode); - struct reserve_window *rsv = &ei->i_rsv_window; + struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; + struct ext3_reserve_window_node *rsv; spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock; - if (!rsv_is_empty(rsv)) { + if (!block_i) + return; + + rsv = &block_i->rsv_window_node; + if (!rsv_is_empty(&rsv->rsv_window)) { spin_lock(rsv_lock); - rsv_window_remove(rsv); + if (!rsv_is_empty(&rsv->rsv_window)) + rsv_window_remove(inode->i_sb, rsv); spin_unlock(rsv_lock); } } /* Free given blocks, update quota and i_blocks field */ -void ext3_free_blocks(handle_t *handle, struct inode *inode, - unsigned long block, unsigned long count) +void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, + ext3_fsblk_t block, unsigned long count, + unsigned long *pdquot_freed_blocks) { struct buffer_head *bitmap_bh = NULL; struct buffer_head *gd_bh; unsigned long block_group; - unsigned long bit; + ext3_grpblk_t bit; unsigned long i; unsigned long overflow; - struct 
super_block * sb; - struct ext3_group_desc * gdp; + struct ext3_group_desc * desc; struct ext3_super_block * es; struct ext3_sb_info *sbi; int err = 0, ret; - int dquot_freed_blocks = 0; + ext3_grpblk_t group_freed; - sb = inode->i_sb; - if (!sb) { - printk ("ext3_free_blocks: nonexistent device"); - return; - } + *pdquot_freed_blocks = 0; sbi = EXT3_SB(sb); - es = EXT3_SB(sb)->s_es; + es = sbi->s_es; if (block < le32_to_cpu(es->s_first_data_block) || block + count < block || block + count > le32_to_cpu(es->s_blocks_count)) { ext3_error (sb, "ext3_free_blocks", "Freeing blocks not in datazone - " - "block = %lu, count = %lu", block, count); + "block = "E3FSBLK", count = %lu", block, count); goto error_return; } - ext3_debug ("freeing block %lu\n", block); + ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1); do_more: overflow = 0; @@ -228,19 +357,19 @@ do_more: bitmap_bh = read_block_bitmap(sb, block_group); if (!bitmap_bh) goto error_return; - gdp = ext3_get_group_desc (sb, block_group, &gd_bh); - if (!gdp) + desc = ext3_get_group_desc (sb, block_group, &gd_bh); + if (!desc) goto error_return; - if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) || - in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) || - in_range (block, le32_to_cpu(gdp->bg_inode_table), - EXT3_SB(sb)->s_itb_per_group) || - in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table), - EXT3_SB(sb)->s_itb_per_group)) + if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) || + in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) || + in_range (block, le32_to_cpu(desc->bg_inode_table), + sbi->s_itb_per_group) || + in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), + sbi->s_itb_per_group)) ext3_error (sb, "ext3_free_blocks", "Freeing blocks in system zones - " - "Block = %lu, count = %lu", + "Block = "E3FSBLK", count = %lu", block, count); /* @@ -249,7 +378,7 @@ do_more: */ /* @@@ check errors */ BUFFER_TRACE(bitmap_bh, "getting undo access"); - err = ext3_journal_get_undo_access(handle, bitmap_bh, NULL); + err = ext3_journal_get_undo_access(handle, bitmap_bh); if (err) goto error_return; @@ -265,7 +394,7 @@ do_more: jbd_lock_bh_state(bitmap_bh); - for (i = 0; i < count; i++) { + for (i = 0, group_freed = 0; i < count; i++) { /* * An HJ special. This is expensive... */ @@ -285,6 +414,11 @@ do_more: } jbd_lock_bh_state(bitmap_bh); #endif + if (need_resched()) { + jbd_unlock_bh_state(bitmap_bh); + cond_resched(); + jbd_lock_bh_state(bitmap_bh); + } /* @@@ This prevents newly-allocated data from being * freed and then reallocated within the same * transaction. 
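[Editor's note -- not part of the patch. The hunk above adds a
need_resched() check inside the loop that clears one bitmap bit per
freed block: because the loop runs under jbd_lock_bh_state(), the patch
drops that lock, calls cond_resched(), and re-takes the lock before
continuing, so freeing a large extent cannot monopolize the CPU. Below
is a minimal user-space sketch of the same lock-drop pattern, assuming
POSIX threads; bitmap_lock, clear_bit_range and the yield-every-256-bits
policy are illustrative choices, not taken from the kernel (which tests
need_resched() on every iteration instead).

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

#define NBITS 4096

static unsigned char bitmap[NBITS / 8];
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* clear bits [start, start + count) under bitmap_lock, periodically
 * dropping the lock and yielding, as the hunk above does with
 * jbd_unlock_bh_state()/cond_resched()/jbd_lock_bh_state() */
static void clear_bit_range(unsigned int start, unsigned int count)
{
	unsigned int i;

	pthread_mutex_lock(&bitmap_lock);
	for (i = 0; i < count; i++) {
		unsigned int bit = start + i;

		if (i && (i % 256) == 0) {
			pthread_mutex_unlock(&bitmap_lock);
			sched_yield();	/* stand-in for cond_resched() */
			pthread_mutex_lock(&bitmap_lock);
		}
		bitmap[bit / 8] &= ~(1u << (bit % 8));
	}
	pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
	memset(bitmap, 0xff, sizeof(bitmap));
	clear_bit_range(100, 2000);
	printf("bit 99 still set: %d, bit 100 cleared: %d\n",
	       !!(bitmap[99 / 8] & (1 << (99 % 8))),
	       !(bitmap[100 / 8] & (1 << (100 % 8))));
	return 0;
}

Bounding how long the lock is held caps the scheduling latency a large
free can add; the cost is that other lockers may interleave between
chunks, which is safe here because each bit is cleared independently.
-- end note]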
@@ -319,19 +453,20 @@ do_more: bit + i, bitmap_bh->b_data)) { jbd_unlock_bh_state(bitmap_bh); ext3_error(sb, __FUNCTION__, - "bit already cleared for block %lu", block + i); + "bit already cleared for block "E3FSBLK, + block + i); jbd_lock_bh_state(bitmap_bh); BUFFER_TRACE(bitmap_bh, "bit already cleared"); } else { - dquot_freed_blocks++; + group_freed++; } } jbd_unlock_bh_state(bitmap_bh); spin_lock(sb_bgl_lock(sbi, block_group)); - gdp->bg_free_blocks_count = - cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + - dquot_freed_blocks); + desc->bg_free_blocks_count = + cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + + group_freed); spin_unlock(sb_bgl_lock(sbi, block_group)); percpu_counter_mod(&sbi->s_freeblocks_counter, count); @@ -343,6 +478,7 @@ do_more: BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext3_journal_dirty_metadata(handle, gd_bh); if (!err) err = ret; + *pdquot_freed_blocks += group_freed; if (overflow && !err) { block += count; @@ -353,8 +489,26 @@ do_more: error_return: brelse(bitmap_bh); ext3_std_error(sb, err); - if (dquot_freed_blocks && !(EXT3_I(inode)->i_state & EXT3_STATE_RESIZE)) + return; +} + +/* Free given blocks, update quota and i_blocks field */ +void ext3_free_blocks(handle_t *handle, struct inode *inode, + ext3_fsblk_t block, unsigned long count) +{ + struct super_block * sb; + unsigned long dquot_freed_blocks; + + sb = inode->i_sb; + if (!sb) { + printk ("ext3_free_blocks: nonexistent device"); + return; + } + ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); + if (dquot_freed_blocks) { + DLIMIT_FREE_BLOCK(inode, dquot_freed_blocks); DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); + } return; } @@ -374,7 +528,7 @@ error_return: * data-writes at some point, and disable it for metadata allocations or * sync-data inodes. */ -static int ext3_test_allocatable(int nr, struct buffer_head *bh) +static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh) { int ret; struct journal_head *jh = bh2jh(bh); @@ -391,11 +545,11 @@ static int ext3_test_allocatable(int nr, struct buffer_head *bh) return ret; } -static int -bitmap_search_next_usable_block(int start, struct buffer_head *bh, - int maxblocks) +static ext3_grpblk_t +bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, + ext3_grpblk_t maxblocks) { - int next; + ext3_grpblk_t next; struct journal_head *jh = bh2jh(bh); /* @@ -425,10 +579,11 @@ bitmap_search_next_usable_block(int start, struct buffer_head *bh, * the initial goal; then for a free byte somewhere in the bitmap; then * for any free bit in the bitmap. */ -static int -find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) +static ext3_grpblk_t +find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, + ext3_grpblk_t maxblocks) { - int here, next; + ext3_grpblk_t here, next; char *p, *r; if (start > 0) { @@ -440,7 +595,7 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the * next 64-bit boundary is simple.. */ - int end_goal = (start + 63) & ~63; + ext3_grpblk_t end_goal = (start + 63) & ~63; if (end_goal > maxblocks) end_goal = maxblocks; here = ext3_find_next_zero_bit(bh->b_data, end_goal, start); @@ -477,7 +632,7 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) * zero (failure). 
*/ static inline int -claim_block(spinlock_t *lock, int block, struct buffer_head *bh) +claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh) { struct journal_head *jh = bh2jh(bh); int ret; @@ -500,33 +655,34 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh) * new bitmap. In that case we must release write access to the old one via * ext3_journal_release_buffer(), else we'll run out of credits. */ -static int +static ext3_grpblk_t ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, - struct buffer_head *bitmap_bh, int goal, struct reserve_window *my_rsv) + struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal, + unsigned long *count, struct ext3_reserve_window *my_rsv) { - int group_first_block, start, end; + ext3_fsblk_t group_first_block; + ext3_grpblk_t start, end; + unsigned long num = 0; /* we do allocation within the reservation window if we have a window */ if (my_rsv) { - group_first_block = - le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + - group * EXT3_BLOCKS_PER_GROUP(sb); - if (my_rsv->rsv_start >= group_first_block) - start = my_rsv->rsv_start - group_first_block; + group_first_block = ext3_group_first_block_no(sb, group); + if (my_rsv->_rsv_start >= group_first_block) + start = my_rsv->_rsv_start - group_first_block; else /* reservation window cross group boundary */ start = 0; - end = my_rsv->rsv_end - group_first_block + 1; + end = my_rsv->_rsv_end - group_first_block + 1; if (end > EXT3_BLOCKS_PER_GROUP(sb)) /* reservation window crosses group boundary */ end = EXT3_BLOCKS_PER_GROUP(sb); - if ((start <= goal) && (goal < end)) - start = goal; + if ((start <= grp_goal) && (grp_goal < end)) + start = grp_goal; else - goal = -1; + grp_goal = -1; } else { - if (goal > 0) - start = goal; + if (grp_goal > 0) + start = grp_goal; else start = 0; end = EXT3_BLOCKS_PER_GROUP(sb); @@ -535,98 +691,101 @@ ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb)); repeat: - if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) { - goal = find_next_usable_block(start, bitmap_bh, end); - if (goal < 0) + if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) { + grp_goal = find_next_usable_block(start, bitmap_bh, end); + if (grp_goal < 0) goto fail_access; if (!my_rsv) { int i; - for (i = 0; i < 7 && goal > start && - ext3_test_allocatable(goal - 1, + for (i = 0; i < 7 && grp_goal > start && + ext3_test_allocatable(grp_goal - 1, bitmap_bh); - i++, goal--) + i++, grp_goal--) ; } } - start = goal; + start = grp_goal; - if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { + if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { /* * The block was allocated by another thread, or it was * allocated and then freed by another thread */ start++; - goal++; + grp_goal++; if (start >= end) goto fail_access; goto repeat; } - if (my_rsv) - my_rsv->rsv_alloc_hit++; - return goal; + num++; + grp_goal++; + while (num < *count && grp_goal < end + && ext3_test_allocatable(grp_goal, bitmap_bh) + && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) { + num++; + grp_goal++; + } + *count = num; + return grp_goal - num; fail_access: + *count = num; return -1; } /** * find_next_reservable_window(): - * find a reservable space within the given range - * It does not allocate the reservation window for now + * find a reservable space within the given range. 
+ * It does not allocate the reservation window for now: * alloc_new_reservation() will do the work later. * * @search_head: the head of the searching list; - * This is not necessary the list head of the whole filesystem + * This is not necessarily the list head of the whole filesystem * - * we have both head and start_block to assist the search - * for the reservable space. The list start from head, + * We have both head and start_block to assist the search + * for the reservable space. The list starts from head, * but we will shift to the place where start_block is, - * then start from there, we looking for a resevable space. - * - * @fs_rsv_head: per-filesystem reervation list head. + * then start from there, when looking for a reservable space. * * @size: the target new reservation window size + * * @group_first_block: the first block we consider to start * the real search from * * @last_block: - * the maxium block number that our goal reservable space + * the maximum block number that our goal reservable space * could start from. This is normally the last block in this * group. The search will end when we found the start of next - * possiblereservable space is out of this boundary. - * This could handle the cross bounday reservation window request. + * possible reservable space is out of this boundary. + * This could handle the cross boundary reservation window + * request. * * basically we search from the given range, rather than the whole * reservation double linked list, (start_block, last_block) - * to find a free region that of of my size and has not + * to find a free region that is of my size and has not * been reserved. * - * on succeed, it returns the reservation window to be append to. - * failed, return NULL. */ -static inline -struct reserve_window *find_next_reservable_window( - struct reserve_window *search_head, - struct reserve_window *fs_rsv_head, - unsigned long size, int *start_block, - int last_block) +static int find_next_reservable_window( + struct ext3_reserve_window_node *search_head, + struct ext3_reserve_window_node *my_rsv, + struct super_block * sb, + ext3_fsblk_t start_block, + ext3_fsblk_t last_block) { - struct reserve_window *rsv; - int cur; - - /* TODO:make the start of the reservation window byte alligned */ - /*cur = *start_block & 8;*/ - cur = *start_block; - rsv = list_entry(search_head->rsv_list.next, - struct reserve_window, rsv_list); - while (rsv != fs_rsv_head) { - if (cur + size <= rsv->rsv_start) { - /* - * Found a reserveable space big enough. We could - * have a reservation across the group boundary here - */ - break; - } + struct rb_node *next; + struct ext3_reserve_window_node *rsv, *prev; + ext3_fsblk_t cur; + int size = my_rsv->rsv_goal_size; + + /* TODO: make the start of the reservation window byte-aligned */ + /* cur = *start_block & ~7;*/ + cur = start_block; + rsv = search_head; + if (!rsv) + return -1; + + while (1) { if (cur <= rsv->rsv_end) cur = rsv->rsv_end + 1; @@ -639,14 +798,31 @@ struct reserve_window *find_next_reservable_window( * For now it will fail if we could not find the reservable * space with expected-size (or more)... */ - rsv = list_entry(rsv->rsv_list.next, - struct reserve_window, rsv_list); if (cur > last_block) - return NULL; /* fail */ + return -1; /* fail */ + + prev = rsv; + next = rb_next(&rsv->rsv_node); + rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); + + /* + * Reached the last reservation, we can just append to the + * previous one. 
+	 */
+	if (!next)
+		break;
+
+	if (cur + size <= rsv->rsv_start) {
+		/*
+		 * Found a reservable space big enough.  We could
+		 * have a reservation across the group boundary here
+		 */
+		break;
+	}
 	}
 	/*
 	 * we come here either :
-	 * when we rearch to the end of the whole list,
+	 * when we reach the end of the whole list,
 	 * and there is empty reservable space after last entry in the list.
 	 * append it to the end of the list.
 	 *
@@ -654,82 +830,90 @@ struct reserve_window *find_next_reservable_window(
 	 * or we found one reservable space in the middle of the list,
 	 * return the reservation window that we could append to.
 	 * succeed.
 	 */
-	*start_block = cur;
-	return list_entry(rsv->rsv_list.prev, struct reserve_window, rsv_list);
+
+	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
+		rsv_window_remove(sb, my_rsv);
+
+	/*
+	 * Let's book the whole available window for now.  We will check the
+	 * disk bitmap later and then, if there are free blocks then we adjust
+	 * the window size if it's larger than requested.
+	 * Otherwise, we will remove this node from the tree the next time
+	 * we call find_next_reservable_window.
+	 */
+	my_rsv->rsv_start = cur;
+	my_rsv->rsv_end = cur + size - 1;
+	my_rsv->rsv_alloc_hit = 0;
+
+	if (prev != my_rsv)
+		ext3_rsv_window_add(sb, my_rsv);
+
+	return 0;
 }

 /**
 * alloc_new_reservation()--allocate a new reservation window
- *	if there is an existing reservation, discard it first
- *	then allocate the new one from there
- *	otherwise allocate the new reservation from the given
- *	start block, or the beginning of the group, if a goal
- *	is not given.
 *
 * To make a new reservation, we search part of the filesystem
- * reservation list(the list that inside the group).
- *
- * If we have a old reservation, the search goal is the end of
- * last reservation. If we do not have a old reservatio, then we
- * start from a given goal, or the first block of the group, if
- * the goal is not given.
+ * reservation list (the list inside the group). We try to
+ * allocate a new reservation window near the allocation goal,
+ * or the beginning of the group, if there is no goal.
 *
 * We first find a reservable space after the goal, then from
- * there,we check the bitmap for the first free block after
+ * there, we check the bitmap for the first free block after
 * it. If there is no free block until the end of group, then the
 * whole group is full, we failed. Otherwise, check if the free
 * block is inside the expected reservable space, if so, we
 * succeed.
- * If the first free block is outside the reseravle space, then
- * start from the first free block, we search for next avalibale
+ * If the first free block is outside the reservable space, then
+ * start from the first free block, we search for next available
 * space, and go on.
 *
 * on success, a new reservation will be found and inserted into the list.
- * It contains at least one free block, and it is not overlap with other
- * reservation window.
+ * It contains at least one free block, and it does not overlap with other
+ * reservation windows.
 *
- * failed: we failed to found a reservation window in this group
+ * failed: we failed to find a reservation window in this group
 *
 * @rsv: the reservation
 *
- * @goal: The goal.  It is where the search for a
+ * @grp_goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
- *	if we have a old reservation, start_block is the end of
- *	old reservation. Otherwise,
- *	if we have a goal(goal >0 ), then start from there,
- *	no goal(goal = -1), we start from the first block
+ *	if we have a grp_goal(grp_goal >0 ), then start from there,
+ *	no grp_goal(grp_goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
- * @group: the group we are trying to do allocate in
+ * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
+ *
 */
-static int alloc_new_reservation(struct reserve_window *my_rsv,
-		int goal, struct super_block *sb,
+static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
+		ext3_grpblk_t grp_goal, struct super_block *sb,
 		unsigned int group, struct buffer_head *bitmap_bh)
 {
-	struct reserve_window *search_head;
-	int group_first_block, group_end_block, start_block;
-	int first_free_block;
-	int reservable_space_start;
-	struct reserve_window *prev_rsv;
-	struct reserve_window *fs_rsv_head = &EXT3_SB(sb)->s_rsv_window_head;
+	struct ext3_reserve_window_node *search_head;
+	ext3_fsblk_t group_first_block, group_end_block, start_block;
+	ext3_grpblk_t first_free_block;
+	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
 	unsigned long size;
+	int ret;
+	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

-	group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
-			group * EXT3_BLOCKS_PER_GROUP(sb);
-	group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
+	group_first_block = ext3_group_first_block_no(sb, group);
+	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

-	if (goal < 0)
+	if (grp_goal < 0)
 		start_block = group_first_block;
 	else
-		start_block = goal + group_first_block;
+		start_block = grp_goal + group_first_block;
+
+	size = my_rsv->rsv_goal_size;

-	size = atomic_read(&my_rsv->rsv_goal_size);
-	/* if we have a old reservation, start the search from the old rsv */
-	if (!rsv_is_empty(my_rsv)) {
+	if (!rsv_is_empty(&my_rsv->rsv_window)) {
 		/*
 		 * if the old reservation crosses the group boundary
+		 * and if the goal is inside the old reservation window,
 		 * we will come here when we just failed to allocate from
 		 * the first part of the window. We still have another part
 		 * that belongs to the next group. In this case, there is no
@@ -742,14 +926,12 @@ static int alloc_new_reservation(struct reserve_window *my_rsv,
 		 */
 		if ((my_rsv->rsv_start <= group_end_block) &&
-				(my_rsv->rsv_end > group_end_block))
+				(my_rsv->rsv_end > group_end_block) &&
+				(start_block >= my_rsv->rsv_start))
 			return -1;

-		/* remember where we are before we discard the old one */
-		if (my_rsv->rsv_end + 1 > start_block)
-			start_block = my_rsv->rsv_end + 1;
-		search_head = my_rsv;
-		if ((my_rsv->rsv_alloc_hit > (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
+		if ((my_rsv->rsv_alloc_hit >
+		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
 			/*
 			 * if our previous allocation hit ratio is greater than half
 			 * we double the size of reservation window next time
 			 * otherwise keep the same
 			 */
 			size = size * 2;
 			if (size > EXT3_MAX_RESERVE_BLOCKS)
 				size = EXT3_MAX_RESERVE_BLOCKS;
-			atomic_set(&my_rsv->rsv_goal_size, size);
@@ -758,31 +940,34 @@ static int alloc_new_reservation(struct reserve_window *my_rsv,
+			my_rsv->rsv_goal_size = size;
 		}
 	}
-	else {
-		/*
-		 * we don't have a reservation,
-		 * we set our goal(start_block) and
-		 * the list head for the search
-		 */
-		search_head = fs_rsv_head;
-	}
+
+	spin_lock(rsv_lock);
+	/*
+	 * shift the search start to the window near the goal block
+	 */
+	search_head = search_reserve_window(fs_rsv_root, start_block);

 	/*
-	 * find_next_reservable_window() simply find a reservable window
+	 * find_next_reservable_window() simply finds a reservable window
 	 * inside the given range(start_block, group_end_block).
 	 *
 	 * To make sure the reservation window has a free bit inside it, we
 	 * need to check the bitmap after we found a reservable window.
 	 */
 retry:
-	prev_rsv = find_next_reservable_window(search_head, fs_rsv_head, size,
-						&start_block, group_end_block);
-	if (prev_rsv == NULL)
-		goto failed;
-	reservable_space_start = start_block;
+	ret = find_next_reservable_window(search_head, my_rsv, sb,
+						start_block, group_end_block);
+
+	if (ret == -1) {
+		if (!rsv_is_empty(&my_rsv->rsv_window))
+			rsv_window_remove(sb, my_rsv);
+		spin_unlock(rsv_lock);
+		return -1;
+	}
+
 	/*
 	 * On success, find_next_reservable_window() returns the
 	 * reservation window where there is a reservable space after it.
 	 *
@@ -794,8 +979,9 @@ retry:
 	 * block. Search start from the start block of the reservable space
 	 * we just found.
 	 */
+	spin_unlock(rsv_lock);
 	first_free_block = bitmap_search_next_usable_block(
-			reservable_space_start - group_first_block,
+			my_rsv->rsv_start - group_first_block,
 			bitmap_bh, group_end_block - group_first_block + 1);

 	if (first_free_block < 0) {
 		/*
 		 * no free block left on the bitmap, no point
 		 * to reserve the space. return failed.
*/ - goto failed; + spin_lock(rsv_lock); + if (!rsv_is_empty(&my_rsv->rsv_window)) + rsv_window_remove(sb, my_rsv); + spin_unlock(rsv_lock); + return -1; /* failed */ } + start_block = first_free_block + group_first_block; /* * check if the first free block is within the - * free space we just found + * free space we just reserved */ - if ((start_block >= reservable_space_start) && - (start_block < reservable_space_start + size)) - goto found_rsv_window; + if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) + return 0; /* success */ /* * if the first free bit we found is out of the reservable space - * this means there is no free block on the reservable space - * we should continue search for next reservable space, + * continue search for next reservable space, * start from where the free block is, * we also shift the list head to where we stopped last time */ - search_head = prev_rsv; + search_head = my_rsv; + spin_lock(rsv_lock); goto retry; +} -found_rsv_window: - /* - * great! the reservable space contains some free blocks. - * if the search returns that we should add the new - * window just next to where the old window, we don't - * need to remove the old window first then add it to the - * same place, just update the new start and new end. - */ - if (my_rsv != prev_rsv) { - if (!rsv_is_empty(my_rsv)) - rsv_window_remove(my_rsv); - rsv_window_add(my_rsv, prev_rsv); +static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, + struct super_block *sb, int size) +{ + struct ext3_reserve_window_node *next_rsv; + struct rb_node *next; + spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; + + if (!spin_trylock(rsv_lock)) + return; + + next = rb_next(&my_rsv->rsv_node); + + if (!next) + my_rsv->rsv_end += size; + else { + next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); + + if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) + my_rsv->rsv_end += size; + else + my_rsv->rsv_end = next_rsv->rsv_start - 1; } - my_rsv->rsv_start = reservable_space_start; - my_rsv->rsv_end = my_rsv->rsv_start + size - 1; - return 0; /* succeed */ -failed: - return -1; /* failed */ + spin_unlock(rsv_lock); } /* @@ -851,29 +1047,30 @@ failed: * its own reservation. If it does not have a reservation window, instead of * looking for a free bit on bitmap first, then look up the reservation list to * see if it is inside somebody else's reservation window, we try to allocate a - * reservation window for it start from the goal first. Then do the block + * reservation window for it starting from the goal first. Then do the block * allocation within the reservation window. * - * This will aviod keep searching the reservation list again and again when - * someboday is looking for a free block(without reservation), and there are - * lots of free blocks, but they are all being reserved + * This will avoid keeping on searching the reservation list again and + * again when somebody is looking for a free block (without + * reservation), and there are lots of free blocks, but they are all + * being reserved. * * We use a sorted double linked list for the per-filesystem reservation list. * The insert, remove and find a free space(non-reserved) operations for the * sorted double linked list should be fast. 
* */ -static int +static ext3_grpblk_t ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, unsigned int group, struct buffer_head *bitmap_bh, - int goal, struct reserve_window * my_rsv, - int *errp) + ext3_grpblk_t grp_goal, + struct ext3_reserve_window_node * my_rsv, + unsigned long *count, int *errp) { - spinlock_t *rsv_lock; - unsigned long group_first_block; - int ret = 0; + ext3_fsblk_t group_first_block, group_last_block; + ext3_grpblk_t ret = 0; int fatal; - int credits = 0; + unsigned long num = *count; *errp = 0; @@ -883,7 +1080,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, * if the buffer is in BJ_Forget state in the committing transaction. */ BUFFER_TRACE(bitmap_bh, "get undo access for new block"); - fatal = ext3_journal_get_undo_access(handle, bitmap_bh, &credits); + fatal = ext3_journal_get_undo_access(handle, bitmap_bh); if (fatal) { *errp = fatal; return -1; @@ -893,21 +1090,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, * we don't deal with reservation when * filesystem is mounted without reservation * or the file is not a regular file - * of last attemp of allocating a block with reservation turn on failed + * or last attempt to allocate a block with reservation turned on failed */ if (my_rsv == NULL ) { - ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); + ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, + grp_goal, count, NULL); goto out; } - rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; /* - * goal is a group relative block number (if there is a goal) - * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb) + * grp_goal is a group relative block number (if there is a goal) + * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb) * first block is a filesystem wide block number * first block is the block number of the first block in this group */ - group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + - group * EXT3_BLOCKS_PER_GROUP(sb); + group_first_block = ext3_group_first_block_no(sb, group); + group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); /* * Basically we will allocate a new block from inode's reservation @@ -915,35 +1112,42 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, * * We need to allocate a new reservation window, if: * a) inode does not have a reservation window; or - * b) last attemp of allocating a block from existing reservation + * b) last attempt to allocate a block from existing reservation * failed; or * c) we come here with a goal and with a reservation window * * We do not need to allocate a new reservation window if we come here * at the beginning with a goal and the goal is inside the window, or - * or we don't have a goal but already have a reservation window. + * we don't have a goal but already have a reservation window. * then we could go to allocate from the reservation window directly. 
*/ while (1) { - if (rsv_is_empty(my_rsv) || (ret < 0) || - !goal_in_my_reservation(my_rsv, goal, group, sb)) { - spin_lock(rsv_lock); - ret = alloc_new_reservation(my_rsv, goal, sb, + if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || + !goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) { + if (my_rsv->rsv_goal_size < *count) + my_rsv->rsv_goal_size = *count; + ret = alloc_new_reservation(my_rsv, grp_goal, sb, group, bitmap_bh); - spin_unlock(rsv_lock); if (ret < 0) break; /* failed */ - if (!goal_in_my_reservation(my_rsv, goal, group, sb)) - goal = -1; - } - if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) - || (my_rsv->rsv_end < group_first_block)) + if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) + grp_goal = -1; + } else if (grp_goal > 0 && (my_rsv->rsv_end-grp_goal+1) < *count) + try_to_extend_reservation(my_rsv, sb, + *count-my_rsv->rsv_end + grp_goal - 1); + + if ((my_rsv->rsv_start > group_last_block) || + (my_rsv->rsv_end < group_first_block)) BUG(); - ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, - my_rsv); - if (ret >= 0) + ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, grp_goal, + &num, &my_rsv->rsv_window); + if (ret >= 0) { + my_rsv->rsv_alloc_hit += num; + *count = num; break; /* succeed */ + } + num = *count; } out: if (ret >= 0) { @@ -958,22 +1162,37 @@ out: } BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); - ext3_journal_release_buffer(handle, bitmap_bh, credits); + ext3_journal_release_buffer(handle, bitmap_bh); return ret; } -static int ext3_has_free_blocks(struct ext3_sb_info *sbi) +static int ext3_has_free_blocks(struct super_block *sb) { - int free_blocks, root_blocks; + struct ext3_sb_info *sbi = EXT3_SB(sb); + ext3_fsblk_t free_blocks, root_blocks; + int cond; free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && + + vxdprintk(VXD_CBIT(dlim, 3), + "ext3_has_free_blocks(%p): free=%lu, root=%lu", + sb, free_blocks, root_blocks); + + DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks); + + cond = (free_blocks < root_blocks + 1 && + !capable(CAP_SYS_RESOURCE) && sbi->s_resuid != current->fsuid && - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { - return 0; - } - return 1; + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))); + + vxdprintk(VXD_CBIT(dlim, 3), + "ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d", + sb, free_blocks, root_blocks, + !capable(CAP_SYS_RESOURCE)?'1':'0', + sbi->s_resuid, current->fsuid, cond?0:1); + + return (cond ? 0 : 1); } /* @@ -984,7 +1203,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) */ int ext3_should_retry_alloc(struct super_block *sb, int *retries) { - if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3) + if (!ext3_has_free_blocks(sb) || (*retries)++ > 3) return 0; jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); @@ -1000,27 +1219,33 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries) * bitmap, and then for any free bit if that fails. * This function also updates quota and i_blocks field. 
 */
-int ext3_new_block(handle_t *handle, struct inode *inode,
-		unsigned long goal, int *errp)
+ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
+			ext3_fsblk_t goal, unsigned long *count, int *errp)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gdp_bh;
 	int group_no;
 	int goal_group;
-	int ret_block;
+	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
+	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block */
+	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
 	int bgi;			/* blockgroup iteration index */
-	int target_block;
 	int fatal = 0, err;
 	int performed_allocation = 0;
-	int free_blocks;
+	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
 	struct super_block *sb;
 	struct ext3_group_desc *gdp;
 	struct ext3_super_block *es;
 	struct ext3_sb_info *sbi;
-	struct reserve_window *my_rsv = NULL;
+	struct ext3_reserve_window_node *my_rsv = NULL;
+	struct ext3_block_alloc_info *block_i;
+	unsigned short windowsz = 0;
 #ifdef EXT3FS_DEBUG
 	static int goal_hits, goal_attempts;
 #endif
+	unsigned long ngroups;
+	unsigned long num = *count;
+
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
 	if (!sb) {
@@ -1031,17 +1256,29 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (DQUOT_ALLOC_BLOCK(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
+	if (DLIMIT_ALLOC_BLOCK(inode, 1))
+		goto out_dlimit;

 	sbi = EXT3_SB(sb);
 	es = EXT3_SB(sb)->s_es;
 	ext3_debug("goal=%lu.\n", goal);
-	if (test_opt(sb, RESERVATION) && S_ISREG(inode->i_mode))
-		my_rsv = &EXT3_I(inode)->i_rsv_window;
-	if (!ext3_has_free_blocks(sbi)) {
+	/*
+	 * Allocate a block from reservation only when
+	 * filesystem is mounted with reservation (default, -o reservation), and
+	 * it's a regular file, and
+	 * the desired window size is greater than 0 (One could use ioctl
+	 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
+	 * reservation on that particular file)
+	 */
+	block_i = EXT3_I(inode)->i_block_alloc_info;
+	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
+		my_rsv = &block_i->rsv_window_node;
+
+	if (!ext3_has_free_blocks(sb)) {
 		*errp = -ENOSPC;
 		goto out;
 	}
@@ -1054,34 +1291,46 @@ int ext3_new_block(handle_t *handle, struct inode *inode,
 		goal = le32_to_cpu(es->s_first_data_block);
 	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
 			EXT3_BLOCKS_PER_GROUP(sb);
+	goal_group = group_no;
+retry_alloc:
 	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
 	if (!gdp)
 		goto io_error;

-	goal_group = group_no;
-retry:
 	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+	/*
+	 * if there are not enough free blocks to make a new reservation
+	 * turn off reservation for this allocation
+	 */
+	if (my_rsv && (free_blocks < windowsz)
+		&& (rsv_is_empty(&my_rsv->rsv_window)))
+		my_rsv = NULL;
+
 	if (free_blocks > 0) {
-		ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
+		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
 				EXT3_BLOCKS_PER_GROUP(sb));
 		bitmap_bh = read_block_bitmap(sb, group_no);
 		if (!bitmap_bh)
 			goto io_error;
-		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, ret_block, my_rsv, &fatal);
+		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
+					group_no, bitmap_bh, grp_target_blk,
+					my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
-		if (ret_block >= 0)
+		if (grp_alloc_blk >= 0)
 			goto allocated;
 	}
+
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+
 	/*
 	 * Now search the rest of the groups.
We assume that * i and gdp correctly point to the last group visited. */ - for (bgi = 0; bgi < EXT3_SB(sb)->s_groups_count; bgi++) { + for (bgi = 0; bgi < ngroups; bgi++) { group_no++; - if (group_no >= EXT3_SB(sb)->s_groups_count) + if (group_no >= ngroups) group_no = 0; gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); if (!gdp) { @@ -1089,18 +1338,27 @@ retry: goto out; } free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); - if (free_blocks <= 0) + /* + * skip this group if the number of + * free blocks is less than half of the reservation + * window size. + */ + if (free_blocks <= (windowsz/2)) continue; brelse(bitmap_bh); bitmap_bh = read_block_bitmap(sb, group_no); if (!bitmap_bh) goto io_error; - ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, - bitmap_bh, -1, my_rsv, &fatal); + /* + * try to allocate block(s) from this group, without a goal(-1). + */ + grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle, + group_no, bitmap_bh, -1, my_rsv, + &num, &fatal); if (fatal) goto out; - if (ret_block >= 0) + if (grp_alloc_blk >= 0) goto allocated; } /* @@ -1113,7 +1371,7 @@ retry: if (my_rsv) { my_rsv = NULL; group_no = goal_group; - goto retry; + goto retry_alloc; } /* No space left on the device */ *errp = -ENOSPC; @@ -1129,16 +1387,18 @@ allocated: if (fatal) goto out; - target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) - + le32_to_cpu(es->s_first_data_block); + ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no); - if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || - target_block == le32_to_cpu(gdp->bg_inode_bitmap) || - in_range(target_block, le32_to_cpu(gdp->bg_inode_table), + if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) || + in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) || + in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), + EXT3_SB(sb)->s_itb_per_group) || + in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), EXT3_SB(sb)->s_itb_per_group)) ext3_error(sb, "ext3_new_block", "Allocating block in system zone - " - "block = %u", target_block); + "blocks from "E3FSBLK", length %lu", + ret_block, num); performed_allocation = 1; @@ -1147,7 +1407,7 @@ allocated: struct buffer_head *debug_bh; /* Record bitmap buffer state in the newly allocated block */ - debug_bh = sb_find_get_block(sb, target_block); + debug_bh = sb_find_get_block(sb, ret_block); if (debug_bh) { BUFFER_TRACE(debug_bh, "state when allocated"); BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); @@ -1157,23 +1417,24 @@ allocated: jbd_lock_bh_state(bitmap_bh); spin_lock(sb_bgl_lock(sbi, group_no)); if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { - if (ext3_test_bit(ret_block, - bh2jh(bitmap_bh)->b_committed_data)) { - printk("%s: block was unexpectedly set in " - "b_committed_data\n", __FUNCTION__); + int i; + + for (i = 0; i < num; i++) { + if (ext3_test_bit(grp_alloc_blk+i, + bh2jh(bitmap_bh)->b_committed_data)) { + printk("%s: block was unexpectedly set in " + "b_committed_data\n", __FUNCTION__); + } } } - ext3_debug("found bit %d\n", ret_block); + ext3_debug("found bit %d\n", grp_alloc_blk); spin_unlock(sb_bgl_lock(sbi, group_no)); jbd_unlock_bh_state(bitmap_bh); #endif - /* ret_block was blockgroup-relative. 
Now it becomes fs-relative */ - ret_block = target_block; - - if (ret_block >= le32_to_cpu(es->s_blocks_count)) { + if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { ext3_error(sb, "ext3_new_block", - "block(%d) >= blocks count(%d) - " + "block("E3FSBLK") >= blocks count(%d) - " "block_group = %d, es == %p ", ret_block, le32_to_cpu(es->s_blocks_count), group_no, es); goto out; @@ -1184,14 +1445,14 @@ allocated: * list of some description. We don't know in advance whether * the caller wants to use it as metadata or data. */ - ext3_debug("allocating block %d. Goal hits %d of %d.\n", + ext3_debug("allocating block %lu. Goal hits %d of %d.\n", ret_block, goal_hits, goal_attempts); spin_lock(sb_bgl_lock(sbi, group_no)); gdp->bg_free_blocks_count = - cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); + cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); spin_unlock(sb_bgl_lock(sbi, group_no)); - percpu_counter_mod(&sbi->s_freeblocks_counter, -1); + percpu_counter_mod(&sbi->s_freeblocks_counter, -num); BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); err = ext3_journal_dirty_metadata(handle, gdp_bh); @@ -1204,11 +1465,16 @@ allocated: *errp = 0; brelse(bitmap_bh); + DQUOT_FREE_BLOCK(inode, *count-num); + *count = num; return ret_block; io_error: *errp = -EIO; out: + if (!performed_allocation) + DLIMIT_FREE_BLOCK(inode, 1); +out_dlimit: if (fatal) { *errp = fatal; ext3_std_error(sb, fatal); @@ -1217,27 +1483,38 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, 1); + DQUOT_FREE_BLOCK(inode, *count); brelse(bitmap_bh); return 0; } -unsigned long ext3_count_free_blocks(struct super_block *sb) +ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode, + ext3_fsblk_t goal, int *errp) +{ + unsigned long count = 1; + + return ext3_new_blocks(handle, inode, goal, &count, errp); +} + +ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb) { - unsigned long desc_count; + ext3_fsblk_t desc_count; struct ext3_group_desc *gdp; int i; + unsigned long ngroups = EXT3_SB(sb)->s_groups_count; #ifdef EXT3FS_DEBUG struct ext3_super_block *es; - unsigned long bitmap_count, x; + ext3_fsblk_t bitmap_count; + unsigned long x; struct buffer_head *bitmap_bh = NULL; - lock_super(sb); es = EXT3_SB(sb)->s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; - for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { + + smp_rmb(); + for (i = 0; i < ngroups; i++) { gdp = ext3_get_group_desc(sb, i, NULL); if (!gdp) continue; @@ -1253,13 +1530,15 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) bitmap_count += x; } brelse(bitmap_bh); - printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n", - le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); - unlock_super(sb); + printk("ext3_count_free_blocks: stored = "E3FSBLK + ", computed = "E3FSBLK", "E3FSBLK"\n", + le32_to_cpu(es->s_free_blocks_count), + desc_count, bitmap_count); return bitmap_count; #else desc_count = 0; - for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { + smp_rmb(); + for (i = 0; i < ngroups; i++) { gdp = ext3_get_group_desc(sb, i, NULL); if (!gdp) continue; @@ -1270,9 +1549,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) #endif } -static inline int block_in_use(unsigned long block, - struct super_block * sb, - unsigned char * map) +static inline int +block_in_use(ext3_fsblk_t block, struct super_block *sb, unsigned char *map) { return ext3_test_bit ((block - le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) % @@ 
-1281,21 +1559,21 @@ static inline int block_in_use(unsigned long block, static inline int test_root(int a, int b) { - if (a == 0) - return 1; - while (1) { - if (a == 1) - return 1; - if (a % b) - return 0; - a = a / b; - } + int num = b; + + while (a > num) + num *= b; + return num == a; } -int ext3_group_sparse(int group) +static int ext3_group_sparse(int group) { - return (test_root(group, 3) || test_root(group, 5) || - test_root(group, 7)); + if (group <= 1) + return 1; + if (!(group & 1)) + return 0; + return (test_root(group, 7) || test_root(group, 5) || + test_root(group, 3)); } /** @@ -1308,12 +1586,33 @@ int ext3_group_sparse(int group) */ int ext3_bg_has_super(struct super_block *sb, int group) { - if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&& - !ext3_group_sparse(group)) + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, + EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && + !ext3_group_sparse(group)) return 0; return 1; } +static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group) +{ + unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); + unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb); + unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1; + + if (group == first || group == first + 1 || group == last) + return 1; + return 0; +} + +static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) +{ + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, + EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && + !ext3_group_sparse(group)) + return 0; + return EXT3_SB(sb)->s_gdb_count; +} + /** * ext3_bg_num_gdb - number of blocks used by the group table in group * @sb: superblock for filesystem @@ -1325,82 +1624,14 @@ int ext3_bg_has_super(struct super_block *sb, int group) */ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) { - if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&& - !ext3_group_sparse(group)) - return 0; - return EXT3_SB(sb)->s_gdb_count; -} + unsigned long first_meta_bg = + le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg); + unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); -#ifdef CONFIG_EXT3_CHECK -/* Called at mount-time, super-block is locked */ -void ext3_check_blocks_bitmap (struct super_block * sb) -{ - struct ext3_super_block *es; - unsigned long desc_count, bitmap_count, x, j; - unsigned long desc_blocks; - struct buffer_head *bitmap_bh = NULL; - struct ext3_group_desc *gdp; - int i; + if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) || + metagroup < first_meta_bg) + return ext3_bg_num_gdb_nometa(sb,group); - es = EXT3_SB(sb)->s_es; - desc_count = 0; - bitmap_count = 0; - gdp = NULL; - for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { - gdp = ext3_get_group_desc (sb, i, NULL); - if (!gdp) - continue; - desc_count += le16_to_cpu(gdp->bg_free_blocks_count); - brelse(bitmap_bh); - bitmap_bh = read_block_bitmap(sb, i); - if (bitmap_bh == NULL) - continue; + return ext3_bg_num_gdb_meta(sb,group); - if (ext3_bg_has_super(sb, i) && - !ext3_test_bit(0, bitmap_bh->b_data)) - ext3_error(sb, __FUNCTION__, - "Superblock in group %d is marked free", i); - - desc_blocks = ext3_bg_num_gdb(sb, i); - for (j = 0; j < desc_blocks; j++) - if (!ext3_test_bit(j + 1, bitmap_bh->b_data)) - ext3_error(sb, __FUNCTION__, - "Descriptor block #%ld in group " - "%d is marked free", j, i); - - if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap), - sb, bitmap_bh->b_data)) - ext3_error (sb, "ext3_check_blocks_bitmap", - "Block bitmap for group %d is marked free", - i); - - if (!block_in_use 
(le32_to_cpu(gdp->bg_inode_bitmap), - sb, bitmap_bh->b_data)) - ext3_error (sb, "ext3_check_blocks_bitmap", - "Inode bitmap for group %d is marked free", - i); - - for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++) - if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j, - sb, bitmap_bh->b_data)) - ext3_error (sb, "ext3_check_blocks_bitmap", - "Block #%d of the inode table in " - "group %d is marked free", j, i); - - x = ext3_count_free(bitmap_bh, sb->s_blocksize); - if (le16_to_cpu(gdp->bg_free_blocks_count) != x) - ext3_error (sb, "ext3_check_blocks_bitmap", - "Wrong free blocks count for group %d, " - "stored = %d, counted = %lu", i, - le16_to_cpu(gdp->bg_free_blocks_count), x); - bitmap_count += x; - } - brelse(bitmap_bh); - if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count) - ext3_error (sb, "ext3_check_blocks_bitmap", - "Wrong free blocks count in super block, " - "stored = %lu, counted = %lu", - (unsigned long)le32_to_cpu(es->s_free_blocks_count), - bitmap_count); } -#endif
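[Editor's note -- not part of the patch. The rewritten test_root() above
replaces the old divide-until-done loop with a multiply-up test: num
grows by factors of b until it reaches or passes a, and a is an exact
power of b iff the two end up equal. ext3_group_sparse() also rejects
even groups early, since every power of 3, 5 or 7 is odd. The
stand-alone harness below reproduces both helpers from the hunk; only
main() is added here for illustration.

#include <stdio.h>

/* multiply-up power test, as in the hunk above */
static int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

/* groups 0 and 1, plus odd powers of 7, 5 and 3, keep backups */
static int ext3_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

int main(void)
{
	int g;

	/* list the groups that hold superblock/descriptor backups
	 * under the sparse_super scheme */
	for (g = 0; g < 1000; g++)
		if (ext3_group_sparse(g))
			printf("%d ", g);
	printf("\n");
	return 0;
}

Running it prints 0 1 3 5 7 9 25 27 49 81 125 243 343 625 729: groups 0
and 1 plus the powers of 3, 5 and 7 below 1000, exactly the groups
ext3_bg_has_super() reports as carrying backups. -- end note]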