/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/vs_dlimit.h>
#include <linux/vs_tag.h>
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A filesystem contains several
 * block groups.  Each group contains one bitmap block for blocks, one
 * bitmap block for inodes, N blocks for the inode table and data blocks.
 *
 * The filesystem contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a filesystem is mounted (see ext3_read_super).
 */

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
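/*
 * Illustrative sketch, not part of the original file: in_range() treats
 * (first, len) as the closed interval [first, first + len - 1].  The
 * hypothetical helper below shows the typical use -- asking whether a
 * candidate block falls inside a group's inode table.
 */
static inline int example_hits_inode_table(ext3_fsblk_t block,
					   ext3_fsblk_t itable_first,
					   unsigned long itb_per_group)
{
	/* 1 iff block lies in [itable_first, itable_first + itb_per_group - 1] */
	return in_range(block, itable_first, itb_per_group);
}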
/**
 * ext3_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor's buffer
 */
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext3_group_desc * desc;
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext3_error (sb, "ext3_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sbi->s_groups_count);

		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext3_error (sb, "ext3_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
			    block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc + offset;
}
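/*
 * Illustrative sketch, not part of the original file: a typical caller looks
 * up a group descriptor and reads one of its fields.  The helper name is
 * hypothetical; ext3_get_group_desc() returns NULL for a bad group number,
 * which must be checked.
 */
static inline ext3_grpblk_t example_group_free_blocks(struct super_block *sb,
						      unsigned int group)
{
	struct buffer_head *gd_bh;
	struct ext3_group_desc *desc = ext3_get_group_desc(sb, group, &gd_bh);

	return desc ? le16_to_cpu(desc->bg_free_blocks_count) : 0;
}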
/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
	struct ext3_group_desc * desc;
	struct buffer_head * bh = NULL;

	desc = ext3_get_group_desc (sb, block_group, NULL);
	if (!desc)
		goto error_out;
	bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
	if (!bh)
		ext3_error (sb, "read_block_bitmap",
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %u",
			    block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
	return bh;
}
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 */
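/*
 * Illustrative helper, not part of the original file: the number of blocks
 * covered by a reservation window.  Both endpoints are inclusive; an
 * unallocated window must be detected separately via rsv_is_empty(), since
 * EXT3_RESERVE_WINDOW_NOT_ALLOCATED makes this arithmetic meaningless.
 */
static inline unsigned long example_rsv_window_blocks(
					struct ext3_reserve_window *rsv)
{
	return rsv->_rsv_end - rsv->_rsv_start + 1;
}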
/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end).  Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext3_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start: %lu, end: %lu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad && !verbose) {
			printk("Restarting reservation walk in verbose mode\n");
			verbose = 1;
			goto restart;
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block.  In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
			unsigned int group, struct super_block * sb)
{
	ext3_fsblk_t group_first_block, group_last_block;

	group_first_block = ext3_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}
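/*
 * Worked example (hypothetical numbers): with 32768 blocks per group and
 * s_first_data_block == 1, group 5 spans blocks 163841-196608.  A window
 * of [163900, 163950] and grp_goal 70 gives goal block 163841 + 70 = 163911,
 * which lies inside the window, so the test above returns 1.
 */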
/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext3_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node.  OK, the previous node must be to one
	 * side of the interval containing the goal.  If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
	}
	return rsv;
}
/**
 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext3_rsv_window_add(struct super_block *sb,
		    struct ext3_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext3_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext3_reserve_window_node *this;

	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}
/**
 * rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree.  Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext3_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}
/**
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
	/* a valid reservation end block cannot be 0 */
	return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
}
/**
 * ext3_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext3 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext3 inode the first time the open file
 * needs a new block.  So, before every ext3_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists; if not, this function is called.
 * Failure to do so will result in block reservation being turned off
 * for that open file.
 *
 * This function is called from ext3_get_blocks_handle(), and is also called
 * when setting the reservation window size through ioctl before the file
 * is open for write (needs block allocation).
 *
 * Needs truncate_mutex protection prior to calling this function.
 */
void ext3_init_block_alloc_info(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;

		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}
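/*
 * Illustrative caller pattern, not part of the original file: the
 * allocate-on-demand check described above.  Real callers (e.g.
 * ext3_get_blocks_handle) hold truncate_mutex around this.
 */
static inline void example_ensure_alloc_info(struct inode *inode)
{
	if (!EXT3_I(inode)->i_block_alloc_info)
		ext3_init_block_alloc_info(inode);
}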
/**
 * ext3_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close, on
 * truncate, or at last iput().
 *
 * It is being called in three cases:
 *	ext3_release_file(): last writer closes the file
 *	ext3_clear_inode(): last iput(), when nobody links to this file.
 *	ext3_truncate(): when the block indirect map is about to change.
 */
void ext3_discard_reservation(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext3_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}
/**
 * ext3_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext3_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	unsigned long block_group;
	ext3_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext3_group_desc * desc;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int err = 0, ret;
	ext3_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > le32_to_cpu(es->s_blocks_count)) {
		ext3_error (sb, "ext3_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = "E3FSBLK", count = %lu", block, count);
		goto error_return;
	}

	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
	overflow = 0;
	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
		      EXT3_BLOCKS_PER_GROUP(sb);
	bit = (block - le32_to_cpu(es->s_first_data_block)) %
		      EXT3_BLOCKS_PER_GROUP(sb);
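	/*
	 * Worked example (hypothetical numbers): with 32768 blocks per
	 * group and s_first_data_block == 1, freeing block 100000 yields
	 * block_group = 99999 / 32768 = 3 and bit = 99999 % 32768 = 1695.
	 */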
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
	    in_range (block, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group) ||
	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group))
		ext3_error (sb, "ext3_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = "E3FSBLK", count = %lu",
			    block, count);

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext3_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext3_error(sb, __FUNCTION__,
				"bit already cleared for block "E3FSBLK,
				block + i);
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_mod(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext3_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, err);
	return;
}
/**
 * ext3_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t block, unsigned long count)
{
	struct super_block * sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext3_free_blocks: nonexistent device");
		return;
	}
	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks) {
		DLIMIT_FREE_BLOCK(inode, dquot_freed_blocks);
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	}
	return;
}
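/*
 * Illustrative call site, not part of the original file: freeing a single
 * block inside a transaction.  The caller's handle must already carry
 * enough credits for the bitmap and group-descriptor updates.
 */
static inline void example_free_one_block(handle_t *handle,
					  struct inode *inode,
					  ext3_fsblk_t block)
{
	ext3_free_blocks(handle, inode, block, 1);
}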
/**
 * ext3_test_allocatable()
 * @nr:			given allocation block number (group relative)
 * @bh:			bufferhead contains the bitmap of the given block group
 *
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext3_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext3_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}
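/*
 * Summary of the rule implemented above (illustrative comment, not in the
 * original file): a bit is allocatable only if it is clear in BOTH the
 * on-disk bitmap and the last-committed copy.
 *
 *	bitmap bit	committed-copy bit	allocatable
 *	    1		      any		    no
 *	    0		   (no copy)		    yes
 *	    0		       0		    yes
 *	    0		       1		    no
 */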
/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext3_grpblk_t
bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
			ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext3_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext3_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}
/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find the next
 *			allocatable block in the bitmap.
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext3_grpblk_t
find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
			ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT3_BLOCKS_PER_GROUP.  Aligning up to the
		 * next 64-bit boundary is simple..
		 */
		ext3_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext3_test_allocatable(here, bh))
			return here;
		ext3_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;
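	/*
	 * No free bit near the goal (or no goal): scan for a fully free
	 * byte in the bitmap, i.e. eight adjacent free blocks, starting
	 * byte-aligned at `here'.  memscan() returns a pointer to the
	 * first zero byte (or the end of the scanned area), which is
	 * converted back into a bit number below.
	 */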
	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both.
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}
/**
 * claim_block()
 * @lock:		the spinlock protecting the block group's bitmap
 * @block:		the free block (group relative) to allocate
 * @bh:			the buffer_head contains the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero.
 */
static inline int
claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext3_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
		ext3_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}
/**
 * ext3_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of
 * allocation first, then find the first free bit(s) from the bitmap (within
 * the range), and finally, allocate the blocks by claiming the found free
 * bit as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from the
 *	file's own reservation window;
 *	Otherwise, the allocation range starts from the given goal block and
 *	ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static ext3_grpblk_t
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
			unsigned long *count, struct ext3_reserve_window *my_rsv)
{
	ext3_fsblk_t group_first_block;
	ext3_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext3_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT3_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT3_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT3_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext3_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& ext3_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}
/**
 * find_next_reservable_window():
 *		find a reservable space within the given range.
 *		It does not allocate the reservation window for now:
 *		alloc_new_reservation() will do the work later.
 *
 * @search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem.
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space.  The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 * @my_rsv: the reservation window; its rsv_goal_size is the target size
 *		of the new reservation window.
 *
 * @sb: the super block.
 *
 * @start_block: the first block we consider to start
 *		the real search from.
 *
 * @last_block:
 *		the maximum block number that our goal reservable space
 *		could start from.  This is normally the last block in this
 *		group.  The search will end when we find that the start of the
 *		next possible reservable space is out of this boundary.
 *		This could handle the cross-boundary reservation window
 *		case.
 *
 * Basically we search the given range (start_block, last_block), rather
 * than the whole reservation rb tree, to find a free region that is of
 * my size and has not been reserved.
 */
static int find_next_reservable_window(
				struct ext3_reserve_window_node *search_head,
				struct ext3_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext3_fsblk_t start_block,
				ext3_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext3_reserve_window_node *rsv, *prev;
	ext3_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space of the
		 * expected size, we could, during the re-search, remember
		 * the largest reservable space we have seen and return
		 * that one.
		 *
		 * For now it will fail if we could not find the reservable
		 * space with expected-size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough.  We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * we come here either:
	 * when we reach the end of the whole list,
	 * and there is empty reservable space after the last entry in
	 * the list; append it to the end of the list.
	 *
	 * or we found one reservable space in the middle of the list,
	 * return the reservation window that we could append to.
	 * succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now.  We will check the
	 * disk bitmap later and then, if there are free blocks then we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree the next time
	 * find_next_reservable_window() is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext3_rsv_window_add(sb, my_rsv);

	return 0;
}
/**
 * alloc_new_reservation() -- allocate a new reservation window
 *
 * To make a new reservation, we search part of the filesystem
 * reservation list (the list inside the group).  We try to
 * allocate a new reservation window near the allocation goal,
 * or the beginning of the group, if there is no goal.
 *
 * We first find a reservable space after the goal, then from
 * there, we check the bitmap for the first free block after
 * it.  If there is no free block until the end of group, then the
 * whole group is full, we failed.  Otherwise, check if the free
 * block is inside the expected reservable space; if so, we
 * succeed.
 *
 * If the first free block is outside the reservable space, then
 * starting from the first free block, we search for the next
 * available space, and go on.
 *
 * On success, a new reservation window is found and inserted into
 * the list; it contains at least one free block, and it does not
 * overlap with other reservation windows.
 *
 * failed: we failed to find a reservation window in this group
 *
 * @my_rsv: the reservation
 *
 * @grp_goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
 *	If we have a grp_goal (grp_goal >= 0), then start from there;
 *	with no grp_goal (grp_goal == -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 */
static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
		ext3_grpblk_t grp_goal, struct super_block *sb,
		unsigned int group, struct buffer_head *bitmap_bh)
{
	struct ext3_reserve_window_node *search_head;
	ext3_fsblk_t group_first_block, group_end_block, start_block;
	ext3_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

	group_first_block = ext3_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary
		 * and if the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window.  We still have another part
		 * that belongs to the next group.  In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail); we should keep
		 * the reservation window and simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of the next group.
		 */
		if ((my_rsv->rsv_start <= group_end_block) &&
		    (my_rsv->rsv_end > group_end_block) &&
		    (start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT3_MAX_RESERVE_BLOCKS)
				size = EXT3_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range (start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
					  start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least a free block inside this region.
	 *
	 * Search alternately the first free bit on the block bitmap and
	 * the copy of the last committed bitmap, until we find an
	 * allocatable block.  The search starts from the start block of
	 * the reservable space we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left in the bitmap, no point
		 * in reserving the space.  return failed.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space,
	 * continue the search for the next reservable space,
	 * starting from where the free block is;
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}
/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window so that it contains the
 * required number of free blocks.
 *
 * Since ext3_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * a multiple-block allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext3_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
			struct super_block *sb, int size)
{
	struct ext3_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}
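/*
 * Illustrative numbers, not part of the original file: if the window still
 * covers 3 usable blocks but the caller wants 8, the caller asks to extend
 * by 5.  The window grows by the full delta only when the gap to the next
 * window allows; otherwise it grows right up to that neighbour's start.
 */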
/**
 * ext3_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @my_rsv:		reservation window
 * @count:		target number of blocks to allocate
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from
 * the file's own reservation.  If it does not have a reservation window,
 * instead of looking for a free bit on the bitmap first and then looking up
 * the reservation list to see if the bit is inside somebody else's
 * reservation window, we try to allocate a reservation window for the file
 * starting from the goal first, and then do the block allocation within the
 * reservation window.
 *
 * This avoids repeatedly searching the reservation list when somebody is
 * looking for a free block (without a reservation) and there are lots of
 * free blocks, but they are all being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 */
static ext3_grpblk_t
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			unsigned int group, struct buffer_head *bitmap_bh,
			ext3_grpblk_t grp_goal,
			struct ext3_reserve_window_node * my_rsv,
			unsigned long *count, int *errp)
{
	ext3_fsblk_t group_first_block, group_last_block;
	ext3_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * we don't deal with reservation when
	 * filesystem is mounted without reservation
	 * or the file is not a regular file
	 * or last attempt to allocate a block with reservation turned on failed
	 */
	if (my_rsv == NULL) {
		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group relative block number (if there is a goal)
	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
	 * first block is a filesystem wide block number
	 * first block is the block number of the first block in this group
	 */
	group_first_block = ext3_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) inode does not have a reservation window; or
	 * b) last attempt to allocate a block from existing reservation
	 *    failed; or
	 * c) we come here with a goal and with a reservation window
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * we don't have a goal but already have a reservation window.
	 * In that case we could allocate from the reservation window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
				   (grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							  *count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
		    (my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;			/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext3_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
/**
 * ext3_has_free_blocks()
 * @sb:			in-core super block structure.
 *
 * Check if the filesystem has at least 1 free block available for allocation.
 */
static int ext3_has_free_blocks(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long long free_blocks, root_blocks;
	int cond;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);

	vxdprintk(VXD_CBIT(dlim, 3),
		"ext3_has_free_blocks(%p): free=%llu, root=%llu",
		sb, free_blocks, root_blocks);

	DLIMIT_ADJUST_BLOCK(sb, dx_current_tag(), &free_blocks, &root_blocks);
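	/*
	 * Restated for clarity (illustrative comment, not in the original
	 * file): once fewer than root_blocks + 1 blocks remain, allocation
	 * is refused unless the caller has CAP_SYS_RESOURCE or matches the
	 * reserved uid/gid from the superblock.
	 */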
	cond = (free_blocks < root_blocks + 1 &&
		!capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));

	vxdprintk(VXD_CBIT(dlim, 3),
		"ext3_has_free_blocks(%p): %llu<%llu+1, %c, %u!=%u r=%d",
		sb, free_blocks, root_blocks,
		!capable(CAP_SYS_RESOURCE)?'1':'0',
		sbi->s_resuid, current->fsuid, cond?0:1);

	return (cond ? 0 : 1);
}
/**
 * ext3_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext3_has_free_blocks(sb) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
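/*
 * Illustrative caller pattern, not part of the original file: the usual
 * ENOSPC retry loop around an allocation.  The helper name is hypothetical.
 */
static ext3_fsblk_t example_alloc_with_retry(handle_t *handle,
				struct inode *inode, ext3_fsblk_t goal,
				int *errp)
{
	ext3_fsblk_t block;
	int retries = 0;

retry:
	block = ext3_new_block(handle, inode, goal, errp);
	if (*errp == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return block;
}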
/**
 * ext3_new_blocks() -- core block(s) allocation function
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		error code
 *
 * ext3_new_blocks uses a goal block to assist allocation.  It tries to
 * allocate block(s) from the block group containing the goal block first.
 * If that fails, it will try to allocate block(s) from other block groups
 * without any specific goal block.
 */
ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	int group_no;
	int goal_group;
	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
	int bgi;			/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext3_group_desc *gdp;
	struct ext3_super_block *es;
	struct ext3_sb_info *sbi;
	struct ext3_reserve_window_node *my_rsv = NULL;
	struct ext3_block_alloc_info *block_i;
	unsigned short windowsz = 0;
#ifdef EXT3FS_DEBUG
	static int goal_hits, goal_attempts;
#endif
	unsigned long ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext3_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}
	/* vserver disk-limit accounting */
	if (DLIMIT_ALLOC_BLOCK(inode, 1))
		goto out_dlimit;

	sbi = EXT3_SB(sb);
	es = EXT3_SB(sb)->s_es;
	ext3_debug("goal=%lu.\n", goal);
	/*
	 * Allocate a block from reservation only when
	 * filesystem is mounted with reservation (default, -o reservation), and
	 * it's a regular file, and
	 * the desired window size is greater than 0 (One could use ioctl
	 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
	 * reservation on that particular file)
	 */
	block_i = EXT3_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext3_has_free_blocks(sb)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= le32_to_cpu(es->s_blocks_count))
		goal = le32_to_cpu(es->s_first_data_block);
	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
			EXT3_BLOCKS_PER_GROUP(sb);
	goal_group = group_no;
retry_alloc:
	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * if there are not enough free blocks to make a new reservation
	 * turn off reservation for this allocation
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
				EXT3_BLOCKS_PER_GROUP(sb));
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	ngroups = EXT3_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group if the number of
		 * free blocks is less than half of the reservation
		 * window size.
		 */
		if (free_blocks <= (windowsz/2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal(-1).
		 */
		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus earlier ENOSPC error due to the
	 * filesystem being "full" of reservations; there may indeed be
	 * free blocks available on disk.  In this case, we just forget
	 * about the reservations and do the block allocation as if there
	 * were no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;
allocated:

	ext3_debug("using block group %d(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);

	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
	    in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
		      EXT3_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
		      EXT3_SB(sb)->s_itb_per_group))
		ext3_error(sb, "ext3_new_block",
			    "Allocating block in system zone - "
			    "blocks from "E3FSBLK", length %lu",
			    ret_block, num);

	performed_allocation = 1;
#ifdef CONFIG_JBD_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext3_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __FUNCTION__);
			}
		}
	}
	ext3_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif
	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
		ext3_error(sb, "ext3_new_block",
			    "block("E3FSBLK") >= blocks count(%d) - "
			    "block_group = %d, es == %p ", ret_block,
			le32_to_cpu(es->s_blocks_count), group_no, es);
		goto out;
	}
	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
			ret_block, goal_hits, goal_attempts);

	spin_lock(sb_bgl_lock(sbi, group_no));
	gdp->bg_free_blocks_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_mod(&sbi->s_freeblocks_counter, -num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext3_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;
	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (!performed_allocation)
		DLIMIT_FREE_BLOCK(inode, 1);
out_dlimit:
	if (fatal) {
		*errp = fatal;
		ext3_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}
ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	return ext3_new_blocks(handle, inode, goal, &count, errp);
}
/**
 * ext3_count_free_blocks() -- count filesystem free blocks
 * @sb:			superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
{
	ext3_fsblk_t desc_count;
	struct ext3_group_desc *gdp;
	int i;
	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
#ifdef EXT3FS_DEBUG
	struct ext3_super_block *es;
	ext3_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT3_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext3_count_free_blocks: stored = "E3FSBLK
		", computed = "E3FSBLK", "E3FSBLK"\n",
	       le32_to_cpu(es->s_free_blocks_count),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
static inline int
block_in_use(ext3_fsblk_t block, struct super_block *sb, unsigned char *map)
{
	return ext3_test_bit ((block -
		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
			 EXT3_BLOCKS_PER_GROUP(sb), map);
}
static inline int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext3_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
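/*
 * Worked example (illustrative comment, not in the original file): with
 * SPARSE_SUPER, backup superblocks live only in group 0, group 1, and
 * groups that are powers of 3, 5 or 7:
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, ...
 */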
/**
 * ext3_bg_has_super - number of blocks used by the superblock in group
 * @sb:			superblock for filesystem
 * @group:		group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext3_group_sparse(group))
		return 0;
	return 1;
}
static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
{
	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
	unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
	unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}
static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext3_group_sparse(group))
		return 0;
	return EXT3_SB(sb)->s_gdb_count;
}
/**
 * ext3_bg_num_gdb - number of blocks used by the group table in group
 * @sb:			superblock for filesystem
 * @group:		group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext3_bg_num_gdb_nometa(sb,group);

	return ext3_bg_num_gdb_meta(sb,group);
}
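/*
 * Illustrative example (not in the original file): with a 4KB block size,
 * EXT3_DESC_PER_BLOCK is 128 (4096 / 32-byte descriptors), so under META_BG
 * metagroup 0 spans groups 0-127 and the single descriptor block for that
 * metagroup lives in group 0, with backups in groups 1 and 127 -- exactly
 * the three groups for which ext3_bg_num_gdb_meta() returns 1.
 */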