/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/config.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/vs_base.h>
#include <linux/vs_dlimit.h>
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext3_read_super).
 */
#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
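/*
 * E.g. in_range(5, 3, 4) tests whether block 5 lies in the closed
 * range [3, 6], which it does.
 */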
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long desc;
        struct ext3_group_desc * gdp;

        if (block_group >= EXT3_SB(sb)->s_groups_count) {
                ext3_error (sb, "ext3_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, EXT3_SB(sb)->s_groups_count);
                return NULL;
        }

        group_desc = block_group / EXT3_DESC_PER_BLOCK(sb);
        desc = block_group % EXT3_DESC_PER_BLOCK(sb);
        if (!EXT3_SB(sb)->s_group_desc[group_desc]) {
                ext3_error (sb, "ext3_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                            block_group, group_desc, desc);
                return NULL;
        }

        gdp = (struct ext3_group_desc *)
              EXT3_SB(sb)->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = EXT3_SB(sb)->s_group_desc[group_desc];
        return gdp + desc;
}
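/*
 * Example for ext3_get_group_desc() (illustrative, assuming a 4KiB
 * block size): EXT3_DESC_PER_BLOCK is 4096 / 32 = 128 descriptors per
 * block, so block_group 200 is found in descriptor block 1
 * (group_desc) at index 72 (desc) within it.
 */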
/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext3_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext3_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %lu",
                            block_group, (unsigned long) desc->bg_block_bitmap);
error_out:
        return bh;
}
/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks (handle_t *handle, struct inode * inode,
                        unsigned long block, unsigned long count)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        unsigned long overflow;
        struct super_block * sb;
        struct ext3_group_desc * gdp;
        struct ext3_super_block * es;
        struct ext3_sb_info *sbi;
        int err = 0, ret;
        int dquot_freed_blocks = 0;

        sb = inode->i_sb;
        if (!sb) {
                printk ("ext3_free_blocks: nonexistent device");
                return;
        }
        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
        }

        ext3_debug ("freeing block %lu\n", block);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT3_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
                      EXT3_BLOCKS_PER_GROUP(sb);
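        /*
         * Example (illustrative, 8192 blocks per group and
         * s_first_data_block = 1): freeing block 16400 gives
         * block_group = (16400 - 1) / 8192 = 2 and
         * bit = (16400 - 1) % 8192 = 15.
         */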
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
        if (!gdp)
                goto error_return;

        if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group))
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %lu, count = %lu",
                            block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext3_journal_get_undo_access(handle, bitmap_bh, NULL);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext3_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext3_error(sb, __FUNCTION__,
                                "bit already cleared for block %lu", block + i);
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        dquot_freed_blocks++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);
        spin_lock(sb_bgl_lock(sbi, block_group));
        gdp->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) +
                        dquot_freed_blocks);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_mod(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext3_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext3_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
error_return:
        brelse(bitmap_bh);
        ext3_std_error(sb, err);
        if (dquot_freed_blocks) {
                DLIMIT_FREE_BLOCK(sb, inode->i_xid, dquot_freed_blocks);
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        }
        return;
}
/*
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static inline int ext3_test_allocatable(int nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext3_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext3_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}
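/*
 * Illustrative summary of ext3_test_allocatable():
 *
 *      bit in b_data   bit in b_committed_data        allocatable?
 *            1                  any                    no (in use now)
 *            0             absent or 0                 yes
 *            0                   1                     no (freed, but the
 *                                                      delete has not yet
 *                                                      committed)
 */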
/*
 * Find an allocatable block in a bitmap.  We honour both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static int
find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
{
        int here, next;
        char *p, *r;
        struct journal_head *jh = bh2jh(bh);

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                int end_goal = (start + 63) & ~63;
                here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext3_test_allocatable(here, bh))
                        return here;
                ext3_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;
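        /*
         * Worked example of the byte scan above (illustrative): with
         * here = 100 the scan starts at byte 100 >> 3 = 12 of the
         * bitmap.  If memscan() finds its first all-zero byte at byte
         * offset 20, then next = 20 << 3 = 160, the first bit of a run
         * of at least eight free blocks.
         */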
        if (next < maxblocks && ext3_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both.
         */
        while (here < maxblocks) {
                next = ext3_find_next_zero_bit(bh->b_data, maxblocks, here);
                if (next >= maxblocks)
                        return -1;
                if (ext3_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        here = ext3_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}
/*
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext3_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
                ext3_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}
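/*
 * Note the ordering in claim_block(): the bit is set in b_data first
 * and b_committed_data is only re-checked afterwards, under the bh
 * state lock.  If another thread freed the block between our
 * ext3_test_allocatable() check and the set, the committed-data bit
 * will have been set by ext3_free_blocks() and the claim is backed out.
 */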
/*
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                struct buffer_head *bitmap_bh, int goal, int *errp)
{
        int i;
        int fatal;
        int credits = 0;

        *errp = 0;

        /*
         * Make sure we use undo access for the bitmap, because it is critical
         * that we do the frozen_data COW on bitmap buffers in all cases even
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
        fatal = ext3_journal_get_undo_access(handle, bitmap_bh, &credits);
        if (fatal) {
                *errp = fatal;
                goto fail;
        }

repeat:
        if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
                goal = find_next_usable_block(goal, bitmap_bh,
                                        EXT3_BLOCKS_PER_GROUP(sb));
                if (goal < 0)
                        goto fail;

                for (i = 0; i < 7 && goal > 0 &&
                                ext3_test_allocatable(goal - 1, bitmap_bh);
                        i++, goal--)
                        ;
        }

        if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                goal++;
                if (goal >= EXT3_BLOCKS_PER_GROUP(sb))
                        goto fail;
                goto repeat;
        }

        BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for bitmap block");
        fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                goto fail;
        }
        return goal;

fail:
        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
        ext3_journal_release_buffer(handle, bitmap_bh, credits);
        return -1;
}
static int ext3_has_free_blocks(struct super_block *sb)
{
        struct ext3_sb_info *sbi = EXT3_SB(sb);
        int free_blocks, root_blocks, cond;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);

        vxdprintk(VXD_CBIT(dlim, 3),
                "ext3_has_free_blocks(%p): free=%u, root=%u",
                sb, free_blocks, root_blocks);

        DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks);

        cond = (free_blocks < root_blocks + 1 &&
                !capable(CAP_SYS_RESOURCE) &&
                sbi->s_resuid != current->fsuid &&
                (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));

        vxdprintk(VXD_CBIT(dlim, 3),
                "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d",
                sb, free_blocks, root_blocks,
                !capable(CAP_SYS_RESOURCE)?'1':'0',
                sbi->s_resuid, current->fsuid, cond?0:1);

        return (cond ? 0 : 1);
}
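/*
 * Example (illustrative): with s_r_blocks_count = 1024, an ordinary
 * task gets ENOSPC once fewer than 1025 free blocks remain, while the
 * reserved-blocks owner (s_resuid/s_resgid) or a CAP_SYS_RESOURCE
 * holder may still allocate from the reserve.  DLIMIT_ADJUST_BLOCK may
 * additionally adjust both figures for the current vserver context.
 */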
/*
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext3_has_free_blocks(sb) || (*retries)++ > 3)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
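/*
 * Typical caller pattern (sketch only; ext3_do_operation is a
 * hypothetical stand-in for a real caller such as the write paths):
 *
 *      retry:
 *              err = ext3_do_operation(...);
 *              if (err == -ENOSPC &&
 *                  ext3_should_retry_alloc(sb, &retries))
 *                      goto retry;
 */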
/*
 * ext3_new_block uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
int
ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
                u32 *prealloc_count, u32 *prealloc_block, int *errp)
{
        struct buffer_head *bitmap_bh = NULL;   /* bh */
        struct buffer_head *gdp_bh;             /* bh2 */
        int group_no;                           /* i */
        int ret_block;                          /* j */
        int bgi;                                /* blockgroup iteration index */
        int target_block;                       /* tmp */
        int fatal = 0, err;
        int performed_allocation = 0;
        int free_blocks;
        struct super_block *sb;
        struct ext3_group_desc *gdp;
        struct ext3_super_block *es;
        struct ext3_sb_info *sbi;
#ifdef EXT3FS_DEBUG
        static int goal_hits, goal_attempts;
#endif
        *errp = -ENOSPC;
        sb = inode->i_sb;
        if (!sb) {
                printk("ext3_new_block: nonexistent device");
                return 0;
        }

        /*
         * Check quota for allocation of this block.
         */
        if (DQUOT_ALLOC_BLOCK(inode, 1)) {
                *errp = -EDQUOT;
                return 0;
        }
        if (DLIMIT_ALLOC_BLOCK(sb, inode->i_xid, 1))
                goto out_dlimit;

        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        ext3_debug("goal=%lu.\n", goal);

        if (!ext3_has_free_blocks(sb)) {
                *errp = -ENOSPC;
                goto out;
        }

        /*
         * First, test whether the goal block is free.
         */
        if (goal < le32_to_cpu(es->s_first_data_block) ||
            goal >= le32_to_cpu(es->s_blocks_count))
                goal = le32_to_cpu(es->s_first_data_block);
        group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
                        EXT3_BLOCKS_PER_GROUP(sb);
        gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
        if (!gdp)
                goto io_error;

        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
        if (free_blocks > 0) {
                ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
                                EXT3_BLOCKS_PER_GROUP(sb));
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate(sb, handle, group_no,
                                        bitmap_bh, ret_block, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }
        /*
         * Now search the rest of the groups.  We assume that
         * group_no and gdp correctly point to the last group visited.
         */
        for (bgi = 0; bgi < EXT3_SB(sb)->s_groups_count; bgi++) {
                group_no++;
                if (group_no >= EXT3_SB(sb)->s_groups_count)
                        group_no = 0;
                gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
                if (!gdp) {
                        *errp = -EIO;
                        goto out;
                }
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                if (free_blocks <= 0)
                        continue;

                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate(sb, handle, group_no,
                                                bitmap_bh, -1, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }

        /* No space left on the device */
        *errp = -ENOSPC;
        goto out;

allocated:

        ext3_debug("using block group %d(%d)\n",
                        group_no, gdp->bg_free_blocks_count);

        BUFFER_TRACE(gdp_bh, "get_write_access");
        fatal = ext3_journal_get_write_access(handle, gdp_bh);
        if (fatal)
                goto out;

        target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
                                + le32_to_cpu(es->s_first_data_block);
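        /*
         * Illustrative numbers: with 8192 blocks per group and
         * s_first_data_block = 1, a group-relative ret_block of 10 in
         * group 3 yields target_block = 10 + 3 * 8192 + 1 = 24587.
         */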
        if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
            target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
            in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group))
                ext3_error(sb, "ext3_new_block",
                            "Allocating block in system zone - "
                            "block = %u", target_block);

        performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
        {
                struct buffer_head *debug_bh;

                /* Record bitmap buffer state in the newly allocated block */
                debug_bh = sb_find_get_block(sb, target_block);
                if (debug_bh) {
                        BUFFER_TRACE(debug_bh, "state when allocated");
                        BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
                        brelse(debug_bh);
                }
        }
        jbd_lock_bh_state(bitmap_bh);
        spin_lock(sb_bgl_lock(sbi, group_no));
        if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
                if (ext3_test_bit(ret_block,
                                bh2jh(bitmap_bh)->b_committed_data)) {
                        printk("%s: block was unexpectedly set in "
                                "b_committed_data\n", __FUNCTION__);
                }
        }
        ext3_debug("found bit %d\n", ret_block);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        jbd_unlock_bh_state(bitmap_bh);
#endif

        /* ret_block was blockgroup-relative.  Now it becomes fs-relative */
        ret_block = target_block;

        if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
                ext3_error(sb, "ext3_new_block",
                            "block(%d) >= blocks count(%d) - "
                            "block_group = %d, es == %p ", ret_block,
                        le32_to_cpu(es->s_blocks_count), group_no, es);
                goto out;
        }

        /*
         * It is up to the caller to add the new buffer to a journal
         * list of some description.  We don't know in advance whether
         * the caller wants to use it as metadata or data.
         */
        ext3_debug("allocating block %d. Goal hits %d of %d.\n",
                        ret_block, goal_hits, goal_attempts);

        spin_lock(sb_bgl_lock(sbi, group_no));
        gdp->bg_free_blocks_count =
                        cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        percpu_counter_mod(&sbi->s_freeblocks_counter, -1);

        BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
        err = ext3_journal_dirty_metadata(handle, gdp_bh);
        if (!fatal)
                fatal = err;

        sb->s_dirt = 1;
        if (fatal)
                goto out;

        *errp = 0;
        brelse(bitmap_bh);
        return ret_block;

io_error:
        *errp = -EIO;
out:
        if (!performed_allocation)
                DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
out_dlimit:
        if (fatal) {
                *errp = fatal;
                ext3_std_error(sb, fatal);
        }
        /*
         * Undo the block allocation
         */
        if (!performed_allocation)
                DQUOT_FREE_BLOCK(inode, 1);
        brelse(bitmap_bh);
        return 0;
}
unsigned long ext3_count_free_blocks(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext3_group_desc *gdp;
        int i;
#ifdef EXT3FS_DEBUG
        struct ext3_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        lock_super(sb);
        es = EXT3_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext3_count_free(bitmap_bh, sb->s_blocksize);
                printk("group %d: stored = %d, counted = %lu\n",
                        i, le16_to_cpu(gdp->bg_free_blocks_count), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
                le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
        unlock_super(sb);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
        }

        return desc_count;
#endif
}
static inline int block_in_use(unsigned long block,
                                struct super_block * sb,
                                unsigned char * map)
{
        return ext3_test_bit ((block -
                le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
                        EXT3_BLOCKS_PER_GROUP(sb), map);
}
static inline int test_root(int a, int b)
{
        if (a == 0)
                return 1;
        while (1) {
                if (a == 1)
                        return 1;
                if (a % b)
                        return 0;
                a = a / b;
        }
}

int ext3_group_sparse(int group)
{
        return (test_root(group, 3) || test_root(group, 5) ||
                test_root(group, 7));
}
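/*
 * With the sparse_super feature, superblock/descriptor backups live
 * only in groups 0, 1 and the powers of 3, 5 and 7: 0, 1, 3, 5, 7, 9,
 * 25, 27, 49, 81, 125, ...  test_root(a, b) returns true when a is 0,
 * 1 or a pure power of b, e.g. test_root(81, 3) = 1 but
 * test_root(45, 3) = 0.
 */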
/**
 *      ext3_bg_has_super - number of blocks used by the superblock in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the superblock (primary or backup)
 *      in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return 1;
}
/**
 *      ext3_bg_num_gdb - number of blocks used by the group table in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the group descriptor table
 *      (primary or backup) in this group.  In the future there may be a
 *      different number of descriptor blocks in each group.
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return EXT3_SB(sb)->s_gdb_count;
}
#ifdef CONFIG_EXT3_CHECK
/* Called at mount-time, super-block is locked */
void ext3_check_blocks_bitmap (struct super_block * sb)
{
        struct ext3_super_block *es;
        unsigned long desc_count, bitmap_count, x, j;
        unsigned long desc_blocks;
        struct buffer_head *bitmap_bh = NULL;
        struct ext3_group_desc *gdp;
        int i;

        es = EXT3_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc (sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                if (ext3_bg_has_super(sb, i) &&
                                !ext3_test_bit(0, bitmap_bh->b_data))
                        ext3_error(sb, __FUNCTION__,
                                   "Superblock in group %d is marked free", i);

                desc_blocks = ext3_bg_num_gdb(sb, i);
                for (j = 0; j < desc_blocks; j++)
                        if (!ext3_test_bit(j + 1, bitmap_bh->b_data))
                                ext3_error(sb, __FUNCTION__,
                                           "Descriptor block #%ld in group "
                                           "%d is marked free", j, i);

                if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap),
                                        sb, bitmap_bh->b_data))
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Block bitmap for group %d is marked free",
                                    i);

                if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap),
                                        sb, bitmap_bh->b_data))
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Inode bitmap for group %d is marked free",
                                    i);

                for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
                        if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
                                                sb, bitmap_bh->b_data))
                                ext3_error (sb, "ext3_check_blocks_bitmap",
                                            "Block #%ld of the inode table in "
                                            "group %d is marked free", j, i);

                x = ext3_count_free(bitmap_bh, sb->s_blocksize);
                if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Wrong free blocks count for group %d, "
                                    "stored = %d, counted = %lu", i,
                                    le16_to_cpu(gdp->bg_free_blocks_count), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
                ext3_error (sb, "ext3_check_blocks_bitmap",
                            "Wrong free blocks count in super block, "
                            "stored = %lu, counted = %lu",
                            (unsigned long)le32_to_cpu(es->s_free_blocks_count),
                            bitmap_count);
}
#endif