2 * linux/fs/ext3/balloc.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10 * Big-endian to little-endian byte-swapping/bitmaps by
11 * David S. Miller (davem@caip.rutgers.edu), 1995
14 #include <linux/config.h>
15 #include <linux/time.h>
16 #include <linux/capability.h>
18 #include <linux/jbd.h>
19 #include <linux/ext3_fs.h>
20 #include <linux/ext3_jbd.h>
21 #include <linux/quotaops.h>
22 #include <linux/buffer_head.h>
23 #include <linux/vs_base.h>
24 #include <linux/vs_dlimit.h>
27 * balloc.c contains the blocks allocation and deallocation routines
31 * The free blocks are managed by bitmaps. A file system contains several
32 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap
33 * block for inodes, N blocks for the inode table and data blocks.
35 * The file system contains group descriptors which are located after the
36 * super block. Each descriptor contains the number of the bitmap block and
37 * the free blocks count in the block. The descriptors are loaded in memory
38 * when a file system is mounted (see ext3_read_super).
42 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
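/*
 * For illustration only (not part of the original code): in_range() tests
 * whether block b falls inside the closed interval [first, first+len-1].
 * For example:
 *
 *	in_range(12, 10, 3);	// 12 >= 10 && 12 <= 12  ->  1
 *	in_range(13, 10, 3);	// 13 >  12              ->  0
 *
 * It is used below to detect attempts to free or allocate blocks inside a
 * group's system zones (the bitmaps and the inode table).
 */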
44 struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
45 unsigned int block_group,
46 struct buffer_head ** bh)
48 unsigned long group_desc;
50 struct ext3_group_desc * desc;
51 struct ext3_sb_info *sbi = EXT3_SB(sb);
53 if (block_group >= sbi->s_groups_count) {
54 ext3_error (sb, "ext3_get_group_desc",
55 "block_group >= groups_count - "
56 "block_group = %d, groups_count = %lu",
57 block_group, sbi->s_groups_count);
63 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
64 offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
65 if (!sbi->s_group_desc[group_desc]) {
66 ext3_error (sb, "ext3_get_group_desc",
67 "Group descriptor not loaded - "
68 "block_group = %d, group_desc = %lu, desc = %lu",
69 block_group, group_desc, offset);
73 desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
75 *bh = sbi->s_group_desc[group_desc];
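/*
 * Worked example of the descriptor-indexing arithmetic above (the values
 * are illustrative, not from this file): with 4K blocks a group descriptor
 * is 32 bytes, so EXT3_DESC_PER_BLOCK(sb) = 4096 / 32 = 128 and
 * EXT3_DESC_PER_BLOCK_BITS(sb) = 7. For block_group = 300:
 *
 *	group_desc = 300 >> 7  = 2;	// third descriptor block
 *	offset     = 300 & 127 = 44;	// 45th descriptor inside it
 *
 * so the descriptor lives at index 44 within s_group_desc[2]->b_data.
 */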
80 * Read the bitmap for a given block_group, reading into the specified
81 * slot in the superblock's bitmap cache.
83 * Return buffer_head on success or NULL in case of failure.
85 static struct buffer_head *
86 read_block_bitmap(struct super_block *sb, unsigned int block_group)
88 struct ext3_group_desc * desc;
89 struct buffer_head * bh = NULL;
91 desc = ext3_get_group_desc (sb, block_group, NULL);
94 bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
96 ext3_error (sb, "read_block_bitmap",
97 "Cannot read block bitmap - "
98 "block_group = %d, block_bitmap = %u",
99 block_group, le32_to_cpu(desc->bg_block_bitmap));
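/*
 * Typical calling pattern (a sketch mirroring the callers later in this
 * file): the NULL return must be checked before the bitmap is used, and
 * the buffer released with brelse() when done.
 *
 *	struct buffer_head *bitmap_bh = read_block_bitmap(sb, group_no);
 *	if (!bitmap_bh)
 *		goto io_error;
 *	... use bitmap_bh->b_data ...
 *	brelse(bitmap_bh);
 */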
104 * The reservation window structure operations
105 * --------------------------------------------
106 * Operations include:
107 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
109 * We use a sorted doubly linked list for the per-filesystem reservation
110 * window list (like in vm_region).
112 * Initially, we keep these small operations in abstract functions,
113 * so that if we later need a better search structure than a doubly
114 * linked list, we can easily switch to it without changing too much.
118 static void __rsv_window_dump(struct rb_root *root, int verbose,
122 struct ext3_reserve_window_node *rsv, *prev;
130 printk("Block Allocation Reservation Windows Map (%s):\n", fn);
132 rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
134 printk("reservation window 0x%p "
135 "start: %d, end: %d\n",
136 rsv, rsv->rsv_start, rsv->rsv_end);
137 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
138 printk("Bad reservation %p (start >= end)\n",
142 if (prev && prev->rsv_end >= rsv->rsv_start) {
143 printk("Bad reservation %p (prev->end >= start)\n",
149 printk("Restarting reservation walk in verbose mode\n");
157 printk("Window map complete.\n");
161 #define rsv_window_dump(root, verbose) \
162 __rsv_window_dump((root), (verbose), __FUNCTION__)
164 #define rsv_window_dump(root, verbose) do {} while (0)
168 goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
169 unsigned int group, struct super_block * sb)
171 unsigned long group_first_block, group_last_block;
173 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
174 group * EXT3_BLOCKS_PER_GROUP(sb);
175 group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
177 if ((rsv->_rsv_start > group_last_block) ||
178 (rsv->_rsv_end < group_first_block))
180 if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start)
181 || (goal + group_first_block > rsv->_rsv_end)))
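/*
 * Worked example (illustrative numbers only): assume s_first_data_block
 * is 0 and EXT3_BLOCKS_PER_GROUP(sb) is 32768. For group = 2 the group
 * spans blocks [65536, 98303]. A window with _rsv_start = 70000 and
 * _rsv_end = 70100 overlaps that range, and a group-relative goal of
 * 4500 maps to block 65536 + 4500 = 70036, which lies inside the window,
 * so the goal is usable; a goal of 5000 (block 70536) is not.
 */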
187 * Find the reserved window which includes the goal, or the previous one
188 * if the goal is not in any window.
189 * Returns NULL if there are no windows or if all windows start after the goal.
191 static struct ext3_reserve_window_node *
192 search_reserve_window(struct rb_root *root, unsigned long goal)
194 struct rb_node *n = root->rb_node;
195 struct ext3_reserve_window_node *rsv;
201 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
203 if (goal < rsv->rsv_start)
205 else if (goal > rsv->rsv_end)
211 * We've fallen off the end of the tree: the goal wasn't inside
212 * any particular node. OK, the previous node must be to one
213 * side of the interval containing the goal. If it's the RHS,
214 * we need to back up one.
216 if (rsv->rsv_start > goal) {
217 n = rb_prev(&rsv->rsv_node);
218 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
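/*
 * Example of the "back up one" case (illustrative): with windows
 * [100,120] and [200,220] in the tree, a search for goal 150 descends
 * until it falls off the tree at [200,220]. Since 200 > 150, rb_prev()
 * steps back to [100,120], the nearest window starting at or before the
 * goal, which is what the caller wants as its search head.
 */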
223 void ext3_rsv_window_add(struct super_block *sb,
224 struct ext3_reserve_window_node *rsv)
226 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
227 struct rb_node *node = &rsv->rsv_node;
228 unsigned int start = rsv->rsv_start;
230 struct rb_node ** p = &root->rb_node;
231 struct rb_node * parent = NULL;
232 struct ext3_reserve_window_node *this;
237 this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
239 if (start < this->rsv_start)
241 else if (start > this->rsv_end)
247 rb_link_node(node, parent, p);
248 rb_insert_color(node, root);
251 static void rsv_window_remove(struct super_block *sb,
252 struct ext3_reserve_window_node *rsv)
254 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
255 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
256 rsv->rsv_alloc_hit = 0;
257 rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
260 static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
262 /* a valid reservation end block can never be 0 */
263 return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
265 void ext3_init_block_alloc_info(struct inode *inode)
267 struct ext3_inode_info *ei = EXT3_I(inode);
268 struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
269 struct super_block *sb = inode->i_sb;
271 block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
273 struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
275 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
276 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
279 * if the filesystem is mounted with NORESERVATION, the goal
280 * reservation window size is set to zero to indicate that
281 * block reservation is off
283 if (!test_opt(sb, RESERVATION))
284 rsv->rsv_goal_size = 0;
286 rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
287 rsv->rsv_alloc_hit = 0;
288 block_i->last_alloc_logical_block = 0;
289 block_i->last_alloc_physical_block = 0;
291 ei->i_block_alloc_info = block_i;
294 void ext3_discard_reservation(struct inode *inode)
296 struct ext3_inode_info *ei = EXT3_I(inode);
297 struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
298 struct ext3_reserve_window_node *rsv;
299 spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
304 rsv = &block_i->rsv_window_node;
305 if (!rsv_is_empty(&rsv->rsv_window)) {
307 if (!rsv_is_empty(&rsv->rsv_window))
308 rsv_window_remove(inode->i_sb, rsv);
309 spin_unlock(rsv_lock);
313 /* Free given blocks, update quota and i_blocks field */
314 void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
315 unsigned long block, unsigned long count,
316 int *pdquot_freed_blocks)
318 struct buffer_head *bitmap_bh = NULL;
319 struct buffer_head *gd_bh;
320 unsigned long block_group;
323 unsigned long overflow;
324 struct ext3_group_desc * desc;
325 struct ext3_super_block * es;
326 struct ext3_sb_info *sbi;
328 unsigned group_freed;
330 *pdquot_freed_blocks = 0;
333 if (block < le32_to_cpu(es->s_first_data_block) ||
334 block + count < block ||
335 block + count > le32_to_cpu(es->s_blocks_count)) {
336 ext3_error (sb, "ext3_free_blocks",
337 "Freeing blocks not in datazone - "
338 "block = %lu, count = %lu", block, count);
342 ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
346 block_group = (block - le32_to_cpu(es->s_first_data_block)) /
347 EXT3_BLOCKS_PER_GROUP(sb);
348 bit = (block - le32_to_cpu(es->s_first_data_block)) %
349 EXT3_BLOCKS_PER_GROUP(sb);
351 * Check to see if we are freeing blocks across a group
352 * boundary.
354 if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
355 overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
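/*
 * Example (illustrative): with EXT3_BLOCKS_PER_GROUP(sb) = 32768,
 * freeing count = 20 blocks starting at bit = 32760 gives
 * overflow = 32760 + 20 - 32768 = 12. This pass frees only the 8
 * blocks that belong to this group; the remaining 12 are handled by
 * looping back with block += count and count = overflow.
 */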
359 bitmap_bh = read_block_bitmap(sb, block_group);
362 desc = ext3_get_group_desc (sb, block_group, &gd_bh);
366 if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
367 in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
368 in_range (block, le32_to_cpu(desc->bg_inode_table),
369 sbi->s_itb_per_group) ||
370 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
371 sbi->s_itb_per_group))
372 ext3_error (sb, "ext3_free_blocks",
373 "Freeing blocks in system zones - "
374 "Block = %lu, count = %lu",
378 * We are about to start releasing blocks in the bitmap,
379 * so we need undo access.
381 /* @@@ check errors */
382 BUFFER_TRACE(bitmap_bh, "getting undo access");
383 err = ext3_journal_get_undo_access(handle, bitmap_bh);
388 * We are about to modify some metadata. Call the journal APIs
389 * to unshare ->b_data if a currently-committing transaction is
390 * using it.
392 BUFFER_TRACE(gd_bh, "get_write_access");
393 err = ext3_journal_get_write_access(handle, gd_bh);
397 jbd_lock_bh_state(bitmap_bh);
399 for (i = 0, group_freed = 0; i < count; i++) {
401 * An HJ special. This is expensive...
403 #ifdef CONFIG_JBD_DEBUG
404 jbd_unlock_bh_state(bitmap_bh);
406 struct buffer_head *debug_bh;
407 debug_bh = sb_find_get_block(sb, block + i);
409 BUFFER_TRACE(debug_bh, "Deleted!");
410 if (!bh2jh(bitmap_bh)->b_committed_data)
411 BUFFER_TRACE(debug_bh,
412 "No commited data in bitmap");
413 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
417 jbd_lock_bh_state(bitmap_bh);
419 if (need_resched()) {
420 jbd_unlock_bh_state(bitmap_bh);
422 jbd_lock_bh_state(bitmap_bh);
424 /* @@@ This prevents newly-allocated data from being
425 * freed and then reallocated within the same
426 * transaction.
428 * Ideally we would want to allow that to happen, but to
429 * do so requires making journal_forget() capable of
430 * revoking the queued write of a data block, which
431 * implies blocking on the journal lock. *forget()
432 * cannot block due to truncate races.
434 * Eventually we can fix this by making journal_forget()
435 * return a status indicating whether or not it was able
436 * to revoke the buffer. On successful revoke, it is
437 * safe not to set the allocation bit in the committed
438 * bitmap, because we know that there is no outstanding
439 * activity on the buffer any more and so it is safe to
440 * reallocate it.
442 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
443 J_ASSERT_BH(bitmap_bh,
444 bh2jh(bitmap_bh)->b_committed_data != NULL);
445 ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
446 bh2jh(bitmap_bh)->b_committed_data);
449 * We clear the bit in the bitmap after setting the committed
450 * data bit, because this is the reverse order to that which
451 * the allocator uses.
453 BUFFER_TRACE(bitmap_bh, "clear bit");
454 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
455 bit + i, bitmap_bh->b_data)) {
456 jbd_unlock_bh_state(bitmap_bh);
457 ext3_error(sb, __FUNCTION__,
458 "bit already cleared for block %lu", block + i);
459 jbd_lock_bh_state(bitmap_bh);
460 BUFFER_TRACE(bitmap_bh, "bit already cleared");
465 jbd_unlock_bh_state(bitmap_bh);
467 spin_lock(sb_bgl_lock(sbi, block_group));
468 desc->bg_free_blocks_count =
469 cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
471 spin_unlock(sb_bgl_lock(sbi, block_group));
472 percpu_counter_mod(&sbi->s_freeblocks_counter, count);
474 /* We dirtied the bitmap block */
475 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
476 err = ext3_journal_dirty_metadata(handle, bitmap_bh);
478 /* And the group descriptor block */
479 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
480 ret = ext3_journal_dirty_metadata(handle, gd_bh);
482 *pdquot_freed_blocks += group_freed;
484 if (overflow && !err) {
492 ext3_std_error(sb, err);
496 /* Free given blocks, update quota and i_blocks field */
497 void ext3_free_blocks(handle_t *handle, struct inode *inode,
498 unsigned long block, unsigned long count)
500 struct super_block * sb;
501 int dquot_freed_blocks;
505 printk ("ext3_free_blocks: nonexistent device");
508 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
509 if (dquot_freed_blocks) {
510 DLIMIT_FREE_BLOCK(inode, dquot_freed_blocks);
511 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
517 * For ext3 allocations, we must not reuse any blocks which are
518 * allocated in the bitmap buffer's "last committed data" copy. This
519 * prevents deletes from freeing up the page for reuse until we have
520 * committed the delete transaction.
522 * If we didn't do this, then deleting something and reallocating it as
523 * data would allow the old block to be overwritten before the
524 * transaction committed (because we force data to disk before commit).
525 * This would lead to corruption if we crashed between overwriting the
526 * data and committing the delete.
528 * @@@ We may want to make this allocation behaviour conditional on
529 * data-writes at some point, and disable it for metadata allocations or
532 static int ext3_test_allocatable(int nr, struct buffer_head *bh)
535 struct journal_head *jh = bh2jh(bh);
537 if (ext3_test_bit(nr, bh->b_data))
540 jbd_lock_bh_state(bh);
541 if (!jh->b_committed_data)
544 ret = !ext3_test_bit(nr, jh->b_committed_data);
545 jbd_unlock_bh_state(bh);
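/*
 * In effect (a summary, not extra logic): block nr is allocatable iff
 *
 *	!ext3_test_bit(nr, bh->b_data) &&
 *	(jh->b_committed_data == NULL ||
 *	 !ext3_test_bit(nr, jh->b_committed_data))
 *
 * i.e. it must be free in the live bitmap and must not have been in use
 * in the copy of the bitmap taken when the committing transaction started.
 */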
550 bitmap_search_next_usable_block(int start, struct buffer_head *bh,
554 struct journal_head *jh = bh2jh(bh);
557 * The bitmap search --- search forward alternately through the actual
558 * bitmap and the last-committed copy until we find a bit free in
559 * both.
561 while (start < maxblocks) {
562 next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
563 if (next >= maxblocks)
565 if (ext3_test_allocatable(next, bh))
567 jbd_lock_bh_state(bh);
568 if (jh->b_committed_data)
569 start = ext3_find_next_zero_bit(jh->b_committed_data,
571 jbd_unlock_bh_state(bh);
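/*
 * Sketch of the alternation above (illustrative): suppose bit 10 is
 * clear in the live bitmap but set in b_committed_data. The first
 * ext3_find_next_zero_bit() on b_data returns 10; ext3_test_allocatable()
 * rejects it; the second ext3_find_next_zero_bit() on b_committed_data
 * then skips forward to the first bit free in the committed copy, and
 * the loop retries from there. A bit is returned only once it is free
 * in both bitmaps.
 */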
577 * Find an allocatable block in a bitmap. We honour both the bitmap and
578 * its last-committed copy (if that exists), and perform the "most
579 * appropriate allocation" algorithm of looking for a free block near
580 * the initial goal; then for a free byte somewhere in the bitmap; then
581 * for any free bit in the bitmap.
584 find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
591 * The goal was occupied; search forward for a free
592 * block within the next XX blocks.
594 * end_goal is more or less random, but it has to be
595 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
596 * next 64-bit boundary is simple..
598 int end_goal = (start + 63) & ~63;
599 if (end_goal > maxblocks)
600 end_goal = maxblocks;
601 here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
602 if (here < end_goal && ext3_test_allocatable(here, bh))
604 ext3_debug("Bit not found near goal\n");
611 p = ((char *)bh->b_data) + (here >> 3);
612 r = memscan(p, 0, (maxblocks - here + 7) >> 3);
613 next = (r - ((char *)bh->b_data)) << 3;
615 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
619 * The bitmap search --- search forward alternately through the actual
620 * bitmap and the last-committed copy until we find a bit free in
621 * both.
623 here = bitmap_search_next_usable_block(here, bh, maxblocks);
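/*
 * Worked example of the two searches above (illustrative numbers): for
 * start = 1000, the goal-area search scans up to
 * end_goal = (1000 + 63) & ~63 = 1024. Failing that, the free-byte
 * search starts at byte 1000 >> 3 = 125 of the bitmap; if memscan()
 * finds a 0x00 byte at offset 130, then next = 130 << 3 = 1040, the
 * first bit of a run of at least 8 free blocks.
 */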
628 * We think we can allocate this block in this bitmap. Try to set the bit.
629 * If that succeeds then check that nobody has allocated and then freed the
630 * block since we saw that it was not marked in b_committed_data. If it _was_
631 * allocated and freed then clear the bit in the bitmap again and return
632 * zero.
635 claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
637 struct journal_head *jh = bh2jh(bh);
640 if (ext3_set_bit_atomic(lock, block, bh->b_data))
642 jbd_lock_bh_state(bh);
643 if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
644 ext3_clear_bit_atomic(lock, block, bh->b_data);
649 jbd_unlock_bh_state(bh);
654 * If we failed to allocate the desired block then we may end up crossing to a
655 * new bitmap. In that case we must release write access to the old one via
656 * ext3_journal_release_buffer(), else we'll run out of credits.
659 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
660 struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
662 int group_first_block, start, end;
664 /* we do allocation within the reservation window if we have a window */
667 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
668 group * EXT3_BLOCKS_PER_GROUP(sb);
669 if (my_rsv->_rsv_start >= group_first_block)
670 start = my_rsv->_rsv_start - group_first_block;
672 /* reservation window crosses group boundary */
674 end = my_rsv->_rsv_end - group_first_block + 1;
675 if (end > EXT3_BLOCKS_PER_GROUP(sb))
676 /* reservation window crosses group boundary */
677 end = EXT3_BLOCKS_PER_GROUP(sb);
678 if ((start <= goal) && (goal < end))
687 end = EXT3_BLOCKS_PER_GROUP(sb);
690 BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
693 if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
694 goal = find_next_usable_block(start, bitmap_bh, end);
700 for (i = 0; i < 7 && goal > start &&
701 ext3_test_allocatable(goal - 1,
709 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
711 * The block was allocated by another thread, or it was
712 * allocated and then freed by another thread
726 * find_next_reservable_window():
727 * find a reservable space within the given range.
728 * It does not allocate the reservation window for now:
729 * alloc_new_reservation() will do the work later.
731 * @search_head: the head of the searching list;
732 * This is not necessarily the list head of the whole filesystem
734 * We have both head and start_block to assist the search
735 * for the reservable space. The list starts from head,
736 * but we will shift to the place where start_block is,
737 * then start from there, when looking for a reservable space.
739 * @size: the target new reservation window size
741 * @group_first_block: the first block we consider to start
742 * the real search from
745 * the maximum block number that our goal reservable space
746 * could start from. This is normally the last block in this
747 * group. The search ends when the start of the next possible
748 * reservable space falls outside this boundary.
749 * This handles a reservation window request that crosses
750 * the group boundary.
752 * Basically we search the given range (start_block, last_block),
753 * rather than the whole reservation doubly linked list,
754 * to find a free region that is of the requested size and has not
755 * yet been reserved.
758 static int find_next_reservable_window(
759 struct ext3_reserve_window_node *search_head,
760 struct ext3_reserve_window_node *my_rsv,
761 struct super_block * sb, int start_block,
764 struct rb_node *next;
765 struct ext3_reserve_window_node *rsv, *prev;
767 int size = my_rsv->rsv_goal_size;
769 /* TODO: make the start of the reservation window byte-aligned */
770 /* cur = *start_block & ~7;*/
777 if (cur <= rsv->rsv_end)
778 cur = rsv->rsv_end + 1;
781 * In the case where we cannot find a reservable space of the
782 * expected size, during the re-search we could remember the
783 * largest reservable space we have seen and return that one.
786 * For now, though, it fails if we cannot find a reservable
787 * space of the expected size (or more)...
789 if (cur > last_block)
790 return -1; /* fail */
793 next = rb_next(&rsv->rsv_node);
794 rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
797 * Reached the last reservation; we can just append to the
798 * previous one.
803 if (cur + size <= rsv->rsv_start) {
805 * Found a reservable space big enough. We could
806 * have a reservation that crosses the group boundary here.
812 * We come here either:
813 * when we reach the end of the whole list, and there is empty
814 * reservable space after the last entry in the list, in which
815 * case we append the new window to the end of the list;
817 * or when we find a reservable space in the middle of the list,
818 * in which case we return the reservation window that we could
819 * append to.
822 if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
823 rsv_window_remove(sb, my_rsv);
826 * Let's book the whole available window for now. We will check the
827 * disk bitmap later and then, if there are free blocks, we adjust
828 * the window size if it's larger than requested.
829 * Otherwise, we will remove this node from the tree the next time
830 * we call find_next_reservable_window().
832 my_rsv->rsv_start = cur;
833 my_rsv->rsv_end = cur + size - 1;
834 my_rsv->rsv_alloc_hit = 0;
837 ext3_rsv_window_add(sb, my_rsv);
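/*
 * Worked example (illustrative): searching for size = 16 with
 * start_block = 100 and existing windows [100,119] and [130,149].
 * cur starts at 100 and is pushed to 120 by the first window; 120 + 16
 * exceeds 130, so the gap before the second window is too small; cur
 * then moves to 150, there is no further window, and [150,165] is
 * booked as the new reservation.
 */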
843 * alloc_new_reservation()--allocate a new reservation window
845 * To make a new reservation, we search part of the filesystem
846 * reservation list (the list inside the group). We try to
847 * allocate a new reservation window near the allocation goal,
848 * or the beginning of the group, if there is no goal.
850 * We first find a reservable space after the goal, then from
851 * there, we check the bitmap for the first free block after
852 * it. If there is no free block until the end of group, then the
853 * whole group is full and we have failed. Otherwise, check if the
854 * free block is inside the expected reservable space; if so, we
855 * succeed.
856 * If the first free block is outside the reservable space, then
857 * starting from that first free block, we search for the next
858 * available space, and go on.
860 * on success, a new reservation window is found and inserted into the list.
861 * It contains at least one free block, and it does not overlap with other
862 * reservation windows.
864 * on failure, we could not find a reservation window in this group
866 * @rsv: the reservation
868 * @goal: The goal (group-relative). It is where the search for a
869 * free reservable space should start from.
870 * If we have a goal (goal >= 0), then start from there;
871 * with no goal (goal == -1), we start from the first block
872 * of the group.
874 * @sb: the super block
875 * @group: the group we are trying to allocate in
876 * @bitmap_bh: the block group block bitmap
879 static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
880 int goal, struct super_block *sb,
881 unsigned int group, struct buffer_head *bitmap_bh)
883 struct ext3_reserve_window_node *search_head;
884 int group_first_block, group_end_block, start_block;
885 int first_free_block;
886 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
889 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
891 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
892 group * EXT3_BLOCKS_PER_GROUP(sb);
893 group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
896 start_block = group_first_block;
898 start_block = goal + group_first_block;
900 size = my_rsv->rsv_goal_size;
902 if (!rsv_is_empty(&my_rsv->rsv_window)) {
904 * if the old reservation crosses the group boundary
905 * and the goal is inside the old reservation window,
906 * we will come here when we just failed to allocate from
907 * the first part of the window. We still have another part
908 * that belongs to the next group. In this case, there is no
909 * point in discarding our window and trying to allocate a
910 * new one in this group (which will fail); we should
911 * keep the reservation window and simply move on.
913 * Maybe we could shift the start block of the reservation
914 * window to the first block of the next group.
917 if ((my_rsv->rsv_start <= group_end_block) &&
918 (my_rsv->rsv_end > group_end_block) &&
919 (start_block >= my_rsv->rsv_start))
922 if ((my_rsv->rsv_alloc_hit >
923 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
925 * if the previous allocation hit ratio is greater than half,
926 * we double the size of the reservation window the next time;
927 * otherwise we keep it the same
930 if (size > EXT3_MAX_RESERVE_BLOCKS)
931 size = EXT3_MAX_RESERVE_BLOCKS;
932 my_rsv->rsv_goal_size = size;
938 * shift the search start to the window near the goal block
940 search_head = search_reserve_window(fs_rsv_root, start_block);
943 * find_next_reservable_window() simply finds a reservable window
944 * inside the given range (start_block, group_end_block).
946 * To make sure the reservation window has a free bit inside it, we
947 * need to check the bitmap after we find a reservable window.
950 ret = find_next_reservable_window(search_head, my_rsv, sb,
951 start_block, group_end_block);
954 if (!rsv_is_empty(&my_rsv->rsv_window))
955 rsv_window_remove(sb, my_rsv);
956 spin_unlock(rsv_lock);
961 * On success, find_next_reservable_window() returns the
962 * reservation window where there is a reservable space after it.
963 * Before we reserve this reservable space, we need
964 * to make sure there is at least a free block inside this region.
966 * We do this by searching for the first free bit on the block bitmap
967 * and the copy of the last committed bitmap alternately, until we
968 * find an allocatable block. The search starts from the start block
969 * of the reservable space we just found.
971 spin_unlock(rsv_lock);
972 first_free_block = bitmap_search_next_usable_block(
973 my_rsv->rsv_start - group_first_block,
974 bitmap_bh, group_end_block - group_first_block + 1);
976 if (first_free_block < 0) {
978 * no free blocks left in the bitmap; there is no point
979 * in reserving the space. Return failure.
982 if (!rsv_is_empty(&my_rsv->rsv_window))
983 rsv_window_remove(sb, my_rsv);
984 spin_unlock(rsv_lock);
985 return -1; /* failed */
988 start_block = first_free_block + group_first_block;
990 * check if the first free block is within the
991 * free space we just reserved
993 if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
994 return 0; /* success */
996 * if the first free bit we found is outside the reservable space,
997 * continue the search for the next reservable space,
998 * starting from where the free block is;
999 * we also shift the list head to where we stopped last time
1001 search_head = my_rsv;
1002 spin_lock(rsv_lock);
1007 * This is the main function used to allocate a new block and its
1008 * reservation window.
1010 * Each time a new block allocation is needed, we first try to allocate
1011 * from the inode's own reservation. If it does not have a reservation
1012 * window, then instead of looking for a free bit in the bitmap first and
1013 * then looking up the reservation list to see if the bit falls inside
1014 * somebody else's reservation window, we try to allocate a reservation
1015 * window for the inode starting from the goal, and then do the block
1016 * allocation within the reservation window.
1017 * This will avoid keeping on searching the reservation list again and
1018 * again when somebody is looking for a free block (without
1019 * reservation), and there are lots of free blocks, but they are all
1020 * being reserved.
1022 * We use a sorted doubly linked list for the per-filesystem reservation
1023 * list. The insert, remove and find-free-space (non-reserved) operations
1024 * on the sorted doubly linked list should be fast.
1028 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1029 unsigned int group, struct buffer_head *bitmap_bh,
1030 int goal, struct ext3_reserve_window_node * my_rsv,
1033 unsigned long group_first_block;
1040 * Make sure we use undo access for the bitmap, because it is critical
1041 * that we do the frozen_data COW on bitmap buffers in all cases even
1042 * if the buffer is in BJ_Forget state in the committing transaction.
1044 BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1045 fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
1052 * we don't deal with reservations when
1053 * the filesystem is mounted without reservations,
1054 * or the file is not a regular file,
1055 * or the last attempt to allocate a block with reservations turned on failed
1057 if (my_rsv == NULL) {
1058 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
1062 * goal is a group-relative block number (if there is a goal):
1063 * 0 <= goal < EXT3_BLOCKS_PER_GROUP(sb)
1064 * group_first_block is a filesystem-wide block number: the block
1065 * number of the first block in this group
1067 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
1068 group * EXT3_BLOCKS_PER_GROUP(sb);
1071 * Basically we will allocate a new block from the inode's reservation
1072 * window.
1074 * We need to allocate a new reservation window, if:
1075 * a) the inode does not have a reservation window; or
1076 * b) the last attempt to allocate a block from the existing
1077 * reservation failed; or
1078 * c) we come here with a goal and with a reservation window
1080 * We do not need to allocate a new reservation window if we come here
1081 * at the beginning with a goal and the goal is inside the window, or
1082 * if we don't have a goal but already have a reservation window;
1083 * then we can go allocate from the reservation window directly.
1086 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1087 !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
1088 ret = alloc_new_reservation(my_rsv, goal, sb,
1093 if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
1096 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
1097 || (my_rsv->rsv_end < group_first_block))
1099 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
1100 &my_rsv->rsv_window);
1102 my_rsv->rsv_alloc_hit++;
1103 break; /* succeed */
1108 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1110 fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
1118 BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1119 ext3_journal_release_buffer(handle, bitmap_bh);
1123 static int ext3_has_free_blocks(struct super_block *sb)
1125 struct ext3_sb_info *sbi = EXT3_SB(sb);
1126 int free_blocks, root_blocks, cond;
1128 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1129 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
1131 vxdprintk(VXD_CBIT(dlim, 3),
1132 "ext3_has_free_blocks(%p): free=%u, root=%u",
1133 sb, free_blocks, root_blocks);
1135 DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks);
1137 cond = (free_blocks < root_blocks + 1 &&
1138 !capable(CAP_SYS_RESOURCE) &&
1139 sbi->s_resuid != current->fsuid &&
1140 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));
1142 vxdprintk(VXD_CBIT(dlim, 3),
1143 "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d",
1144 sb, free_blocks, root_blocks,
1145 !capable(CAP_SYS_RESOURCE)?'1':'0',
1146 sbi->s_resuid, current->fsuid, cond?0:1);
1148 return (cond ? 0 : 1);
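/*
 * Example of the policy above (illustrative numbers): with
 * free_blocks = 100 and root_blocks = 100, an ordinary process fails
 * the check (100 < 100 + 1) and sees the filesystem as full, while a
 * task with CAP_SYS_RESOURCE, or whose fsuid matches s_resuid, or which
 * is in the s_resgid group, may still dip into the reserved blocks.
 */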
1152 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
1153 * it is profitable to retry the operation, this function will wait
1154 * for the current or committing transaction to complete, and then
1155 * return TRUE.
1157 int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1159 if (!ext3_has_free_blocks(sb) || (*retries)++ > 3)
1162 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1164 return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
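/*
 * Typical usage by callers (a sketch of the pattern, not code from this
 * file; some_ext3_allocation is a hypothetical caller):
 *
 *	int retries = 0;
 * retry:
 *	err = some_ext3_allocation(handle, inode, ...);
 *	if (err == -ENOSPC && ext3_should_retry_alloc(sb, &retries))
 *		goto retry;
 *
 * so a transient ENOSPC caused by not-yet-committed frees is retried a
 * bounded number of times after forcing a commit.
 */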
1168 * ext3_new_block uses a goal block to assist allocation. If the goal is
1169 * free, or there is a free block within 32 blocks of the goal, that block
1170 * is allocated. Otherwise a forward search is made for a free block; within
1171 * each block group the search first looks for an entire free byte in the block
1172 * bitmap, and then for any free bit if that fails.
1173 * This function also updates quota and i_blocks field.
1175 int ext3_new_block(handle_t *handle, struct inode *inode,
1176 unsigned long goal, int *errp)
1178 struct buffer_head *bitmap_bh = NULL;
1179 struct buffer_head *gdp_bh;
1183 int bgi; /* blockgroup iteration index */
1186 int performed_allocation = 0;
1188 struct super_block *sb;
1189 struct ext3_group_desc *gdp;
1190 struct ext3_super_block *es;
1191 struct ext3_sb_info *sbi;
1192 struct ext3_reserve_window_node *my_rsv = NULL;
1193 struct ext3_block_alloc_info *block_i;
1194 unsigned short windowsz = 0;
1196 static int goal_hits, goal_attempts;
1198 unsigned long ngroups;
1203 printk("ext3_new_block: nonexistent device");
1208 * Check quota for allocation of this block.
1210 if (DQUOT_ALLOC_BLOCK(inode, 1)) {
1214 if (DLIMIT_ALLOC_BLOCK(inode, 1))
1218 es = EXT3_SB(sb)->s_es;
1219 ext3_debug("goal=%lu.\n", goal);
1221 * Allocate a block from the reservation only when
1222 * the filesystem is mounted with reservations (the default, -o reservation),
1223 * it's a regular file, and
1224 * the desired window size is greater than 0 (one could use the ioctl
1225 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
1226 * reservation on that particular file)
1228 block_i = EXT3_I(inode)->i_block_alloc_info;
1229 if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1230 my_rsv = &block_i->rsv_window_node;
1232 if (!ext3_has_free_blocks(sb)) {
1238 * First, test whether the goal block is free.
1240 if (goal < le32_to_cpu(es->s_first_data_block) ||
1241 goal >= le32_to_cpu(es->s_blocks_count))
1242 goal = le32_to_cpu(es->s_first_data_block);
1243 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1244 EXT3_BLOCKS_PER_GROUP(sb);
1245 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1249 goal_group = group_no;
1251 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1253 * if there are not enough free blocks to make a new reservation,
1254 * turn off reservation for this allocation
1256 if (my_rsv && (free_blocks < windowsz)
1257 && (rsv_is_empty(&my_rsv->rsv_window)))
1260 if (free_blocks > 0) {
1261 ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
1262 EXT3_BLOCKS_PER_GROUP(sb));
1263 bitmap_bh = read_block_bitmap(sb, group_no);
1266 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
1267 bitmap_bh, ret_block, my_rsv, &fatal);
1274 ngroups = EXT3_SB(sb)->s_groups_count;
1278 * Now search the rest of the groups. We assume that
1279 * group_no and gdp correctly point to the last group visited.
1281 for (bgi = 0; bgi < ngroups; bgi++) {
1283 if (group_no >= ngroups)
1285 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1290 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1292 * skip this group if the number of
1293 * free blocks is less than half of the reservation
1294 * window size
1296 if (free_blocks <= (windowsz/2))
1300 bitmap_bh = read_block_bitmap(sb, group_no);
1303 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
1304 bitmap_bh, -1, my_rsv, &fatal);
1311 * We may end up with a bogus earlier ENOSPC error because the
1312 * filesystem is "full" of reservations, while
1313 * there may in fact be free blocks available on disk.
1314 * In this case, we just forget about the reservations and
1315 * do the block allocation as if there were no reservations.
1319 group_no = goal_group;
1322 /* No space left on the device */
1328 ext3_debug("using block group %d(%d)\n",
1329 group_no, gdp->bg_free_blocks_count);
1331 BUFFER_TRACE(gdp_bh, "get_write_access");
1332 fatal = ext3_journal_get_write_access(handle, gdp_bh);
1336 target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
1337 + le32_to_cpu(es->s_first_data_block);
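/*
 * Example (illustrative): with s_first_data_block = 0 and
 * EXT3_BLOCKS_PER_GROUP(sb) = 32768, a group-relative ret_block = 44 in
 * group_no = 2 becomes target_block = 44 + 2 * 32768 + 0 = 65580, the
 * filesystem-wide block number.
 */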
1339 if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
1340 target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
1341 in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
1342 EXT3_SB(sb)->s_itb_per_group))
1343 ext3_error(sb, "ext3_new_block",
1344 "Allocating block in system zone - "
1345 "block = %u", target_block);
1347 performed_allocation = 1;
1349 #ifdef CONFIG_JBD_DEBUG
1351 struct buffer_head *debug_bh;
1353 /* Record bitmap buffer state in the newly allocated block */
1354 debug_bh = sb_find_get_block(sb, target_block);
1356 BUFFER_TRACE(debug_bh, "state when allocated");
1357 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1361 jbd_lock_bh_state(bitmap_bh);
1362 spin_lock(sb_bgl_lock(sbi, group_no));
1363 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1364 if (ext3_test_bit(ret_block,
1365 bh2jh(bitmap_bh)->b_committed_data)) {
1366 printk("%s: block was unexpectedly set in "
1367 "b_committed_data\n", __FUNCTION__);
1370 ext3_debug("found bit %d\n", ret_block);
1371 spin_unlock(sb_bgl_lock(sbi, group_no));
1372 jbd_unlock_bh_state(bitmap_bh);
1375 /* ret_block was blockgroup-relative. Now it becomes fs-relative */
1376 ret_block = target_block;
1378 if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
1379 ext3_error(sb, "ext3_new_block",
1380 "block(%d) >= blocks count(%d) - "
1381 "block_group = %d, es == %p ", ret_block,
1382 le32_to_cpu(es->s_blocks_count), group_no, es);
1387 * It is up to the caller to add the new buffer to a journal
1388 * list of some description. We don't know in advance whether
1389 * the caller wants to use it as metadata or data.
1391 ext3_debug("allocating block %d. Goal hits %d of %d.\n",
1392 ret_block, goal_hits, goal_attempts);
1394 spin_lock(sb_bgl_lock(sbi, group_no));
1395 gdp->bg_free_blocks_count =
1396 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
1397 spin_unlock(sb_bgl_lock(sbi, group_no));
1398 percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
1400 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1401 err = ext3_journal_dirty_metadata(handle, gdp_bh);
1416 if (!performed_allocation)
1417 DLIMIT_FREE_BLOCK(inode, 1);
1421 ext3_std_error(sb, fatal);
1424 * Undo the block allocation
1426 if (!performed_allocation)
1427 DQUOT_FREE_BLOCK(inode, 1);
1432 unsigned long ext3_count_free_blocks(struct super_block *sb)
1434 unsigned long desc_count;
1435 struct ext3_group_desc *gdp;
1437 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1439 struct ext3_super_block *es;
1440 unsigned long bitmap_count, x;
1441 struct buffer_head *bitmap_bh = NULL;
1443 es = EXT3_SB(sb)->s_es;
1449 for (i = 0; i < ngroups; i++) {
1450 gdp = ext3_get_group_desc(sb, i, NULL);
1453 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1455 bitmap_bh = read_block_bitmap(sb, i);
1456 if (bitmap_bh == NULL)
1459 x = ext3_count_free(bitmap_bh, sb->s_blocksize);
1460 printk("group %d: stored = %d, counted = %lu\n",
1461 i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1465 printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
1466 le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
1467 return bitmap_count;
1471 for (i = 0; i < ngroups; i++) {
1472 gdp = ext3_get_group_desc(sb, i, NULL);
1475 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1483 block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
1485 return ext3_test_bit ((block -
1486 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
1487 EXT3_BLOCKS_PER_GROUP(sb), map);
1490 static inline int test_root(int a, int b)
1499 static int ext3_group_sparse(int group)
1505 return (test_root(group, 7) || test_root(group, 5) ||
1506 test_root(group, 3));
1510 * ext3_bg_has_super - number of blocks used by the superblock in group
1511 * @sb: superblock for filesystem
1512 * @group: group number to check
1514 * Return the number of blocks used by the superblock (primary or backup)
1515 * in this group. Currently this will be only 0 or 1.
1517 int ext3_bg_has_super(struct super_block *sb, int group)
1519 if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
1520 !ext3_group_sparse(group))
1526 * ext3_bg_num_gdb - number of blocks used by the group table in group
1527 * @sb: superblock for filesystem
1528 * @group: group number to check
1530 * Return the number of blocks used by the group descriptor table
1531 * (primary or backup) in this group. In the future there may be a
1532 * different number of descriptor blocks in each group.
1534 unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
1536 if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
1537 !ext3_group_sparse(group))
1539 return EXT3_SB(sb)->s_gdb_count;