2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
4 /* Reiserfs block (de)allocator, bitmap-based. */
6 #include <linux/time.h>
7 #include <linux/reiserfs_fs.h>
8 #include <linux/errno.h>
9 #include <linux/buffer_head.h>
10 #include <linux/kernel.h>
11 #include <linux/pagemap.h>
12 #include <linux/vmalloc.h>
13 #include <linux/reiserfs_fs_sb.h>
14 #include <linux/reiserfs_fs_i.h>
15 #include <linux/quotaops.h>
16 #include <linux/vs_dlimit.h>
/* NOTE(review): this file is a sampled fragment -- each row below carries its
 * original line number and those numbers skip, so intermediate lines are
 * missing.  Comments here describe only what the visible tokens show. */
/* Allocator tuning knobs.  Each _ALLOC_* constant is a bit index into the
 * per-superblock option word SB_ALLOC_OPTS(s); SET_OPTION()/TEST_OPTION()
 * set or test one of those bits by option name (token-pasted). */
18 #define PREALLOCATION_SIZE 9
20 /* different reiserfs block allocator options */
22 #define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)
24 #define _ALLOC_concentrating_formatted_nodes 0
25 #define _ALLOC_displacing_large_files 1
26 #define _ALLOC_displacing_new_packing_localities 2
27 #define _ALLOC_old_hashed_relocation 3
28 #define _ALLOC_new_hashed_relocation 4
29 #define _ALLOC_skip_busy 5
30 #define _ALLOC_displace_based_on_dirid 6
31 #define _ALLOC_hashed_formatted_nodes 7
32 #define _ALLOC_old_way 8
33 #define _ALLOC_hundredth_slices 9
34 #define _ALLOC_dirid_groups 10
35 #define _ALLOC_oid_groups 11
36 #define _ALLOC_packing_groups 12
/* Convenience predicates for the three most commonly consulted options. */
38 #define concentrating_formatted_nodes(s) test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
39 #define displacing_large_files(s) test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
40 #define displacing_new_packing_localities(s) test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
/* SET_OPTION logs and sets the named option bit (body partially elided in
 * this fragment -- the do/while wrapper lines are missing from view). */
42 #define SET_OPTION(optname) \
44 reiserfs_warning(s, "reiserfs: option \"%s\" is set", #optname); \
45 set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
47 #define TEST_OPTION(optname, s) \
48 test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
/* Translate an absolute block number into a (bitmap-block index, bit offset)
 * pair.  A bitmap block holds s_blocksize * 8 bits, hence the shift by
 * (s_blocksize_bits + 3) and the mask against (s_blocksize << 3) - 1. */
50 static inline void get_bit_address(struct super_block *s,
51 b_blocknr_t block, int *bmap_nr, int *offset)
53 /* It is in the bitmap block number equal to the block
54 * number divided by the number of bits in a block. */
55 *bmap_nr = block >> (s->s_blocksize_bits + 3);
56 /* Within that bitmap block it is located at bit offset *offset. */
57 *offset = block & ((s->s_blocksize << 3) - 1);
60 #ifdef CONFIG_REISERFS_CHECK
/* Debug-only (CONFIG_REISERFS_CHECK) sanity check: may block @block be freed
 * (bit_value == 0) or marked in use?  Rejects out-of-range blocks, bitmap
 * blocks themselves (both old- and new-format layouts), and the root block.
 * Return-value lines are elided in this fragment; presumably 0 on the warning
 * paths and 1 otherwise -- TODO confirm against the full source. */
61 int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
65 if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
67 "vs-4010: is_reusable: block number is out of range %lu (%u)",
68 block, SB_BLOCK_COUNT(s));
72 get_bit_address(s, block, &bmap, &offset);
74 /* Old format filesystem? Unlikely, but the bitmaps are all up front so
75 * we need to account for it. */
76 if (unlikely(test_bit(REISERFS_OLD_FORMAT,
77 &(REISERFS_SB(s)->s_properties)))) {
78 b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
/* Old format: all bitmap blocks sit contiguously right after the sb. */
79 if (block >= bmap1 && block <= bmap1 + SB_BMAP_NR(s)) {
80 reiserfs_warning(s, "vs: 4019: is_reusable: "
81 "bitmap block %lu(%u) can't be freed or reused",
82 block, SB_BMAP_NR(s));
/* New format branch (condition elided in this fragment). */
87 reiserfs_warning(s, "vs: 4020: is_reusable: "
88 "bitmap block %lu(%u) can't be freed or reused",
89 block, SB_BMAP_NR(s));
94 if (bmap >= SB_BMAP_NR(s)) {
96 "vs-4030: is_reusable: there is no so many bitmap blocks: "
97 "block=%lu, bitmap_nr=%d", block, bmap);
/* The root block must always remain allocated. */
101 if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
103 "vs-4050: is_reusable: this is root block (%u), "
104 "it must be busy", SB_ROOT_BLOCK(s));
110 #endif /* CONFIG_REISERFS_CHECK */
112 /* searches in journal structures for a given block number (bmap, off). If block
113 is found in reiserfs journal it suggests next free block candidate to test. */
/* Check whether bit (bmap, off) is pinned by the journal.  If the journal
 * supplies a hint, it becomes the next candidate; otherwise skip one bit
 * ahead to avoid re-testing the same position.  (Parameter list and return
 * statements are partially elided in this fragment.) */
114 static inline int is_block_in_journal(struct super_block *s, int bmap, int
119 if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
120 if (tmp) { /* hint supplied */
122 PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
124 (*next) = off + 1; /* inc offset to avoid looping. */
125 PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
127 PROC_INFO_INC(s, scan_bitmap.retry);
133 /* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
/* Scans a single bitmap block (index bmap_n) for a run of >= min and <= max
 * free bits starting at *beg, bounded by @boundary.  On success the bits are
 * set under journal protection and both the bitmap and the superblock free
 * count are marked dirty.  For unformatted nodes (@unfm) each bit is also
 * cross-checked against the journal.  Returns the number of allocated
 * blocks (return statements are elided in this fragment -- TODO confirm). */
135 static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
136 int bmap_n, int *beg, int boundary, int min,
139 struct super_block *s = th->t_super;
140 struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n];
141 struct buffer_head *bh;
145 BUG_ON(!th->t_trans_id);
147 RFALSE(bmap_n >= SB_BMAP_NR(s), "Bitmap %d is out of range (0..%d)",
148 bmap_n, SB_BMAP_NR(s) - 1);
149 PROC_INFO_INC(s, scan_bitmap.bmap);
150 /* this is unclear and lacks comments, explain how journal bitmaps
151 work here for the reader. Convey a sense of the design here. What
153 /* - I mean `a window of zero bits' as in description of this function - Zam. */
156 reiserfs_warning(s, "NULL bitmap info pointer for bitmap %d",
161 bh = reiserfs_read_bitmap_block(s, bmap_n);
/* Cheap early-out: cached free count says the window cannot fit here. */
167 if (bi->free_count < min) {
169 return 0; // No free blocks in this bitmap
172 /* search for a first zero bit -- beggining of a window */
173 *beg = reiserfs_find_next_zero_le_bit
174 ((unsigned long *)(bh->b_data), boundary, *beg);
176 if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block
177 * cannot contain a zero window of minimum size */
182 if (unfm && is_block_in_journal(s, bmap_n, *beg, beg))
184 /* first zero bit found; we check next bits */
185 for (end = *beg + 1;; end++) {
186 if (end >= *beg + max || end >= boundary
187 || reiserfs_test_le_bit(end, bh->b_data)) {
191 /* finding the other end of zero bit window requires looking into journal structures (in
192 * case of searching for free blocks for unformatted nodes) */
193 if (unfm && is_block_in_journal(s, bmap_n, end, &next))
197 /* now (*beg) points to beginning of zero bits window,
198 * (end) points to one bit after the window end */
199 if (end - *beg >= min) { /* it seems we have found window of proper size */
201 reiserfs_prepare_for_journal(s, bh, 1);
202 /* try to set all blocks used checking are they still free */
203 for (i = *beg; i < end; i++) {
204 /* It seems that we should not check in journal again. */
205 if (reiserfs_test_and_set_le_bit
207 /* bit was set by another process
208 * while we slept in prepare_for_journal() */
209 PROC_INFO_INC(s, scan_bitmap.stolen);
210 if (i >= *beg + min) { /* we can continue with smaller set of allocated blocks,
211 * if length of this set is more or equal to `min' */
215 /* otherwise we clear all bit were set ... */
217 reiserfs_test_and_clear_le_bit
219 reiserfs_restore_prepared_buffer(s, bh);
221 /* ... and search again in current block from beginning */
/* Commit: shrink cached free count and journal both dirty buffers. */
225 bi->free_count -= (end - *beg);
226 journal_mark_dirty(th, s, bh);
229 /* free block count calculation */
230 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
232 PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
233 journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
/* Hash a 32-bit id (dirid or objectid) onto a bitmap-block index in
 * [0, SB_BMAP_NR).  Special-case lines (e.g. for id == 0) are elided in
 * this fragment. */
242 static int bmap_hash_id(struct super_block *s, u32 id)
244 char *hash_in = NULL;
251 hash_in = (char *)(&id);
252 hash = keyed_hash(hash_in, 4);
253 bm = hash % SB_BMAP_NR(s);
257 /* this can only be true when SB_BMAP_NR = 1 */
258 if (bm >= SB_BMAP_NR(s))
264 * hashes the id and then returns > 0 if the block group for the
265 * corresponding hash is full
/* "Full" here means the hashed bitmap block has <= 60% of its bits free;
 * the bitmap is read eagerly when no cached hint exists so the decision is
 * made on real data rather than a guess. */
267 static inline int block_group_used(struct super_block *s, u32 id)
269 int bm = bmap_hash_id(s, id);
270 struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
272 /* If we don't have cached information on this bitmap block, we're
273 * going to have to load it later anyway. Loading it here allows us
274 * to make a better decision. This favors long-term performace gain
275 * with a better on-disk layout vs. a short term gain of skipping the
276 * read and potentially having a bad placement. */
277 if (info->first_zero_hint == 0) {
278 struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
282 if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) {
289 * the packing is returned in disk byte order
/* Choose the packing locality for a new object in @dir.  With the
 * packing_groups option: inherit the parent's dirid unless that group is
 * already heavily used (or the parent is locality 1, which some fsck
 * versions expect to stay special), in which case start a new group keyed
 * on the directory's own objectid.  Without the option: always use the
 * directory's objectid. */
291 __le32 reiserfs_choose_packing(struct inode * dir)
294 if (TEST_OPTION(packing_groups, dir->i_sb)) {
295 u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
297 * some versions of reiserfsck expect packing locality 1 to be
300 if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir))
301 packing = INODE_PKEY(dir)->k_objectid;
303 packing = INODE_PKEY(dir)->k_dir_id;
305 packing = INODE_PKEY(dir)->k_objectid;
309 /* Tries to find contiguous zero bit window (given size) in given region of
310 * bitmap and place new blocks there. Returns number of allocated blocks. */
/* Walks bitmap blocks from *start to @finish calling scan_bitmap_block()
 * on each.  With the skip_busy option and >5% of the fs free, a first pass
 * skips bitmap blocks that are more than ~90% full; a second pass (restarted
 * from *start) takes anything.  On success *start is updated to the absolute
 * block number of the allocation. */
311 static int scan_bitmap(struct reiserfs_transaction_handle *th,
312 b_blocknr_t * start, b_blocknr_t finish,
313 int min, int max, int unfm, unsigned long file_block)
315 int nr_allocated = 0;
316 struct super_block *s = th->t_super;
317 /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
318 * - Hans, it is not a block number - Zam. */
322 int off_max = s->s_blocksize << 3;
324 BUG_ON(!th->t_trans_id);
326 PROC_INFO_INC(s, scan_bitmap.call);
327 if (SB_FREE_BLOCKS(s) <= 0)
328 return 0; // No point in looking for more free blocks
330 get_bit_address(s, *start, &bm, &off);
331 get_bit_address(s, finish, &end_bm, &end_off);
/* Clamp both endpoints to the number of bitmap blocks on the fs. */
332 if (bm > SB_BMAP_NR(s))
334 if (end_bm > SB_BMAP_NR(s))
335 end_bm = SB_BMAP_NR(s);
337 /* When the bitmap is more than 10% free, anyone can allocate.
338 * When it's less than 10% free, only files that already use the
339 * bitmap are allowed. Once we pass 80% full, this restriction
342 * We do this so that files that grow later still have space close to
343 * their original allocation. This improves locality, and presumably
344 * performance as a result.
346 * This is only an allocation policy and does not make up for getting a
347 * bad hint. Decent hinting must be implemented for this to work well.
349 if (TEST_OPTION(skip_busy, s)
350 && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) {
351 for (; bm < end_bm; bm++, off = 0) {
352 if ((off && (!unfm || (file_block != 0)))
353 || SB_AP_BITMAP(s)[bm].free_count >
354 (s->s_blocksize << 3) / 10)
356 scan_bitmap_block(th, bm, &off, off_max,
361 /* we know from above that start is a reasonable number */
362 get_bit_address(s, *start, &bm, &off);
/* Fallback pass: take any bitmap block, then finish with a partial scan
 * of the last one up to end_off. */
365 for (; bm < end_bm; bm++, off = 0) {
367 scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
373 scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);
/* Convert (bitmap index, bit offset) back to an absolute block number. */
376 *start = bm * off_max + off;
/* Core of block freeing: clear the block's bit in its bitmap block, bump the
 * cached per-bitmap free count and the on-disk superblock free count, all
 * under journal protection.  For unformatted data blocks, also return the
 * block to the disk-limit and quota accounting. */
381 static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
382 struct inode *inode, b_blocknr_t block,
385 struct super_block *s = th->t_super;
386 struct reiserfs_super_block *rs;
387 struct buffer_head *sbh, *bmbh;
388 struct reiserfs_bitmap_info *apbi;
391 BUG_ON(!th->t_trans_id);
393 PROC_INFO_INC(s, free_block);
395 rs = SB_DISK_SUPER_BLOCK(s);
396 sbh = SB_BUFFER_WITH_SB(s);
397 apbi = SB_AP_BITMAP(s);
399 get_bit_address(s, block, &nr, &offset);
/* Refuse blocks whose bitmap index exceeds the on-disk bitmap count. */
401 if (nr >= sb_bmap_nr(rs)) {
402 reiserfs_warning(s, "vs-4075: reiserfs_free_block: "
403 "block %lu is out of range on %s",
404 block, reiserfs_bdevname(s));
408 bmbh = reiserfs_read_bitmap_block(s, nr);
412 reiserfs_prepare_for_journal(s, bmbh, 1);
414 /* clear bit for the given block in bit map */
415 if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
/* Double free detected: warn but continue (bit was already clear). */
416 reiserfs_warning(s, "vs-4080: reiserfs_free_block: "
417 "free_block (%s:%lu)[dev:blocknr]: bit already cleared",
418 reiserfs_bdevname(s), block);
420 apbi[nr].free_count++;
421 journal_mark_dirty(th, s, bmbh);
424 reiserfs_prepare_for_journal(s, sbh, 1);
425 /* update super block */
426 set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
428 journal_mark_dirty(th, s, sbh);
429 if (for_unformatted) {
430 DLIMIT_FREE_BLOCK(inode, 1);
431 DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
/* Public entry point for freeing a block: validates (debug builds), records
 * the block as freed in the journal FIRST (so a crash between the two steps
 * cannot lose the fact), then clears the bitmap via _reiserfs_free_block(). */
435 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
436 struct inode *inode, b_blocknr_t block,
439 struct super_block *s = th->t_super;
441 BUG_ON(!th->t_trans_id);
443 RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
444 RFALSE(is_reusable(s, block, 1) == 0,
445 "vs-4071: can not free such block");
446 /* mark it before we clear it, just in case */
447 journal_mark_freed(th, s, block);
448 _reiserfs_free_block(th, inode, block, for_unformatted);
451 /* preallocated blocks don't need to be run through journal_mark_freed */
/* Free a block that was only ever preallocated (never referenced by the
 * tree), so no journal_mark_freed() is needed; always treated as
 * unformatted (last argument 1). */
452 static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th,
453 struct inode *inode, b_blocknr_t block)
456 "vs-4060: trying to free block on nonexistent device",
457 RFALSE(is_reusable(th->t_super, block, 1) == 0,
458 "vs-4070: can not free such block");
459 BUG_ON(!th->t_trans_id);
460 _reiserfs_free_block(th, inode, block, 1);
/* Return all of an inode's preallocated blocks to the free pool, update the
 * stat data, and unlink the inode from the journal's prealloc list.
 * i_prealloc_block is restored to its pre-loop value afterwards (the loop
 * advances it while freeing) -- NOTE(review): intervening lines are elided
 * here, so the exact save/restore interplay should be confirmed upstream. */
463 static void __discard_prealloc(struct reiserfs_transaction_handle *th,
464 struct reiserfs_inode_info *ei)
466 unsigned long save = ei->i_prealloc_block;
468 struct inode *inode = &ei->vfs_inode;
469 BUG_ON(!th->t_trans_id);
470 #ifdef CONFIG_REISERFS_CHECK
471 if (ei->i_prealloc_count < 0)
472 reiserfs_warning(th->t_super,
473 "zam-4001:%s: inode has negative prealloc blocks count.",
476 while (ei->i_prealloc_count > 0) {
477 reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
478 ei->i_prealloc_block++;
479 ei->i_prealloc_count--;
483 reiserfs_update_sd(th, inode);
484 ei->i_prealloc_block = save;
485 list_del_init(&(ei->i_prealloc_list));
488 /* FIXME: It should be inline function */
/* Discard this inode's preallocation, if it has any. */
489 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
492 struct reiserfs_inode_info *ei = REISERFS_I(inode);
493 BUG_ON(!th->t_trans_id);
494 if (ei->i_prealloc_count)
495 __discard_prealloc(th, ei);
/* Drain the journal-wide prealloc list, discarding every inode's
 * preallocation.  __discard_prealloc() removes each entry from the list,
 * so the while loop terminates. */
498 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
500 struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;
502 BUG_ON(!th->t_trans_id);
504 while (!list_empty(plist)) {
505 struct reiserfs_inode_info *ei;
506 ei = list_entry(plist->next, struct reiserfs_inode_info,
508 #ifdef CONFIG_REISERFS_CHECK
/* An inode on the list with zero prealloc count indicates list corruption
 * or accounting skew -- warn in debug builds. */
509 if (!ei->i_prealloc_count) {
510 reiserfs_warning(th->t_super,
511 "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.",
515 __discard_prealloc(th, ei);
/* Default allocator policy: skip nearly-full bitmap groups, and group
 * blocks by directory id / packing locality. */
519 void reiserfs_init_alloc_options(struct super_block *s)
521 set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
522 set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
523 set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
526 /* block allocator related options are parsed here */
/* Parse the colon-separated "alloc=" mount-option string.  Each token is an
 * option name with an optional "=value" suffix; recognized names set their
 * _ALLOC_* bit and/or store a numeric parameter.  All prior settings are
 * cleared first, so the string fully replaces the configuration.  (Return
 * statements and some branches are elided in this fragment.) */
527 int reiserfs_parse_alloc_options(struct super_block *s, char *options)
529 char *this_char, *value;
531 REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */
533 while ((this_char = strsep(&options, ":")) != NULL) {
534 if ((value = strchr(this_char, '=')) != NULL)
537 if (!strcmp(this_char, "concentrating_formatted_nodes")) {
539 SET_OPTION(concentrating_formatted_nodes);
541 && *value) ? simple_strtoul(value, &value,
/* border is a percentage; out-of-range input falls back to 10. */
543 if (temp <= 0 || temp > 100) {
544 REISERFS_SB(s)->s_alloc_options.border = 10;
546 REISERFS_SB(s)->s_alloc_options.border =
551 if (!strcmp(this_char, "displacing_large_files")) {
552 SET_OPTION(displacing_large_files);
553 REISERFS_SB(s)->s_alloc_options.large_file_size =
555 && *value) ? simple_strtoul(value, &value, 0) : 16;
558 if (!strcmp(this_char, "displacing_new_packing_localities")) {
559 SET_OPTION(displacing_new_packing_localities);
563 if (!strcmp(this_char, "old_hashed_relocation")) {
564 SET_OPTION(old_hashed_relocation);
568 if (!strcmp(this_char, "new_hashed_relocation")) {
569 SET_OPTION(new_hashed_relocation);
573 if (!strcmp(this_char, "dirid_groups")) {
574 SET_OPTION(dirid_groups);
577 if (!strcmp(this_char, "oid_groups")) {
578 SET_OPTION(oid_groups);
581 if (!strcmp(this_char, "packing_groups")) {
582 SET_OPTION(packing_groups);
585 if (!strcmp(this_char, "hashed_formatted_nodes")) {
586 SET_OPTION(hashed_formatted_nodes);
590 if (!strcmp(this_char, "skip_busy")) {
591 SET_OPTION(skip_busy);
595 if (!strcmp(this_char, "hundredth_slices")) {
596 SET_OPTION(hundredth_slices);
600 if (!strcmp(this_char, "old_way")) {
605 if (!strcmp(this_char, "displace_based_on_dirid")) {
606 SET_OPTION(displace_based_on_dirid);
610 if (!strcmp(this_char, "preallocmin")) {
611 REISERFS_SB(s)->s_alloc_options.preallocmin =
613 && *value) ? simple_strtoul(value, &value, 0) : 4;
617 if (!strcmp(this_char, "preallocsize")) {
618 REISERFS_SB(s)->s_alloc_options.preallocsize =
620 && *value) ? simple_strtoul(value, &value,
/* Unrecognized token: warn (error return elided in this fragment). */
626 reiserfs_warning(s, "zam-4001: %s : unknown option - %s",
627 __FUNCTION__, this_char);
631 reiserfs_warning(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
/* Pick a search start by hashing a 4-byte key field into [beg, end).
 * Formatted nodes hash on the key's dir_id; otherwise the inode's dir_id or
 * objectid is used depending on the displace_based_on_dirid option. */
635 static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
638 if (hint->formatted_node) {
639 hash_in = (char *)&hint->key.k_dir_id;
642 //hint->search_start = hint->beg;
643 hash_in = (char *)&hint->key.k_dir_id;
645 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
646 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
649 (char *)(&INODE_PKEY(hint->inode)->k_objectid);
653 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
657 * Relocation based on dirid, hashing them into a given bitmap block
658 * files. Formatted nodes are unaffected, a seperate policy covers them
/* Group allocations by directory id: hash the dirid to a bitmap-block group
 * and start the search at that group's first block.  A small slice at the
 * start of each group is reserved for metadata (the blocksize/2 offset). */
660 static void dirid_groups(reiserfs_blocknr_hint_t * hint)
665 struct super_block *sb = hint->th->t_super;
667 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
668 else if (hint->formatted_node)
669 dirid = hint->key.k_dir_id;
672 bm = bmap_hash_id(sb, dirid);
673 hash = bm * (sb->s_blocksize << 3);
674 /* give a portion of the block group to metadata */
676 hash += sb->s_blocksize / 2;
677 hint->search_start = hash;
682 * Relocation based on oid, hashing them into a given bitmap block
683 * files. Formatted nodes are unaffected, a seperate policy covers them
/* Group allocations by object id, analogous to dirid_groups(); objects in
 * the root directory (and its first subdirs) are kept near the start of
 * the disk by pinning them to group 1. */
685 static void oid_groups(reiserfs_blocknr_hint_t * hint)
693 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
695 /* keep the root dir and it's first set of subdirs close to
696 * the start of the disk
699 hash = (hint->inode->i_sb->s_blocksize << 3);
701 oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
702 bm = bmap_hash_id(hint->inode->i_sb, oid);
703 hash = bm * (hint->inode->i_sb->s_blocksize << 3);
705 hint->search_start = hash;
709 /* returns 1 if it finds an indirect item and gets valid hint info
710 * from it, otherwise 0
/* Derive a search_start hint from the node's left neighbor in tree order:
 * default to the buffer's own block number, and for unformatted nodes scan
 * leftwards within the indirect item for the first non-hole pointer.
 * Without a path, the caller's pre-set search_start is kept. */
712 static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
714 struct treepath *path;
715 struct buffer_head *bh;
716 struct item_head *ih;
721 if (!hint->path) /* reiserfs code can call this function w/o pointer to path
722 * structure supplied; then we rely on supplied search_start */
726 bh = get_last_bh(path);
727 RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
729 pos_in_item = path->pos_in_item;
730 item = get_item(path);
732 hint->search_start = bh->b_blocknr;
734 if (!hint->formatted_node && is_indirect_le_ih(ih)) {
735 /* for indirect item: go to left and look for the first non-hole entry
736 in the indirect item */
737 if (pos_in_item == I_UNFM_NUM(ih))
739 // pos_in_item = I_UNFM_NUM (ih) - 1;
740 while (pos_in_item >= 0) {
741 int t = get_block_num(item, pos_in_item);
743 hint->search_start = t;
751 /* does result value fit into specified region? */
755 /* should be, if formatted node, then try to put on first part of the device
756 specified as number of percent with mount option device, else try to put
757 on last of device. This is not to say it is good code to do so,
758 but the effect should be measured. */
/* Split the device at block_count / border: formatted nodes search below
 * the border, everything else above it (the else branch is elided here). */
759 static inline void set_border_in_hint(struct super_block *s,
760 reiserfs_blocknr_hint_t * hint)
763 SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;
765 if (hint->formatted_node)
766 hint->end = border - 1;
/* For large files: move search_start to a hash of the inode's dir_id (if
 * displace_based_on_dirid) or objectid, spread over [beg, end). */
771 static inline void displace_large_file(reiserfs_blocknr_hint_t * hint)
773 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
776 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id),
777 4) % (hint->end - hint->beg);
781 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid),
782 4) % (hint->end - hint->beg);
/* Place a formatted node by hashing a key field into [beg, end); the field
 * chosen depends on whether an inode is available and on the
 * displace_based_on_dirid option. */
785 static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint)
790 hash_in = (char *)&hint->key.k_dir_id;
791 else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
792 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
794 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);
797 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
/* True exactly when this allocation is the one that crosses the configured
 * large_file_size threshold (compares the file-relative block number). */
801 this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *
804 return hint->block ==
805 REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
808 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* Displace a brand-new packing locality by hashing its objectid into
 * [beg, end); clears the one-shot displace_new_blocks flag on the handle. */
809 static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
811 struct in_core_key *key = &hint->key;
813 hint->th->displace_new_blocks = 0;
815 hint->beg + keyed_hash((char *)(&key->k_objectid),
816 4) % (hint->end - hint->beg);
/* Legacy relocation: hash the inode's dir_id into the region and raise
 * search_start to that border if it is currently below it.  Skipped for
 * formatted nodes or when no inode is available. */
820 static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint)
825 if (hint->formatted_node || hint->inode == NULL) {
829 hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
831 hint->beg + (u32) keyed_hash(((char *)(&hash_in)),
832 4) % (hint->end - hint->beg - 1);
833 if (border > hint->search_start)
834 hint->search_start = border;
/* Oldest placement policy: derive the border directly from dir_id modulo
 * the region size (no keyed hash) and raise search_start to it. */
839 static inline int old_way(reiserfs_blocknr_hint_t * hint)
843 if (hint->formatted_node || hint->inode == NULL) {
849 le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end -
851 if (border > hint->search_start)
852 hint->search_start = border;
/* Partition the device into 100 slices and hash dir_id to one of them;
 * only move search_start when it currently lies outside the chosen slice. */
857 static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
859 struct in_core_key *key = &hint->key;
860 b_blocknr_t slice_start;
863 (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100);
864 if (slice_start > hint->search_start
865 || slice_start + (hint->end / 100) <= hint->search_start) {
866 hint->search_start = slice_start;
/* Policy dispatcher: applies the configured placement heuristics in priority
 * order to compute hint->search_start before scanning bitmaps.  Several
 * branches return early (those return lines are elided in this fragment);
 * later heuristics are also gated on whether get_left_neighbor() already
 * produced a hint (unfm_hint). */
870 static void determine_search_start(reiserfs_blocknr_hint_t * hint,
873 struct super_block *s = hint->th->t_super;
877 hint->end = SB_BLOCK_COUNT(s) - 1;
879 /* This is former border algorithm. Now with tunable border offset */
880 if (concentrating_formatted_nodes(s))
881 set_border_in_hint(s, hint);
883 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
884 /* whenever we create a new directory, we displace it. At first we will
885 hash for location, later we might look for a moderately empty place for
887 if (displacing_new_packing_localities(s)
888 && hint->th->displace_new_blocks) {
889 displace_new_packing_locality(hint);
891 /* we do not continue determine_search_start,
892 * if new packing locality is being displaced */
897 /* all persons should feel encouraged to add more special cases here and
900 if (displacing_large_files(s) && !hint->formatted_node
901 && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
902 displace_large_file(hint);
906 /* if none of our special cases is relevant, use the left neighbor in the
907 tree order of the new node we are allocating for */
908 if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
909 hash_formatted_node(hint);
913 unfm_hint = get_left_neighbor(hint);
915 /* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
916 new blocks are displaced based on directory ID. Also, if suggested search_start
917 is less than last preallocated block, we start searching from it, assuming that
918 HDD dataflow is faster in forward direction */
919 if (TEST_OPTION(old_way, s)) {
920 if (!hint->formatted_node) {
921 if (!reiserfs_hashed_relocation(s))
923 else if (!reiserfs_no_unhashed_relocation(s))
924 old_hashed_relocation(hint);
/* Prefer searching forward from the inode's last preallocated block. */
927 && hint->search_start <
928 REISERFS_I(hint->inode)->i_prealloc_block)
930 REISERFS_I(hint->inode)->i_prealloc_block;
935 /* This is an approach proposed by Hans */
936 if (TEST_OPTION(hundredth_slices, s)
937 && !(displacing_large_files(s) && !hint->formatted_node)) {
938 hundredth_slices(hint);
942 /* old_hashed_relocation only works on unformatted */
943 if (!unfm_hint && !hint->formatted_node &&
944 TEST_OPTION(old_hashed_relocation, s)) {
945 old_hashed_relocation(hint);
947 /* new_hashed_relocation works with both formatted/unformatted nodes */
948 if ((!unfm_hint || hint->formatted_node) &&
949 TEST_OPTION(new_hashed_relocation, s)) {
950 new_hashed_relocation(hint);
952 /* dirid grouping works only on unformatted nodes */
953 if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
956 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
957 if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
962 /* oid grouping works only on unformatted nodes */
963 if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
/* Decide how much to preallocate: only for unformatted allocations on
 * regular files whose size has reached preallocmin blocks; the amount is
 * taken from the preallocsize mount option.  Otherwise prealloc_size is 0. */
969 static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
971 /* make minimum size a mount option and benchmark both ways */
972 /* we preallocate blocks only for regular files, specific size */
973 /* benchmark preallocating always and see what happens */
975 hint->prealloc_size = 0;
977 if (!hint->formatted_node && hint->preallocate) {
978 if (S_ISREG(hint->inode->i_mode)
979 && hint->inode->i_size >=
980 REISERFS_SB(hint->th->t_super)->s_alloc_options.
981 preallocmin * hint->inode->i_sb->s_blocksize)
982 hint->prealloc_size =
983 REISERFS_SB(hint->th->t_super)->s_alloc_options.
989 /* XXX I know it could be merged with upper-level function;
990 but may be result function would be too complex. */
/* Allocate up to amount_needed (+ prealloc_size) blocks in [start, finish]
 * without wrapping around the device end.  Blocks beyond amount_needed go
 * onto the inode's preallocation list.  Returns how many of the requested
 * blocks were obtained (amount_needed - rest). */
991 static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
992 b_blocknr_t * new_blocknrs,
994 b_blocknr_t finish, int min,
998 int rest = amount_needed;
1001 while (rest > 0 && start <= finish) {
1002 nr_allocated = scan_bitmap(hint->th, &start, finish, min,
1003 rest + prealloc_size,
1004 !hint->formatted_node, hint->block);
1006 if (nr_allocated == 0) /* no new blocks allocated, return */
1009 /* fill free_blocknrs array first */
1010 while (rest > 0 && nr_allocated > 0) {
1011 *new_blocknrs++ = start++;
1016 /* do we have something to fill prealloc. array also ? */
1017 if (nr_allocated > 0) {
1018 /* it means prealloc_size was greater that 0 and we do preallocation */
1019 list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
1020 &SB_JOURNAL(hint->th->t_super)->
1022 REISERFS_I(hint->inode)->i_prealloc_block = start;
1023 REISERFS_I(hint->inode)->i_prealloc_count =
1029 return (amount_needed - rest);
/* Full allocation driver: charge quota / disk limits up front, then try up
 * to three search regions (hint..end, beg..hint, 0..beg) via
 * allocate_without_wrapping_disk().  On total failure every block already
 * taken is freed and the quota charge refunded before returning
 * NO_DISK_SPACE; on partial preallocation the unused quota is refunded.
 * (Loop-control and some branch lines are elided in this fragment.) */
1032 static inline int blocknrs_and_prealloc_arrays_from_search_start
1033 (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
1034 int amount_needed) {
1035 struct super_block *s = hint->th->t_super;
1036 b_blocknr_t start = hint->search_start;
1037 b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
1039 int nr_allocated = 0;
1042 determine_prealloc_size(hint);
1043 if (!hint->formatted_node) {
1045 #ifdef REISERQUOTA_DEBUG
1046 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1047 "reiserquota: allocating %d blocks id=%u",
1048 amount_needed, hint->inode->i_uid);
/* Charge quota first; on dlimit failure the quota charge is rolled back. */
1050 quota_ret = DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode,
1053 return QUOTA_EXCEEDED;
1054 if (DLIMIT_ALLOC_BLOCK(hint->inode, amount_needed)) {
1055 DQUOT_FREE_BLOCK_NODIRTY(hint->inode,
1057 return NO_DISK_SPACE;
1060 if (hint->preallocate && hint->prealloc_size) {
1061 #ifdef REISERQUOTA_DEBUG
1062 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1063 "reiserquota: allocating (prealloc) %d blocks id=%u",
1064 hint->prealloc_size, hint->inode->i_uid);
1066 quota_ret = DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
1067 hint->prealloc_size);
1069 DLIMIT_ALLOC_BLOCK(hint->inode, hint->prealloc_size)) {
1070 DQUOT_FREE_BLOCK_NODIRTY(hint->inode,
1071 hint->prealloc_size);
/* Quota denied preallocation: continue without it. */
1075 hint->preallocate = hint->prealloc_size = 0;
1077 /* for unformatted nodes, force large allocations */
1082 case 0: /* Search from hint->search_start to end of disk */
1083 start = hint->search_start;
1084 finish = SB_BLOCK_COUNT(s) - 1;
1086 case 1: /* Search from hint->beg to hint->search_start */
1088 finish = hint->search_start;
1090 case 2: /* Last chance: Search from 0 to hint->beg */
1094 default: /* We've tried searching everywhere, not enough space */
1095 /* Free the blocks */
1096 if (!hint->formatted_node) {
1097 #ifdef REISERQUOTA_DEBUG
1098 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1099 "reiserquota: freeing (nospace) %d blocks id=%u",
1101 hint->prealloc_size -
1103 hint->inode->i_uid);
1105 /* Free not allocated blocks */
1106 blocks = amount_needed + hint->prealloc_size - nr_allocated;
1107 DLIMIT_FREE_BLOCK(hint->inode, blocks);
1108 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, blocks);
/* Return the blocks already grabbed in earlier passes. */
1110 while (nr_allocated--)
1111 reiserfs_free_block(hint->th, hint->inode,
1112 new_blocknrs[nr_allocated],
1113 !hint->formatted_node);
1115 return NO_DISK_SPACE;
1117 } while ((nr_allocated += allocate_without_wrapping_disk(hint,
/* Success path: refund quota for preallocation that did not materialize. */
1127 if (!hint->formatted_node &&
1128 amount_needed + hint->prealloc_size >
1129 nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
1130 /* Some of preallocation blocks were not allocated */
1131 #ifdef REISERQUOTA_DEBUG
1132 reiserfs_debug(s, REISERFS_DEBUG_CODE,
1133 "reiserquota: freeing (failed prealloc) %d blocks id=%u",
1134 amount_needed + hint->prealloc_size -
1136 REISERFS_I(hint->inode)->i_prealloc_count,
1137 hint->inode->i_uid);
1139 blocks = amount_needed + hint->prealloc_size - nr_allocated -
1140 REISERFS_I(hint->inode)->i_prealloc_count;
1141 DLIMIT_FREE_BLOCK(hint->inode, blocks);
1142 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, blocks);
1148 /* grab new blocknrs from preallocated list */
1149 /* return amount still needed after using them */
/* Satisfy as much of the request as possible from the inode's existing
 * preallocation, consuming consecutive preallocated block numbers; when the
 * preallocation is exhausted the inode is dropped from the prealloc list. */
1150 static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
1151 b_blocknr_t * new_blocknrs,
1154 struct inode *inode = hint->inode;
1156 if (REISERFS_I(inode)->i_prealloc_count > 0) {
1157 while (amount_needed) {
1159 *new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
1160 REISERFS_I(inode)->i_prealloc_count--;
1164 if (REISERFS_I(inode)->i_prealloc_count <= 0) {
1165 list_del(&REISERFS_I(inode)->i_prealloc_list);
1170 /* return amount still needed after using preallocated blocks */
1171 return amount_needed;
/* Public allocation entry point.  Order of operations: verify enough free
 * (minus reserved) blocks exist, consume the inode's preallocation when
 * permitted, compute a search start, then run the full allocator.  If the
 * final allocation fails, blocks already handed out from the prealloc list
 * are freed rather than returned to it. */
1174 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us /* Amount of blocks we have
1175 already reserved */ )
1177 int initial_amount_needed = amount_needed;
1179 struct super_block *s = hint->th->t_super;
1181 /* Check if there is enough space, taking into account reserved space */
1182 if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
1183 amount_needed - reserved_by_us)
1184 return NO_DISK_SPACE;
1185 /* should this be if !hint->inode && hint->preallocate? */
1186 /* do you mean hint->formatted_node can be removed ? - Zam */
1187 /* hint->formatted_node cannot be removed because we try to access
1188 inode information here, and there is often no inode assotiated with
1189 metadata allocations - green */
1191 if (!hint->formatted_node && hint->preallocate) {
1192 amount_needed = use_preallocated_list_if_available
1193 (hint, new_blocknrs, amount_needed);
1194 if (amount_needed == 0) /* all blocknrs we need we got from
1197 new_blocknrs += (initial_amount_needed - amount_needed);
1200 /* find search start and save it in hint structure */
1201 determine_search_start(hint, amount_needed);
/* Clamp a hint that points past the end of the device. */
1202 if (hint->search_start >= SB_BLOCK_COUNT(s))
1203 hint->search_start = SB_BLOCK_COUNT(s) - 1;
1205 /* allocation itself; fill new_blocknrs and preallocation arrays */
1206 ret = blocknrs_and_prealloc_arrays_from_search_start
1207 (hint, new_blocknrs, amount_needed);
1209 /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
1210 * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
1213 if (ret != CARRY_ON) {
1214 while (amount_needed++ < initial_amount_needed) {
1215 reiserfs_free_block(hint->th, hint->inode,
1216 *(--new_blocknrs), 1);
1222 /* These 2 functions are here to provide blocks reservation to the rest of kernel */
1223 /* Reserve @blocks amount of blocks in fs pointed by @sb. Caller must make sure
1224 there are actually this much blocks on the FS available */
/* Increment the superblock's reserved-block counter under bitmap_lock so
 * subsequent allocations see the space as spoken for. */
1225 void reiserfs_claim_blocks_to_be_allocated(struct super_block *sb, /* super block of
1229 int blocks /* How much to reserve */
1233 /* Fast case, if reservation is zero - exit immediately. */
1237 spin_lock(&REISERFS_SB(sb)->bitmap_lock);
1238 REISERFS_SB(sb)->reserved_blocks += blocks;
1239 spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
1242 /* Unreserve @blocks amount of blocks in fs pointed by @sb */
/* Mirror of reiserfs_claim_blocks_to_be_allocated(): decrement the
 * reserved-block counter under bitmap_lock; going negative indicates an
 * unbalanced claim/release pair (debug assertion below). */
1243 void reiserfs_release_claimed_blocks(struct super_block *sb, /* super block of
1247 int blocks /* How much to unreserve */
1251 /* Fast case, if unreservation is zero - exit immediately. */
1255 spin_lock(&REISERFS_SB(sb)->bitmap_lock);
1256 REISERFS_SB(sb)->reserved_blocks -= blocks;
1257 spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
1258 RFALSE(REISERFS_SB(sb)->reserved_blocks < 0,
1259 "amount of blocks reserved became zero?");
1262 /* This function estimates how much pages we will be able to write to FS
1263 used for reiserfs_file_write() purposes for now. */
/* (free blocks - reserved blocks) expressed in page-cache pages; clamped
 * to zero so callers never see a negative estimate. */
1264 int reiserfs_can_fit_pages(struct super_block *sb /* superblock of filesystem
1265 to estimate space */ )
1269 spin_lock(&REISERFS_SB(sb)->bitmap_lock);
1271 (SB_FREE_BLOCKS(sb) -
1272 REISERFS_SB(sb)->reserved_blocks) >> (PAGE_CACHE_SHIFT -
1273 sb->s_blocksize_bits);
1274 spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
1276 return space > 0 ? space : 0;
/* Populate a bitmap_info from a freshly read bitmap buffer: scan the words
 * back-to-front accumulating free_count and remembering the lowest zero bit
 * as first_zero_hint.  Words of all-zero and all-one bits are handled
 * without a per-bit loop.  Bit 0 covers the bitmap block itself and must be
 * set, hence the BUG_ON. */
1279 void reiserfs_cache_bitmap_metadata(struct super_block *sb,
1280 struct buffer_head *bh,
1281 struct reiserfs_bitmap_info *info)
1283 unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);
/* Start the hint one past the last bit; any real zero bit lowers it. */
1285 info->first_zero_hint = 1 << (sb->s_blocksize_bits + 3);
1287 while (--cur >= (unsigned long *)bh->b_data) {
1288 int base = ((char *)cur - bh->b_data) << 3;
1290 /* 0 and ~0 are special, we can optimize for them */
1292 info->first_zero_hint = base;
1293 info->free_count += BITS_PER_LONG;
1294 } else if (*cur != ~0L) { /* A mix, investigate */
1296 for (b = BITS_PER_LONG - 1; b >= 0; b--) {
1297 if (!reiserfs_test_le_bit(b, cur)) {
1298 info->first_zero_hint = base + b;
1304 /* The first bit must ALWAYS be 1 */
1305 BUG_ON(info->first_zero_hint == 0);
/* Read bitmap block @bitmap from disk.  New-format filesystems place one
 * bitmap block at the start of each bitmap group (blocksize*8 blocks apart,
 * with bitmap 0 just after the superblock); old-format filesystems pack all
 * bitmaps contiguously after the superblock.  Caches first_zero_hint /
 * free_count on first read.  Returns the buffer_head (error-path return
 * lines are elided in this fragment). */
1308 struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1309 unsigned int bitmap)
1311 b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
1312 struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
1313 struct buffer_head *bh;
1315 /* Way old format filesystems had the bitmaps packed up front.
1316 * I doubt there are any of these left, but just in case... */
1317 if (unlikely(test_bit(REISERFS_OLD_FORMAT,
1318 &(REISERFS_SB(sb)->s_properties))))
1319 block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
1320 else if (bitmap == 0)
1321 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
1323 bh = sb_bread(sb, block);
1325 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
1326 "reading failed", __FUNCTION__, block);
/* sb_bread may return a locked buffer; wait until I/O completes. */
1328 if (buffer_locked(bh)) {
1329 PROC_INFO_INC(sb, scan_bitmap.wait);
1330 __wait_on_buffer(bh);
1332 BUG_ON(!buffer_uptodate(bh));
1333 BUG_ON(atomic_read(&bh->b_count) == 0);
1335 if (info->first_zero_hint == 0)
1336 reiserfs_cache_bitmap_metadata(sb, bh, info);
/* Allocate and zero the in-core array of per-bitmap-block info structs
 * (one entry per bitmap block on the fs).  Error-return lines for a failed
 * vmalloc are elided in this fragment. */
1342 int reiserfs_init_bitmap_cache(struct super_block *sb)
1344 struct reiserfs_bitmap_info *bitmap;
1346 bitmap = vmalloc(sizeof (*bitmap) * SB_BMAP_NR(sb));
1350 memset(bitmap, 0, sizeof (*bitmap) * SB_BMAP_NR(sb));
1352 SB_AP_BITMAP(sb) = bitmap;
1357 void reiserfs_free_bitmap_cache(struct super_block *sb)
1359 if (SB_AP_BITMAP(sb)) {
1360 vfree(SB_AP_BITMAP(sb));
1361 SB_AP_BITMAP(sb) = NULL;