/*
 *  linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/sched.h>
17 #include <asm/bitops.h>
18 #include <asm/byteorder.h>
23 #undef UFS_BALLOC_DEBUG
25 #ifdef UFS_BALLOC_DEBUG
26 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
31 unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
32 unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
34 unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
35 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
36 void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
/*
 * ufs_free_fragments - return 'count' fragments, starting at 'fragment',
 * to the free-fragment bitmap of their cylinder group, and update the
 * per-cg, per-fs and frsum (fragment-run-size) free-space statistics.
 * If the containing block becomes wholly free, reassemble it into a free
 * block (convert nffree counts into an nbfree count).
 *
 * NOTE(review): the embedded numbering in this dump has gaps (48-49, 52,
 * 57-59, 64-66, ...), so statements such as 'sb = inode->i_sb;', error-path
 * returns/gotos and several closing braces are missing from this view.
 */
41 void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
42 struct super_block * sb;
43 struct ufs_sb_private_info * uspi;
44 struct ufs_super_block_first * usb1;
45 struct ufs_cg_private_info * ucpi;
46 struct ufs_cylinder_group * ucg;
47 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
50 uspi = UFS_SB(sb)->s_uspi;
51 usb1 = ubh_get_usb_first(USPI_UBH);
53 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Sanity: the run must not cross a fragments-per-group boundary. */
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group and the bit offset of 'fragment' inside it. */
60 cgno = ufs_dtog(fragment);
61 bit = ufs_dtogd(fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
67 ucpi = ufs_load_cylinder (sb, cgno);
70 ucg = ubh_get_ucg (UCPI_UBH);
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
/* Remove the old fragment-run accounting for this block before the
 * bitmap changes; it is re-added below once the bits are set. */
78 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Mark each freed fragment in the free bitmap; a bit already set means
 * a double free and is reported, not repeated. */
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
83 else ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
/* Release quota for the freed fragments. */
87 DQUOT_FREE_BLOCK (inode, count);
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
93 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
97 * Trying to reassemble free fragments into block
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
/* Whole block is now free: convert fragment counts to a block count. */
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* Per-cylinder and per-rotational-position block counts. */
109 cylno = ufs_cbtocylno (bbase);
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push the modified superblock/cg buffers out; write synchronously
 * when the filesystem is mounted MS_SYNCHRONOUS. */
114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) {
117 ubh_wait_on_buffer (UCPI_UBH);
118 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
119 ubh_wait_on_buffer (UCPI_UBH);
129 UFSD(("EXIT (FAILED)\n"))
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
/*
 * ufs_free_blocks - free whole blocks: both 'fragment' and 'count' must be
 * block-aligned (multiples of fragments-per-block). Each block is marked
 * free in the cg bitmap and the block-level statistics are updated.
 *
 * NOTE(review): gaps in the embedded numbering (143-144, 147, 153-159,
 * 169-172, 206-220, ...) mean 'sb = inode->i_sb;', the overflow/multi-cg
 * loop structure, returns and closing braces are not visible here.
 */
136 void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
137 struct super_block * sb;
138 struct ufs_sb_private_info * uspi;
139 struct ufs_super_block_first * usb1;
140 struct ufs_cg_private_info * ucpi;
141 struct ufs_cylinder_group * ucg;
142 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
145 uspi = UFS_SB(sb)->s_uspi;
146 usb1 = ubh_get_usb_first(USPI_UBH);
148 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Both start and length must be block-aligned for whole-block frees. */
150 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
151 ufs_error (sb, "ufs_free_blocks", "internal error, "
152 "fragment %u, count %u\n", fragment, count);
160 cgno = ufs_dtog (fragment);
161 bit = ufs_dtogd (fragment);
162 if (cgno >= uspi->s_ncg) {
163 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
166 end_bit = bit + count;
/* 'overflow' captures the part of the run that spills past this cylinder
 * group; presumably handled by a later pass — lines missing from view. */
167 if (end_bit > uspi->s_fpg) {
168 overflow = bit + count - uspi->s_fpg;
173 ucpi = ufs_load_cylinder (sb, cgno);
176 ucg = ubh_get_ucg (UCPI_UBH);
177 if (!ufs_cg_chkmagic(sb, ucg)) {
178 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one whole block per iteration (stride = fragments per block). */
182 for (i = bit; i < end_bit; i += uspi->s_fpb) {
183 blkno = ufs_fragstoblks(i);
184 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
187 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
189 ufs_clusteracct (sb, ucpi, blkno, 1);
/* Quota is released in fragment units, one block's worth at a time. */
190 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
192 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
193 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
194 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
195 cylno = ufs_cbtocylno(i);
196 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
197 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Flush metadata; honour synchronous mounts. */
200 ubh_mark_buffer_dirty (USPI_UBH);
201 ubh_mark_buffer_dirty (UCPI_UBH);
202 if (sb->s_flags & MS_SYNCHRONOUS) {
203 ubh_wait_on_buffer (UCPI_UBH);
204 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
205 ubh_wait_on_buffer (UCPI_UBH);
221 UFSD(("EXIT (FAILED)\n"))
/*
 * NULLIFY_FRAGMENTS - zero-fill the newly allocated fragments
 * [oldcount, newcount) of the run starting at 'result': get each buffer,
 * memset it, mark it uptodate+dirty, and write it out immediately if the
 * inode is synchronous. Relies on locals sb/bh/result/oldcount/newcount/i
 * of the expanding function (used from ufs_new_fragments below).
 * NOTE(review): the terminating line(s) of this macro (original 235-236)
 * are missing from this dump; no comments can be placed inside a
 * backslash-continued macro body.
 */
227 #define NULLIFY_FRAGMENTS \
228 for (i = oldcount; i < newcount; i++) { \
229 bh = sb_getblk(sb, result + i); \
230 memset (bh->b_data, 0, sb->s_blocksize); \
231 set_buffer_uptodate(bh); \
232 mark_buffer_dirty (bh); \
233 if (IS_SYNC(inode)) \
234 sync_dirty_buffer(bh); \
/*
 * ufs_new_fragments - central fragment allocator. '*p' is the on-disk
 * (fs-endian) pointer being (re)allocated; 'fragment' the logical fragment,
 * 'goal' the preferred location, 'count' the number of fragments wanted,
 * '*err' the error return. Three visible strategies:
 *   1) fresh allocation when nothing was allocated yet,
 *   2) in-place extension via ufs_add_fragments(),
 *   3) allocate a new area and relocate the old fragments' data into it
 *      (driven by the fs_optim space/time heuristic).
 *
 * NOTE(review): many lines are missing from this dump (e.g. 248-249 with
 * 'sb = inode->i_sb;', the if/else and return statements around lines
 * 264-333, 'break'/'case' labels in the switch, and the NULLIFY_FRAGMENTS
 * invocations) — control flow here cannot be fully reconstructed from
 * this view.
 */
238 unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
239 unsigned goal, unsigned count, int * err )
241 struct super_block * sb;
242 struct ufs_sb_private_info * uspi;
243 struct ufs_super_block_first * usb1;
244 struct buffer_head * bh;
245 unsigned cgno, oldcount, newcount, tmp, request, i, result;
247 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
250 uspi = UFS_SB(sb)->s_uspi;
251 usb1 = ubh_get_usb_first(USPI_UBH);
/* Current on-disk value of the pointer, converted to CPU endianness. */
256 tmp = fs32_to_cpu(sb, *p);
/* Clamp a request that would cross a block boundary. */
257 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
258 ufs_warning (sb, "ufs_new_fragments", "internal warning"
259 " fragment %u, count %u", fragment, count);
260 count = uspi->s_fpb - ufs_fragnum(fragment);
262 oldcount = ufs_fragnum (fragment);
263 newcount = oldcount + count;
266 * Somebody else has just allocated our fragments
270 ufs_error (sb, "ufs_new_fragments", "internal error, "
271 "fragment %u, tmp %u\n", fragment, tmp);
/* Pointer already valid for a fragment before i_lastfrag: nothing to do. */
275 if (fragment < UFS_I(inode)->i_lastfrag) {
276 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
283 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
290 * There is not enough space for user on the device
/* Non-privileged users cannot dip into the reserved UFS_MINFREE space. */
292 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
294 UFSD(("EXIT (FAILED)\n"))
/* Pick the starting cylinder group: from the inode when there is no
 * usable goal, otherwise from the goal fragment. */
298 if (goal >= uspi->s_size)
301 cgno = ufs_inotocg (inode->i_ino);
303 cgno = ufs_dtog (goal);
306 * allocate new fragment
/* Case 1: fresh allocation. */
309 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
311 *p = cpu_to_fs32(sb, result);
313 inode->i_blocks += count << uspi->s_nspfshift;
314 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
318 UFSD(("EXIT, result %u\n", result))
/* Case 2: try to grow the existing run in place. */
325 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
328 inode->i_blocks += count << uspi->s_nspfshift;
329 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
332 UFSD(("EXIT, result %u\n", result))
337 * allocate new block and move data
/* Case 3: decide how much to request based on the space/time
 * optimization mode stored in the superblock (fs_optim). */
339 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
342 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
343 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
345 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
348 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
351 request = uspi->s_fpb;
352 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
353 (uspi->s_minfree - 2) / 100)
355 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
358 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocate the old fragments: rebind each buffer to its new disk
 * address instead of copying data through memory. */
360 for (i = 0; i < oldcount; i++) {
361 bh = sb_bread(sb, tmp + i);
364 clear_buffer_dirty(bh);
365 bh->b_blocknr = result + i;
366 mark_buffer_dirty (bh);
368 sync_dirty_buffer(bh);
373 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
378 *p = cpu_to_fs32(sb, result);
380 inode->i_blocks += count << uspi->s_nspfshift;
381 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Give back the over-allocation and the old location. */
384 if (newcount < request)
385 ufs_free_fragments (inode, result + newcount, request - newcount);
386 ufs_free_fragments (inode, tmp, oldcount);
387 UFSD(("EXIT, result %u\n", result))
/*
 * ufs_add_fragments - try to extend an existing fragment run in place,
 * from 'oldcount' to 'newcount' fragments starting at 'fragment'. Succeeds
 * only when the fragments immediately following the run are free in the
 * cg bitmap; updates the frsum run-size table, clears the newly used bits,
 * charges quota and adjusts the nffree statistics.
 *
 * NOTE(review): gaps in the embedded numbering (398, 405, 407-408, 412,
 * 415, 417, 419-420, 425-427, ...) hide the 'sb = ...' assignment, the
 * early 'return 0' failure paths and several braces.
 */
396 unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
397 unsigned oldcount, unsigned newcount, int * err)
399 struct super_block * sb;
400 struct ufs_sb_private_info * uspi;
401 struct ufs_super_block_first * usb1;
402 struct ufs_cg_private_info * ucpi;
403 struct ufs_cylinder_group * ucg;
404 unsigned cgno, fragno, fragoff, count, fragsize, i;
406 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
409 uspi = UFS_SB(sb)->s_uspi;
410 usb1 = ubh_get_usb_first (USPI_UBH);
/* Number of additional fragments requested. */
411 count = newcount - oldcount;
/* Bail out early if the cg lacks free fragments or the extension would
 * cross a block boundary. */
413 cgno = ufs_dtog(fragment);
414 if (UFS_SB(sb)->fs_cs(cgno).cs_nffree < count)
416 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
418 ucpi = ufs_load_cylinder (sb, cgno);
421 ucg = ubh_get_ucg (UCPI_UBH);
422 if (!ufs_cg_chkmagic(sb, ucg)) {
423 ufs_panic (sb, "ufs_add_fragments",
424 "internal error, bad magic number on cg %u", cgno);
428 fragno = ufs_dtogd (fragment);
429 fragoff = ufs_fragnum (fragno);
/* The fragments we want to grow into must all be free. */
430 for (i = oldcount; i < newcount; i++)
431 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
434 * Block can be extended
436 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the full free run following the existing fragments so the
 * frsum table can be moved from the old run size to the remainder. */
437 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
438 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
440 fragsize = i - oldcount;
441 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
442 ufs_panic (sb, "ufs_add_fragments",
443 "internal error or corrupted bitmap on cg %u", cgno);
444 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
445 if (fragsize != count)
446 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the new fragments in the bitmap. */
447 for (i = oldcount; i < newcount; i++)
448 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
/* Charge quota; failure path lines are missing from this view. */
449 if(DQUOT_ALLOC_BLOCK(inode, count)) {
454 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
455 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
456 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
458 ubh_mark_buffer_dirty (USPI_UBH);
459 ubh_mark_buffer_dirty (UCPI_UBH);
460 if (sb->s_flags & MS_SYNCHRONOUS) {
461 ubh_wait_on_buffer (UCPI_UBH);
462 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
463 ubh_wait_on_buffer (UCPI_UBH);
467 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * UFS_TEST_FREE_SPACE_CG - test whether cylinder group 'cgno' can satisfy
 * the current request: either it has a whole free block (cs_nbfree) or its
 * frsum table records a free fragment run of size >= 'count'. Expands using
 * the caller's locals sb/uspi/ucg/cgno/count/k; the branch targets taken on
 * success (original lines ~475/478) are missing from this dump.
 */
472 #define UFS_TEST_FREE_SPACE_CG \
473 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
474 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
476 for (k = count; k < uspi->s_fpb; k++) \
477 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments - allocate 'count' fragments near 'goal', preferring
 * cylinder group 'cgno'. Search order (classic FFS):
 *   1. the preferred cylinder group,
 *   2. quadratic rehash over the groups,
 *   3. linear brute-force scan of all remaining groups.
 * Once a group is chosen: allocate a whole block if count == fpb or if only
 * whole blocks are free (then return the surplus fragments), otherwise take
 * a fragment run found by ufs_bitmap_search().
 *
 * NOTE(review): gaps in the embedded numbering (489, 491-492, 495-497,
 * 501-502, 506, 518, 540-543, ...) hide 'sb = ...', 'oldcg = cgno',
 * 'cgno++' steps, goto/return statements and the 'succed:' style labels.
 */
480 unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
481 unsigned goal, unsigned count, int * err)
483 struct super_block * sb;
484 struct ufs_sb_private_info * uspi;
485 struct ufs_super_block_first * usb1;
486 struct ufs_cg_private_info * ucpi;
487 struct ufs_cylinder_group * ucg;
488 unsigned oldcg, i, j, k, result, allocsize;
490 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
493 uspi = UFS_SB(sb)->s_uspi;
494 usb1 = ubh_get_usb_first(USPI_UBH);
498 * 1. searching on preferred cylinder group
500 UFS_TEST_FREE_SPACE_CG
503 * 2. quadratic rehash
505 for (j = 1; j < uspi->s_ncg; j *= 2) {
507 if (cgno >= uspi->s_ncg)
509 UFS_TEST_FREE_SPACE_CG
513 * 3. brute force search
514 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
516 cgno = (oldcg + 1) % uspi->s_ncg;
517 for (j = 2; j < uspi->s_ncg; j++) {
519 if (cgno >= uspi->s_ncg)
521 UFS_TEST_FREE_SPACE_CG
524 UFSD(("EXIT (FAILED)\n"))
/* A suitable group was found: load it and verify its magic. */
528 ucpi = ufs_load_cylinder (sb, cgno);
531 ucg = ubh_get_ucg (UCPI_UBH);
532 if (!ufs_cg_chkmagic(sb, ucg))
533 ufs_panic (sb, "ufs_alloc_fragments",
534 "internal error, bad magic number on cg %u", cgno);
535 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact block request: allocate a whole block. */
537 if (count == uspi->s_fpb) {
538 result = ufs_alloccg_block (inode, ucpi, goal, err);
539 if (result == (unsigned)-1)
/* Find the smallest available run size that fits the request. */
544 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
545 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No suitable fragment run: take a whole block, keep 'count' fragments
 * and return the rest to the free bitmap / statistics. */
548 if (allocsize == uspi->s_fpb) {
549 result = ufs_alloccg_block (inode, ucpi, goal, err);
550 if (result == (unsigned)-1)
552 goal = ufs_dtogd (result);
553 for (i = count; i < uspi->s_fpb; i++)
554 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
555 i = uspi->s_fpb - count;
556 DQUOT_FREE_BLOCK(inode, i);
558 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
559 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
560 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
561 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Fragment-run allocation path. */
565 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
566 if (result == (unsigned)-1)
568 if(DQUOT_ALLOC_BLOCK(inode, count)) {
572 for (i = 0; i < count; i++)
573 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
575 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
576 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
577 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* The found run shrinks from 'allocsize' to 'allocsize - count'. */
578 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
580 if (count != allocsize)
581 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
584 ubh_mark_buffer_dirty (USPI_UBH);
585 ubh_mark_buffer_dirty (UCPI_UBH);
586 if (sb->s_flags & MS_SYNCHRONOUS) {
587 ubh_wait_on_buffer (UCPI_UBH);
588 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
589 ubh_wait_on_buffer (UCPI_UBH);
/* Convert the in-group fragment offset to a filesystem-wide number. */
593 result += cgno * uspi->s_fpg;
594 UFSD(("EXIT3, result %u\n", result))
/*
 * ufs_alloccg_block - allocate one whole block in cylinder group 'ucpi',
 * preferably at 'goal' (falling back to the group's rotor and then to a
 * bitmap search). Clears the block in the free bitmap, updates cluster
 * accounting (44BSD flavour), charges quota and decrements the nbfree
 * statistics. Returns the fragment number of the block, in-group.
 *
 * NOTE(review): lines 606, 608-609, 613-617, 620-629, 632, 640-643, 650
 * of the original are missing here — the 'sb = ...' assignment, the
 * goal==0 branch and the gotcha/return paths are not visible.
 */
598 unsigned ufs_alloccg_block (struct inode * inode,
599 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
601 struct super_block * sb;
602 struct ufs_sb_private_info * uspi;
603 struct ufs_super_block_first * usb1;
604 struct ufs_cylinder_group * ucg;
605 unsigned result, cylno, blkno;
607 UFSD(("ENTER, goal %u\n", goal))
610 uspi = UFS_SB(sb)->s_uspi;
611 usb1 = ubh_get_usb_first(USPI_UBH);
612 ucg = ubh_get_ucg(UCPI_UBH);
/* No goal: start from the group's rotating allocation pointer. */
615 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, in-group. */
618 goal = ufs_blknum (goal);
619 goal = ufs_dtogd (goal);
622 * If the requested block is available, use it.
624 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Otherwise scan the bitmap for any free block (fpb fragments). */
630 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
631 if (result == (unsigned)-1)
/* Remember where we found space for the next allocation. */
633 ucpi->c_rotor = result;
635 blkno = ufs_fragstoblks(result);
636 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
637 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
638 ufs_clusteracct (sb, ucpi, blkno, -1);
/* Quota charge for a full block; failure path lines missing here. */
639 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
644 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
645 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
646 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
647 cylno = ufs_cbtocylno(result);
648 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
649 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
651 UFSD(("EXIT, result %u\n", result))
/*
 * ufs_bitmap_search - find a run of 'count' contiguous free fragments in
 * the free bitmap of cylinder group 'ucpi', searching forward from 'goal'
 * (or from the group's fragment rotor when no goal is given). The byte-wise
 * scan uses a precomputed fragment table (ufs_fragtable_8fpb when a block
 * is 8 fragments, ufs_fragtable_other otherwise) to recognize bytes that
 * contain a suitable run; a bit-wise pass then pins down the exact offset.
 * Returns the in-group fragment number, or reports a corrupted bitmap.
 *
 * NOTE(review): original lines 664, 666, 670-671, 673, 675, 680-681, 685,
 * 689-692, 695-698, 700, 704-708, 712-716, 720-721 are absent — the two
 * scanc passes' if/else structure, the 'fragsize' bookkeeping inside the
 * loop and the return statements cannot be fully read from this dump.
 */
656 unsigned ufs_bitmap_search (struct super_block * sb,
657 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
659 struct ufs_sb_private_info * uspi;
660 struct ufs_super_block_first * usb1;
661 struct ufs_cylinder_group * ucg;
662 unsigned start, length, location, result;
663 unsigned possition, fragsize, blockmap, mask;
665 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
667 uspi = UFS_SB(sb)->s_uspi;
668 usb1 = ubh_get_usb_first (USPI_UBH);
669 ucg = ubh_get_ucg(UCPI_UBH);
/* Byte offset to start scanning from: the goal or the rotor. */
672 start = ufs_dtogd(goal) >> 3;
674 start = ucpi->c_frotor >> 3;
/* First pass: scan from 'start' to the end of the bitmap. */
676 length = ((uspi->s_fpg + 7) >> 3) - start;
677 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
678 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
679 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Second pass (wrap-around): scan from the beginning of the bitmap;
 * if this too fails the bitmap contradicts the frsum statistics. */
682 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
683 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
684 1 << (count - 1 + (uspi->s_fpb & 7)));
686 ufs_error (sb, "ufs_bitmap_search",
687 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
688 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* Convert the scanc result back into a fragment number and remember it
 * as the new rotor position. */
693 result = (start + length - location) << 3;
694 ucpi->c_frotor = result;
697 * found the byte in the map
699 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
/* Bit-wise pass over the found byte: count free-run lengths until one of
 * exactly 'count' fragments is located ('possition' [sic] is the bit). */
701 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
702 if (blockmap & mask) {
703 if (!(possition & uspi->s_fpbmask))
709 if (fragsize == count) {
710 result += possition - count;
711 UFSD(("EXIT, result %u\n", result))
717 if (fragsize == count) {
718 result += possition - count;
719 UFSD(("EXIT, result %u\n", result))
722 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
723 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_clusteracct - maintain the 44BSD cluster summary for block 'blkno':
 * cnt > 0 frees the block (sets its cluster bit), cnt < 0 allocates it
 * (clears the bit). The sizes of the adjoining free clusters before and
 * after the block are measured and the per-size cluster-count table at
 * c_clustersumoff is adjusted accordingly (counts capped at
 * s_contigsumsize). No-op when clustering is disabled (contigsumsize <= 0).
 *
 * NOTE(review): original lines 729, 732, 735-737, 739, 741-745, 750-754,
 * 756-757, 759-760, 762-766, 768-770, 774, 776 are missing — the cnt>0/
 * cnt<0 branch, the start/forw/back computations and the early return are
 * only partially visible here.
 */
727 void ufs_clusteracct(struct super_block * sb,
728 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
730 struct ufs_sb_private_info * uspi;
731 int i, start, end, forw, back;
733 uspi = UFS_SB(sb)->s_uspi;
/* Clustering disabled: nothing to account. */
734 if (uspi->s_contigsumsize <= 0)
/* Mark the block free (set) or allocated (clear) in the cluster map. */
738 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
740 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
743 * Find the size of the cluster going forward.
746 end = start + uspi->s_contigsumsize;
747 if ( end >= ucpi->c_nclusterblks)
748 end = ucpi->c_nclusterblks;
749 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
755 * Find the size of the cluster going backward.
758 end = start - uspi->s_contigsumsize;
761 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
767 * Account for old cluster and the possibly new forward and
/* Cap the combined cluster size at the largest tracked size, then adjust
 * the summary counters: add the new cluster size, remove the old
 * backward and forward cluster sizes. */
771 if (i > uspi->s_contigsumsize)
772 i = uspi->s_contigsumsize;
773 fs32_add(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
775 fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
777 fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * ufs_fragtable_8fpb - per-byte lookup table for the 8-fragments-per-block
 * case, consumed by ubh_scanc() in ufs_bitmap_search(): indexed by a byte
 * of the free-fragment bitmap, each entry appears to be a bitmask of the
 * free-run sizes present in that byte (bit k-1 set => a free run of k
 * fragments exists), so scanc can match bytes against
 * '1 << (count - 1 + (s_fpb & 7))'. NOTE(review): the closing '};'
 * (original line ~798) is missing from this dump.
 */
781 static unsigned char ufs_fragtable_8fpb[] = {
782 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
783 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
784 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
785 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
786 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
787 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
788 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
789 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
790 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
791 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
792 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
793 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
794 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
795 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
796 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
797 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
800 static unsigned char ufs_fragtable_other[] = {
801 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
802 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
803 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
804 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
805 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
806 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
807 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
808 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
809 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
810 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
811 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
812 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
813 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
814 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
815 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
816 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,