2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/sched.h>
17 #include <asm/bitops.h>
18 #include <asm/byteorder.h>
23 #undef UFS_BALLOC_DEBUG
25 #ifdef UFS_BALLOC_DEBUG
26 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
31 unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
32 unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
34 unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
35 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
36 void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * ufs_free_fragments: free 'count' fragments starting at fragment
 * number 'fragment'.  Sets the fragments' bits in the cylinder-group
 * free map, updates the fragment-run summary (cg_frsum) and the
 * free-fragment counters at cylinder-group, per-cg-summary and
 * superblock level, and -- when every fragment of the containing block
 * has become free -- converts them back into one free block (nbfree),
 * including the cylinder / rotational-position accounting.
 *
 * NOTE(review): this listing is truncated -- the error-path return
 * statements (after ufs_error/ufs_panic), the `sb = inode->i_sb`-style
 * initialization and several closing braces are not visible here.
 */
39 * Free 'count' fragments from fragment number 'fragment'
41 void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
42 struct super_block * sb;
43 struct ufs_sb_private_info * uspi;
44 struct ufs_super_block_first * usb1;
45 struct ufs_cg_private_info * ucpi;
46 struct ufs_cylinder_group * ucg;
47 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
50 uspi = UFS_SB(sb)->s_uspi;
51 usb1 = ubh_get_usb_first(USPI_UBH);
53 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* a run of fragments must not extend past the fragments-per-group span */
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
/* cylinder-group index and bit offset inside that group's free map */
60 cgno = ufs_dtog(fragment);
61 bit = ufs_dtogd(fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
67 ucpi = ufs_load_cylinder (sb, cgno);
70 ucg = ubh_get_ucg (UCPI_UBH);
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/* drop the old fragment-run accounting for the block containing 'bit' */
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
78 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* set the freed bits; each one must currently be allocated (clear) */
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
83 else ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
87 DQUOT_FREE_BLOCK (inode, count);
/* bump free-fragment counters: cg, superblock total, per-cg summary */
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* re-account the block's new fragment-run distribution */
93 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
97 * Trying to reassemble free fragments into block
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
/* whole block free again: convert s_fpb nffree back into one nbfree */
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* per-cylinder rotational-position and block totals */
109 cylno = ufs_cbtocylno (bbase);
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* write the dirtied cg buffers out now if the fs is mounted sync */
114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) {
117 ubh_wait_on_buffer (UCPI_UBH);
118 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
119 ubh_wait_on_buffer (UCPI_UBH);
129 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_free_blocks: free 'count' fragments starting at 'fragment', where
 * both are block-aligned (whole blocks only) -- enforced by the
 * s_fpbmask check below.  For each block: set its bits in the cg free
 * map, update cluster accounting (44BSD layouts), release quota, and
 * bump the free-block counters at cg / superblock / per-cg-summary
 * level plus the per-cylinder rotational tables.
 *
 * NOTE(review): truncated listing -- the error-path returns, the loop
 * that apparently handles the 'overflow' spill into the next cylinder
 * group, and several braces are not visible here.
 */
134 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
136 void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
137 struct super_block * sb;
138 struct ufs_sb_private_info * uspi;
139 struct ufs_super_block_first * usb1;
140 struct ufs_cg_private_info * ucpi;
141 struct ufs_cylinder_group * ucg;
142 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
145 uspi = UFS_SB(sb)->s_uspi;
146 usb1 = ubh_get_usb_first(USPI_UBH);
148 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* both start and count must be whole blocks (multiples of s_fpb) */
150 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
151 ufs_error (sb, "ufs_free_blocks", "internal error, "
152 "fragment %u, count %u\n", fragment, count);
160 cgno = ufs_dtog (fragment);
161 bit = ufs_dtogd (fragment);
162 if (cgno >= uspi->s_ncg) {
163 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* clamp the range to this cg; 'overflow' is what spills past s_fpg */
166 end_bit = bit + count;
167 if (end_bit > uspi->s_fpg) {
168 overflow = bit + count - uspi->s_fpg;
173 ucpi = ufs_load_cylinder (sb, cgno);
176 ucg = ubh_get_ucg (UCPI_UBH);
177 if (!ufs_cg_chkmagic(sb, ucg)) {
178 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* free one whole block per iteration */
182 for (i = bit; i < end_bit; i += uspi->s_fpb) {
183 blkno = ufs_fragstoblks(i);
184 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
187 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
189 ufs_clusteracct (sb, ucpi, blkno, 1);
190 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* free-block counters: cg, superblock total, per-cg summary */
192 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
193 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
194 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
195 cylno = ufs_cbtocylno(i);
196 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
197 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* flush the dirtied buffers synchronously on a sync mount */
200 ubh_mark_buffer_dirty (USPI_UBH);
201 ubh_mark_buffer_dirty (UCPI_UBH);
202 if (sb->s_flags & MS_SYNCHRONOUS) {
203 ubh_wait_on_buffer (UCPI_UBH);
204 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
205 ubh_wait_on_buffer (UCPI_UBH);
221 UFSD(("EXIT (FAILED)\n"))
/*
 * NULLIFY_FRAGMENTS: zero-fill the newly allocated fragments
 * [oldcount, newcount) at device blocks result+i -- used by
 * ufs_new_fragments() so freshly grown file tails never expose stale
 * disk contents.  Relies on sb, result, oldcount, newcount, bh, i and
 * inode being in scope at the expansion site.
 *
 * NOTE(review): truncated listing -- the macro's final line(s), e.g.
 * the brelse() of bh and closing brace, are not visible here.
 */
227 #define NULLIFY_FRAGMENTS \
228 for (i = oldcount; i < newcount; i++) { \
229 bh = sb_getblk(sb, result + i); \
230 memset (bh->b_data, 0, sb->s_blocksize); \
231 set_buffer_uptodate(bh); \
232 mark_buffer_dirty (bh); \
233 if (IS_SYNC(inode)) \
234 sync_dirty_buffer(bh); \
/*
 * ufs_new_fragments: allocate 'count' fragments for 'inode' at the
 * block pointer *p (on-disk endian), preferring 'goal'.  Three cases
 * are visible below:
 *   1. *p == 0: fresh allocation via ufs_alloc_fragments();
 *   2. the existing run at tmp can be grown in place via
 *      ufs_add_fragments();
 *   3. otherwise allocate a new (possibly larger, per fs_optim policy)
 *      run, copy the old fragments' buffers over by re-labelling their
 *      b_blocknr, and free the old run.
 * On success *p is updated, i_blocks and i_lastfrag are advanced, and
 * the fragment number is returned; *err carries the failure reason.
 *
 * NOTE(review): truncated listing -- the returns after the UFSD exit
 * macros, the `if (tmp ...)` conditions around the case labels, the
 * switch case labels for fs_optim (UFS_OPTSPACE/UFS_OPTTIME --
 * presumably), and NULLIFY_FRAGMENTS expansion sites are not visible.
 */
238 unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
239 unsigned goal, unsigned count, int * err )
241 struct super_block * sb;
242 struct ufs_sb_private_info * uspi;
243 struct ufs_super_block_first * usb1;
244 struct buffer_head * bh;
245 unsigned cgno, oldcount, newcount, tmp, request, i, result;
247 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
250 uspi = UFS_SB(sb)->s_uspi;
251 usb1 = ubh_get_usb_first(USPI_UBH);
256 tmp = fs32_to_cpu(sb, *p);
/* clamp a request that would cross a block boundary */
257 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
258 ufs_warning (sb, "ufs_new_fragments", "internal warning"
259 " fragment %u, count %u", fragment, count);
260 count = uspi->s_fpb - ufs_fragnum(fragment);
262 oldcount = ufs_fragnum (fragment);
263 newcount = oldcount + count;
266 * Somebody else has just allocated our fragments
270 ufs_error (sb, "ufs_new_fragments", "internal error, "
271 "fragment %u, tmp %u\n", fragment, tmp);
275 if (fragment < UFS_I(inode)->i_lastfrag) {
276 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
283 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
/* reserve UFS_MINFREE percent for root unless CAP_SYS_RESOURCE */
290 * There is not enough space for user on the device
292 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
294 UFSD(("EXIT (FAILED)\n"))
/* pick a starting cylinder group: inode's own cg if goal is invalid */
298 if (goal >= uspi->s_size)
301 cgno = ufs_inotocg (inode->i_ino);
303 cgno = ufs_dtog (goal);
306 * allocate new fragment
309 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
311 *p = cpu_to_fs32(sb, result);
313 inode->i_blocks += count << uspi->s_nspfshift;
314 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
318 UFSD(("EXIT, result %u\n", result))
/* try to grow the existing run [tmp, tmp+oldcount) in place */
325 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
328 inode->i_blocks += count << uspi->s_nspfshift;
329 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
332 UFSD(("EXIT, result %u\n", result))
337 * allocate new block and move data
/* fs_optim policy: request a whole block (time-opt) or just newcount */
339 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
342 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
343 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
345 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
348 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
351 request = uspi->s_fpb;
352 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
353 (uspi->s_minfree - 2) / 100)
355 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
358 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* move old data: re-label each old buffer to its new block number */
360 for (i = 0; i < oldcount; i++) {
361 bh = sb_bread(sb, tmp + i);
364 clear_buffer_dirty(bh);
365 bh->b_blocknr = result + i;
366 mark_buffer_dirty (bh);
368 sync_dirty_buffer(bh);
373 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
377 *p = cpu_to_fs32(sb, result);
379 inode->i_blocks += count << uspi->s_nspfshift;
380 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* give back the over-allocation and the old run */
383 if (newcount < request)
384 ufs_free_fragments (inode, result + newcount, request - newcount);
385 ufs_free_fragments (inode, tmp, oldcount);
386 UFSD(("EXIT, result %u\n", result))
391 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_add_fragments: try to extend, in place, an existing fragment run
 * at 'fragment' from 'oldcount' to 'newcount' fragments.  Succeeds only
 * if the (newcount - oldcount) fragments immediately following the run
 * are free in the cg bitmap.  Updates cg_frsum (the old free run of
 * size 'fragsize' shrinks by 'count'), clears the claimed bits, charges
 * quota, and decrements the nffree counters.
 *
 * NOTE(review): truncated listing -- the returns on the early-exit
 * paths (insufficient cs_nffree, run crossing a block, bit already
 * used, quota failure) are not visible here.
 */
395 unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
396 unsigned oldcount, unsigned newcount, int * err)
398 struct super_block * sb;
399 struct ufs_sb_private_info * uspi;
400 struct ufs_super_block_first * usb1;
401 struct ufs_cg_private_info * ucpi;
402 struct ufs_cylinder_group * ucg;
403 unsigned cgno, fragno, fragoff, count, fragsize, i;
405 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
408 uspi = UFS_SB(sb)->s_uspi;
409 usb1 = ubh_get_usb_first (USPI_UBH);
410 count = newcount - oldcount;
/* cheap rejections: not enough free fragments in this cg, or the
 * grown run would cross a block boundary */
412 cgno = ufs_dtog(fragment);
413 if (UFS_SB(sb)->fs_cs(cgno).cs_nffree < count)
415 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
417 ucpi = ufs_load_cylinder (sb, cgno);
420 ucg = ubh_get_ucg (UCPI_UBH);
421 if (!ufs_cg_chkmagic(sb, ucg)) {
422 ufs_panic (sb, "ufs_add_fragments",
423 "internal error, bad magic number on cg %u", cgno);
427 fragno = ufs_dtogd (fragment);
428 fragoff = ufs_fragnum (fragno);
/* the fragments we want to claim must all be free */
429 for (i = oldcount; i < newcount; i++)
430 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
433 * Block can be extended
435 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* find the full extent of the free run following the old fragments */
436 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
437 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
439 fragsize = i - oldcount;
440 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
441 ufs_panic (sb, "ufs_add_fragments",
442 "internal error or corrupted bitmap on cg %u", cgno);
/* the free run of size 'fragsize' is consumed; what remains (if any)
 * is a run of size fragsize - count */
443 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
444 if (fragsize != count)
445 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
446 for (i = oldcount; i < newcount; i++)
447 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
448 if(DQUOT_ALLOC_BLOCK(inode, count)) {
453 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
454 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
455 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
/* flush the dirtied buffers synchronously on a sync mount */
457 ubh_mark_buffer_dirty (USPI_UBH);
458 ubh_mark_buffer_dirty (UCPI_UBH);
459 if (sb->s_flags & MS_SYNCHRONOUS) {
460 ubh_wait_on_buffer (UCPI_UBH);
461 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
462 ubh_wait_on_buffer (UCPI_UBH);
466 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * UFS_TEST_FREE_SPACE_CG: check whether cylinder group 'cgno' can
 * satisfy the current request -- either it has a whole free block
 * (cs_nbfree) or a free fragment run of size >= count (cg_frsum[k]).
 * Expands inside ufs_alloc_fragments(); relies on sb, cgno, count,
 * uspi, ucg and k being in scope.
 *
 * NOTE(review): truncated listing -- the goto/branch taken on success
 * is not visible here.
 */
471 #define UFS_TEST_FREE_SPACE_CG \
472 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
473 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
475 for (k = count; k < uspi->s_fpb; k++) \
476 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments: allocate 'count' fragments near 'goal', starting
 * the cylinder-group hunt at 'cgno'.  Search order (classic FFS):
 *   1. the preferred cg; 2. quadratic rehash over cgs; 3. linear brute
 * force from oldcg+1.  Within the chosen cg: a whole-block request goes
 * straight to ufs_alloccg_block(); otherwise the smallest adequate free
 * run is taken via cg_frsum/ufs_bitmap_search(), or a full block is
 * split and its unused tail re-freed.  Returns the absolute fragment
 * number (cg-relative result + cgno * s_fpg), or fails with *err set.
 *
 * NOTE(review): truncated listing -- the goto labels targeted by
 * UFS_TEST_FREE_SPACE_CG, `oldcg = cgno`, the cgno advance inside the
 * rehash/brute-force loops, and the error-path returns are not visible.
 */
479 unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
480 unsigned goal, unsigned count, int * err)
482 struct super_block * sb;
483 struct ufs_sb_private_info * uspi;
484 struct ufs_super_block_first * usb1;
485 struct ufs_cg_private_info * ucpi;
486 struct ufs_cylinder_group * ucg;
487 unsigned oldcg, i, j, k, result, allocsize;
489 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
492 uspi = UFS_SB(sb)->s_uspi;
493 usb1 = ubh_get_usb_first(USPI_UBH);
497 * 1. searching on preferred cylinder group
499 UFS_TEST_FREE_SPACE_CG
502 * 2. quadratic rehash
504 for (j = 1; j < uspi->s_ncg; j *= 2) {
506 if (cgno >= uspi->s_ncg)
508 UFS_TEST_FREE_SPACE_CG
512 * 3. brute force search
513 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
515 cgno = (oldcg + 1) % uspi->s_ncg;
516 for (j = 2; j < uspi->s_ncg; j++) {
518 if (cgno >= uspi->s_ncg)
520 UFS_TEST_FREE_SPACE_CG
523 UFSD(("EXIT (FAILED)\n"))
/* a cg with space was found; load and validate it */
527 ucpi = ufs_load_cylinder (sb, cgno);
530 ucg = ubh_get_ucg (UCPI_UBH);
531 if (!ufs_cg_chkmagic(sb, ucg))
532 ufs_panic (sb, "ufs_alloc_fragments",
533 "internal error, bad magic number on cg %u", cgno);
534 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* whole-block request: allocate a block directly */
536 if (count == uspi->s_fpb) {
537 result = ufs_alloccg_block (inode, ucpi, goal, err);
538 if (result == (unsigned)-1)
/* find the smallest free-run size >= count present in this cg */
543 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
544 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* no partial run fits: split a whole block, re-free the tail */
547 if (allocsize == uspi->s_fpb) {
548 result = ufs_alloccg_block (inode, ucpi, goal, err);
549 if (result == (unsigned)-1)
551 goal = ufs_dtogd (result);
552 for (i = count; i < uspi->s_fpb; i++)
553 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
554 i = uspi->s_fpb - count;
555 DQUOT_FREE_BLOCK(inode, i);
557 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
558 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
559 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
560 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* take 'count' fragments out of a free run of size 'allocsize' */
564 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
565 if (result == (unsigned)-1)
567 if(DQUOT_ALLOC_BLOCK(inode, count)) {
571 for (i = 0; i < count; i++)
572 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
574 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
575 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
576 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* the run shrinks: frsum[allocsize]--, frsum[allocsize - count]++ */
577 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
579 if (count != allocsize)
580 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
583 ubh_mark_buffer_dirty (USPI_UBH);
584 ubh_mark_buffer_dirty (UCPI_UBH);
585 if (sb->s_flags & MS_SYNCHRONOUS) {
586 ubh_wait_on_buffer (UCPI_UBH);
587 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
588 ubh_wait_on_buffer (UCPI_UBH);
/* convert the cg-relative fragment number to an absolute one */
592 result += cgno * uspi->s_fpg;
593 UFSD(("EXIT3, result %u\n", result))
/*
 * ufs_alloccg_block: allocate one whole block in cylinder group 'ucpi',
 * preferring 'goal' (falling back to the cg rotor when no goal).  If
 * the goal block is not free, ufs_bitmap_search() finds a free block
 * and the rotor is advanced to it.  Clears the block in the free map,
 * updates 44BSD cluster accounting, charges quota for s_fpb fragments,
 * and decrements the nbfree / cylinder-position counters.  Returns the
 * cg-relative fragment number of the block (the visible (unsigned)-1
 * checks in callers indicate that value signals failure).
 *
 * NOTE(review): truncated listing -- `sb = ...` initialization, the
 * no-goal branch, the 'gotit'-style label after the goal test, and the
 * quota-failure cleanup are not visible here.
 */
597 unsigned ufs_alloccg_block (struct inode * inode,
598 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
600 struct super_block * sb;
601 struct ufs_sb_private_info * uspi;
602 struct ufs_super_block_first * usb1;
603 struct ufs_cylinder_group * ucg;
604 unsigned result, cylno, blkno;
606 UFSD(("ENTER, goal %u\n", goal))
609 uspi = UFS_SB(sb)->s_uspi;
610 usb1 = ubh_get_usb_first(USPI_UBH);
611 ucg = ubh_get_ucg(UCPI_UBH);
/* no usable goal: continue from this cg's last allocation point */
614 goal = ucpi->c_rotor;
/* round the goal down to a block boundary, cg-relative */
617 goal = ufs_blknum (goal);
618 goal = ufs_dtogd (goal);
621 * If the requested block is available, use it.
623 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* goal taken: scan the bitmap for any run of s_fpb free fragments */
629 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
630 if (result == (unsigned)-1)
632 ucpi->c_rotor = result;
634 blkno = ufs_fragstoblks(result);
635 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
636 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
637 ufs_clusteracct (sb, ucpi, blkno, -1);
638 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
643 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
644 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
645 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
646 cylno = ufs_cbtocylno(result);
647 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
648 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
650 UFSD(("EXIT, result %u\n", result))
/*
 * ufs_bitmap_search: find a run of 'count' contiguous free fragments in
 * cylinder group 'ucpi', starting near 'goal' (or the fragment rotor
 * when no goal).  Phase 1 scans the free map bytewise with ubh_scanc(),
 * using the fragtable for this fragments-per-block geometry to match
 * the mask 1 << (count - 1 + (s_fpb & 7)); a second scan covers the
 * wrapped-around region before 'start'.  Phase 2 walks the bits of the
 * located byte to pin down the exact position; callers treat
 * (unsigned)-1 as failure.
 *
 * NOTE(review): truncated listing -- the goal==0 branch, the wrap-around
 * length recomputation before the second ubh_scanc(), the fragsize
 * reset/increment inside the bit loop, and the returns are not visible.
 */
655 unsigned ufs_bitmap_search (struct super_block * sb,
656 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
658 struct ufs_sb_private_info * uspi;
659 struct ufs_super_block_first * usb1;
660 struct ufs_cylinder_group * ucg;
661 unsigned start, length, location, result;
662 unsigned possition, fragsize, blockmap, mask;
664 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
666 uspi = UFS_SB(sb)->s_uspi;
667 usb1 = ubh_get_usb_first (USPI_UBH);
668 ucg = ubh_get_ucg(UCPI_UBH);
/* byte offset in the free map to start scanning from */
671 start = ufs_dtogd(goal) >> 3;
673 start = ucpi->c_frotor >> 3;
675 length = ((uspi->s_fpg + 7) >> 3) - start;
676 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
677 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
678 1 << (count - 1 + (uspi->s_fpb & 7)));
/* nothing after 'start': rescan from the beginning of the map */
681 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
682 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
683 1 << (count - 1 + (uspi->s_fpb & 7)));
685 ufs_error (sb, "ufs_bitmap_search",
686 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
687 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* ubh_scanc counts down: convert remaining length back to a bit index */
692 result = (start + length - location) << 3;
693 ucpi->c_frotor = result;
696 * found the byte in the map
698 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
/* walk the byte bit by bit, tracking the current free-run length */
700 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
701 if (blockmap & mask) {
702 if (!(possition & uspi->s_fpbmask))
708 if (fragsize == count) {
709 result += possition - count;
710 UFSD(("EXIT, result %u\n", result))
716 if (fragsize == count) {
717 result += possition - count;
718 UFSD(("EXIT, result %u\n", result))
721 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
722 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_clusteracct: maintain 44BSD contiguous-cluster accounting when
 * block 'blkno' is freed (cnt > 0) or allocated (cnt < 0).  Toggles the
 * block's bit in the cluster map, measures the free cluster extending
 * forward and backward from it (capped at s_contigsumsize), and adjusts
 * the cluster-size summary counters at c_clustersumoff: the merged
 * cluster of size 'i' gains cnt, the old forward/backward clusters
 * lose it.  No-op when the fs keeps no cluster summary
 * (s_contigsumsize <= 0).
 *
 * NOTE(review): truncated listing -- the early return, the start/back
 * and forw computations, and the lines deriving the merged size 'i'
 * from forw + back are not visible here.
 */
726 void ufs_clusteracct(struct super_block * sb,
727 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
729 struct ufs_sb_private_info * uspi;
730 int i, start, end, forw, back;
732 uspi = UFS_SB(sb)->s_uspi;
733 if (uspi->s_contigsumsize <= 0)
/* freeing sets the bit; allocating clears it */
737 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
739 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
742 * Find the size of the cluster going forward.
745 end = start + uspi->s_contigsumsize;
746 if ( end >= ucpi->c_nclusterblks)
747 end = ucpi->c_nclusterblks;
748 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
754 * Find the size of the cluster going backward.
757 end = start - uspi->s_contigsumsize;
760 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
766 * Account for old cluster and the possibly new forward and
/* clamp the merged cluster size to the largest tracked bucket */
770 if (i > uspi->s_contigsumsize)
771 i = uspi->s_contigsumsize;
772 fs32_add(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
774 fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
776 fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * ufs_fragtable_8fpb: 256-entry lookup table for the 8-fragments-per-
 * block geometry, consumed by ufs_bitmap_search() via ubh_scanc().
 * Indexed by one byte of the free-fragment bitmap; the set bits of each
 * entry encode which free-run lengths occur in that byte pattern, so a
 * single table probe against mask 1 << (count - 1) tests "does this
 * byte contain a free run of >= count fragments" (cf. BSD
 * ffs_tables.c fragtbl8 -- NOTE(review): exact bit semantics inferred
 * from the mask expression in ufs_bitmap_search; confirm against the
 * BSD source).
 *
 * NOTE(review): the closing `};` of this initializer is not visible in
 * this truncated listing.
 */
780 static unsigned char ufs_fragtable_8fpb[] = {
781 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
782 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
783 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
784 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
785 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
786 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
787 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
788 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
789 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
790 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
791 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
792 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
793 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
794 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
795 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
796 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * ufs_fragtable_other: companion lookup table used by
 * ufs_bitmap_search() for geometries with fewer than 8 fragments per
 * block (the scan mask there is shifted by s_fpb & 7 to select the
 * sub-block field).  Same role as ufs_fragtable_8fpb: map one bitmap
 * byte to a mask of the free-run lengths it contains (cf. BSD
 * ffs_tables.c fragtbl* -- NOTE(review): semantics inferred from the
 * ubh_scanc() call sites; confirm against the BSD source).
 *
 * NOTE(review): the closing `};` of this initializer is not visible in
 * this truncated listing.
 */
799 static unsigned char ufs_fragtable_other[] = {
800 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
801 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
802 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
803 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
804 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
805 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
806 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
807 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
808 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
809 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
810 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
811 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
812 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
813 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
814 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
815 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,