/*
 *  linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */
#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"
/*================ File-local functions ================*/
21 static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
24 key->ext.FkType = type;
25 key->ext.FNum = cpu_to_be32(cnid);
26 key->ext.FABN = cpu_to_be16(block);
/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned ints).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects */
50 int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
55 tmp = be32_to_cpu(key1->ext.FNum) - be32_to_cpu(key2->ext.FNum);
59 tmp = (unsigned char)key1->ext.FkType - (unsigned char)key2->ext.FkType;
63 retval = (int)(be16_to_cpu(key1->ext.FABN)
64 - be16_to_cpu(key2->ext.FABN));
73 * Find a block within an extent record
75 static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
80 for (i = 0; i < 3; ext++, i++) {
81 count = be16_to_cpu(ext->count);
83 return be16_to_cpu(ext->block) + off;
90 static int hfs_ext_block_count(struct hfs_extent *ext)
95 for (i = 0; i < 3; ext++, i++)
96 count += be16_to_cpu(ext->count);
100 static u16 hfs_ext_lastblock(struct hfs_extent *ext)
105 for (i = 0; i < 2; ext--, i++)
108 return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
111 static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
115 hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
116 HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
117 res = hfs_brec_find(fd);
118 if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
121 hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
122 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
126 hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
127 HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
131 void hfs_ext_write_extent(struct inode *inode)
133 struct hfs_find_data fd;
135 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
136 hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
137 __hfs_ext_write_extent(inode, &fd);
142 static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
143 u32 cnid, u32 block, u8 type)
147 hfs_ext_build_key(fd->search_key, cnid, block, type);
148 fd->key->ext.FNum = 0;
149 res = hfs_brec_find(fd);
150 if (res && res != -ENOENT)
152 if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
153 fd->key->ext.FkType != fd->search_key->ext.FkType)
155 if (fd->entrylength != sizeof(hfs_extent_rec))
157 hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
161 static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
165 if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY)
166 __hfs_ext_write_extent(inode, fd);
168 res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
169 block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
171 HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
172 HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
174 HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
175 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
180 static int hfs_ext_read_extent(struct inode *inode, u16 block)
182 struct hfs_find_data fd;
185 if (block >= HFS_I(inode)->cached_start &&
186 block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
189 hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
190 res = __hfs_ext_cache_extent(&fd, inode, block);
195 static void hfs_dump_extent(struct hfs_extent *extent)
199 dprint(DBG_EXTENT, " ");
200 for (i = 0; i < 3; i++)
201 dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block),
202 be16_to_cpu(extent[i].count));
203 dprint(DBG_EXTENT, "\n");
206 static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
207 u16 alloc_block, u16 block_count)
212 hfs_dump_extent(extent);
213 for (i = 0; i < 3; extent++, i++) {
214 count = be16_to_cpu(extent->count);
215 if (offset == count) {
216 start = be16_to_cpu(extent->block);
217 if (alloc_block != start + count) {
221 extent->block = cpu_to_be16(alloc_block);
223 block_count += count;
224 extent->count = cpu_to_be16(block_count);
226 } else if (offset < count)
234 int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
235 u16 offset, u16 block_nr)
240 hfs_dump_extent(extent);
241 for (i = 0; i < 3; extent++, i++) {
242 count = be16_to_cpu(extent->count);
245 else if (offset < count)
253 start = be16_to_cpu(extent->block);
254 if (count <= block_nr) {
255 hfs_clear_vbm_bits(sb, start, count);
261 hfs_clear_vbm_bits(sb, start + count, block_nr);
262 extent->count = cpu_to_be16(count);
269 count = be16_to_cpu(extent->count);
273 int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
275 struct hfs_find_data fd;
276 u32 total_blocks, blocks, start;
277 u32 cnid = be32_to_cpu(file->FlNum);
278 struct hfs_extent *extent;
281 if (type == HFS_FK_DATA) {
282 total_blocks = be32_to_cpu(file->PyLen);
283 extent = file->ExtRec;
285 total_blocks = be32_to_cpu(file->RPyLen);
286 extent = file->RExtRec;
288 total_blocks /= HFS_SB(sb)->alloc_blksz;
293 for (i = 0; i < 3; extent++, i++)
294 blocks += be16_to_cpu(extent[i].count);
296 res = hfs_free_extents(sb, extent, blocks, blocks);
299 if (total_blocks == blocks)
302 hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
304 res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
307 start = be16_to_cpu(fd.key->ext.FABN);
308 hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
309 hfs_brec_remove(&fd);
310 total_blocks = start;
311 } while (total_blocks > blocks);
320 int hfs_get_block(struct inode *inode, sector_t block,
321 struct buffer_head *bh_result, int create)
323 struct super_block *sb;
328 /* Convert inode block to disk allocation block */
329 ablock = (u32)block / HFS_SB(sb)->fs_div;
331 if (block >= inode->i_blocks) {
332 if (block > inode->i_blocks || !create)
334 if (ablock >= HFS_I(inode)->alloc_blocks) {
335 res = hfs_extend_file(inode);
342 if (ablock < HFS_I(inode)->first_blocks) {
343 dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
347 down(&HFS_I(inode)->extents_lock);
348 res = hfs_ext_read_extent(inode, ablock);
350 dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
351 ablock - HFS_I(inode)->cached_start);
353 up(&HFS_I(inode)->extents_lock);
356 up(&HFS_I(inode)->extents_lock);
359 map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
360 dblock * HFS_SB(sb)->fs_div +
361 (u32)block % HFS_SB(sb)->fs_div);
364 set_buffer_new(bh_result);
365 HFS_I(inode)->phys_size += sb->s_blocksize;
367 mark_inode_dirty(inode);
372 int hfs_extend_file(struct inode *inode)
374 struct super_block *sb = inode->i_sb;
375 u32 start, len, goal;
378 down(&HFS_I(inode)->extents_lock);
379 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
380 goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
382 res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
385 goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
388 len = HFS_I(inode)->clump_blocks;
389 start = hfs_vbm_search_free(sb, goal, &len);
395 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
396 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
397 if (!HFS_I(inode)->first_blocks) {
398 dprint(DBG_EXTENT, "first extents\n");
400 HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
401 HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
404 /* try to append to extents in inode */
405 res = hfs_add_extent(HFS_I(inode)->first_extents,
406 HFS_I(inode)->alloc_blocks,
412 hfs_dump_extent(HFS_I(inode)->first_extents);
413 HFS_I(inode)->first_blocks += len;
416 res = hfs_add_extent(HFS_I(inode)->cached_extents,
417 HFS_I(inode)->alloc_blocks -
418 HFS_I(inode)->cached_start,
421 hfs_dump_extent(HFS_I(inode)->cached_extents);
422 HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
423 HFS_I(inode)->cached_blocks += len;
424 } else if (res == -ENOSPC)
428 up(&HFS_I(inode)->extents_lock);
430 HFS_I(inode)->alloc_blocks += len;
431 mark_inode_dirty(inode);
432 if (inode->i_ino < HFS_FIRSTUSER_CNID)
433 set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
434 set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
440 dprint(DBG_EXTENT, "insert new extent\n");
441 hfs_ext_write_extent(inode);
443 memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
444 HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
445 HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
446 hfs_dump_extent(HFS_I(inode)->cached_extents);
447 HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
448 HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
449 HFS_I(inode)->cached_blocks = len;
455 void hfs_file_truncate(struct inode *inode)
457 struct super_block *sb = inode->i_sb;
458 struct hfs_find_data fd;
459 u16 blk_cnt, alloc_cnt, start;
463 dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
464 (long long)HFS_I(inode)->phys_size, inode->i_size);
465 if (inode->i_size > HFS_I(inode)->phys_size) {
466 struct address_space *mapping = inode->i_mapping;
470 size = inode->i_size - 1;
471 page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
474 size &= PAGE_CACHE_SIZE - 1;
476 res = mapping->a_ops->prepare_write(NULL, page, size, size);
478 res = mapping->a_ops->commit_write(NULL, page, size, size);
480 inode->i_size = HFS_I(inode)->phys_size;
482 page_cache_release(page);
483 mark_inode_dirty(inode);
486 size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
487 blk_cnt = size / HFS_SB(sb)->alloc_blksz;
488 alloc_cnt = HFS_I(inode)->alloc_blocks;
489 if (blk_cnt == alloc_cnt)
492 down(&HFS_I(inode)->extents_lock);
493 hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
495 if (alloc_cnt == HFS_I(inode)->first_blocks) {
496 hfs_free_extents(sb, HFS_I(inode)->first_extents,
497 alloc_cnt, alloc_cnt - blk_cnt);
498 hfs_dump_extent(HFS_I(inode)->first_extents);
499 HFS_I(inode)->first_blocks = blk_cnt;
502 res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
505 start = HFS_I(inode)->cached_start;
506 hfs_free_extents(sb, HFS_I(inode)->cached_extents,
507 alloc_cnt - start, alloc_cnt - blk_cnt);
508 hfs_dump_extent(HFS_I(inode)->cached_extents);
509 if (blk_cnt > start) {
510 HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
514 HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
515 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
516 hfs_brec_remove(&fd);
519 up(&HFS_I(inode)->extents_lock);
521 HFS_I(inode)->alloc_blocks = blk_cnt;
523 HFS_I(inode)->phys_size = inode->i_size;
524 mark_inode_dirty(inode);
525 inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;