5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
/* Size of the on-stack long_ad scratch array (laarr[]) used by
 * inode_getblk() and the extent split/prealloc/merge helpers below. */
52 #define EXTENT_MERGE_SIZE 5
/* Forward declarations for the static helpers defined later in this file. */
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
58 static void udf_split_extents(struct inode *, int *, int, int,
59 long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_prealloc_extents(struct inode *, int, int,
61 long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_merge_extents(struct inode *,
63 long_ad [EXTENT_MERGE_SIZE], int *);
64 static void udf_update_extents(struct inode *,
65 long_ad [EXTENT_MERGE_SIZE], int, int,
66 lb_addr, uint32_t, struct buffer_head **);
67 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
75 * This routine is called whenever the kernel no longer needs the inode.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at each iput()
/*
 * udf_put_inode
 *
 * Called at each iput().  On a writable mount, drop any blocks that were
 * preallocated for this inode but never used.
 */
83 void udf_put_inode(struct inode * inode)
85 if (!(inode->i_sb->s_flags & MS_RDONLY))
88 udf_discard_prealloc(inode);
97 * Clean-up before the specified inode is destroyed.
100 * This routine is called when the kernel destroys an inode structure
101 * ie. when iput() finds i_count == 0.
104 * July 1, 1997 - Andrew E. Mileski
105 * Written, tested, and released.
107 * Called at the last iput() if i_nlink is zero.
/*
 * udf_delete_inode
 *
 * Called at the last iput() when i_nlink is zero: flush the inode to disk
 * and then release its on-disk allocation.  Bad inodes are skipped.
 */
109 void udf_delete_inode(struct inode * inode)
111 if (is_bad_inode(inode))
118 udf_update_inode(inode, IS_SYNC(inode));
119 udf_free_inode(inode);
/* Release the in-core copy of the inode's extended-attribute/allocation
 * descriptor area when the VFS drops the inode from memory. */
127 void udf_clear_inode(struct inode *inode)
129 kfree(UDF_I_DATA(inode));
130 UDF_I_DATA(inode) = NULL;
/* address_space writepage: generic block writeout via udf_get_block(). */
133 static int udf_writepage(struct page *page, struct writeback_control *wbc)
135 return block_write_full_page(page, udf_get_block, wbc);
/* address_space readpage: generic block read via udf_get_block(). */
138 static int udf_readpage(struct file *file, struct page *page)
140 return block_read_full_page(page, udf_get_block);
/* address_space prepare_write: map/allocate blocks for the write range. */
143 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
145 return block_prepare_write(page, from, to, udf_get_block);
/* address_space bmap: logical-to-physical block lookup for FIBMAP etc. */
148 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
150 return generic_block_bmap(mapping,block,udf_get_block);
/* Address-space operations for normal (extent-mapped) UDF files.
 * In-ICB files use a separate udf_adinicb_aops (see udf_fill_inode). */
153 struct address_space_operations udf_aops = {
154 .readpage = udf_readpage,
155 .writepage = udf_writepage,
156 .sync_page = block_sync_page,
157 .prepare_write = udf_prepare_write,
158 .commit_write = generic_commit_write,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) into a normal extent-mapped file: copy the in-ICB data into
 * page 0 of the page cache, clear the in-ICB allocation, switch the inode
 * to short/long allocation descriptors, and write the page back.
 * NOTE(review): newsize/err handling is partly elided in this view.
 */
162 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
166 struct writeback_control udf_wbc = {
167 .sync_mode = WB_SYNC_NONE,
171 /* from now on we have normal address_space methods */
172 inode->i_data.a_ops = &udf_aops;
/* Empty in-ICB area: just flip the allocation type and return early. */
174 if (!UDF_I_LENALLOC(inode))
176 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
177 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
179 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
180 mark_inode_dirty(inode);
/* Copy the in-ICB file data into page 0, zero-filling the tail. */
184 page = grab_cache_page(inode->i_mapping, 0);
185 if (!PageLocked(page))
187 if (!PageUptodate(page))
190 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
191 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
192 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
193 UDF_I_LENALLOC(inode));
194 flush_dcache_page(page);
195 SetPageUptodate(page);
/* Wipe the old in-ICB data and switch to extent-based allocation. */
198 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
199 UDF_I_LENALLOC(inode));
200 UDF_I_LENALLOC(inode) = 0;
201 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
202 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
204 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Push the copied data out so it lands in the newly allocated extent. */
206 inode->i_data.a_ops->writepage(page, &udf_wbc);
207 page_cache_release(page);
209 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside the ICB into a one-block directory:
 * allocate a new block, copy every file identifier from the ICB into it,
 * clear the in-ICB allocation and record the new block as the directory's
 * single extent.  Returns the buffer_head of the new directory block
 * (*block is its logical block number), or NULL on failure (error paths
 * are partly elided in this view).
 */
212 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
215 struct buffer_head *sbh = NULL, *dbh = NULL;
217 uint32_t elen, extoffset;
220 struct udf_fileident_bh sfibh, dfibh;
221 loff_t f_pos = udf_ext0_offset(inode) >> 2;
222 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
223 struct fileIdentDesc cfi, *sfi, *dfi;
225 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
226 alloctype = ICBTAG_FLAG_AD_SHORT;
228 alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: just switch allocation type, nothing to copy. */
232 UDF_I_ALLOCTYPE(inode) = alloctype;
233 mark_inode_dirty(inode);
237 /* alloc block, and copy data to it */
238 *block = udf_new_block(inode->i_sb, inode,
239 UDF_I_LOCATION(inode).partitionReferenceNum,
240 UDF_I_LOCATION(inode).logicalBlockNum, err);
244 newblock = udf_get_pblock(inode->i_sb, *block,
245 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
248 dbh = udf_tgetblk(inode->i_sb, newblock);
252 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
253 set_buffer_uptodate(dbh);
255 mark_buffer_dirty_inode(dbh, inode);
/* Walk the in-ICB directory entries (source) and re-emit them into the
 * new block (destination), rewriting each descriptor's tag location. */
257 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
258 sbh = sfibh.sbh = sfibh.ebh = NULL;
259 dfibh.soffset = dfibh.eoffset = 0;
260 dfibh.sbh = dfibh.ebh = dbh;
261 while ( (f_pos < size) )
/* Temporarily read as in-ICB, since the data still lives in the ICB. */
263 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
264 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
267 udf_release_data(dbh);
270 UDF_I_ALLOCTYPE(inode) = alloctype;
271 sfi->descTag.tagLocation = *block;
272 dfibh.soffset = dfibh.eoffset;
273 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
274 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
275 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
276 sfi->fileIdent + sfi->lengthOfImpUse))
/* Write failed: restore in-ICB state and bail out. */
278 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
279 udf_release_data(dbh);
283 mark_buffer_dirty_inode(dbh, inode);
/* Clear the old in-ICB allocation area and record the new block as the
 * directory's single extent. */
285 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
286 UDF_I_LENALLOC(inode) = 0;
287 bloc = UDF_I_LOCATION(inode);
288 eloc.logicalBlockNum = *block;
289 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
290 elen = inode->i_size;
291 UDF_I_LENEXTENTS(inode) = elen;
292 extoffset = udf_file_entry_alloc_offset(inode);
293 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
296 udf_release_data(sbh);
297 mark_inode_dirty(inode);
/*
 * udf_get_block
 *
 * get_block callback for the generic block I/O paths: map logical file
 * block 'block' to a physical block in bh_result.  For reads an existing
 * mapping is looked up via udf_block_map(); for writes (create != 0) a
 * block is allocated via inode_getblk() and the sequential-allocation
 * goal hints are advanced.  Locking/error-path lines are elided here.
 */
301 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
304 struct buffer_head *bh;
309 phys = udf_block_map(inode, block);
311 map_bh(bh_result, inode->i_sb, phys);
/* Sequential write detected: bump the next-allocation hints so the
 * following block is allocated contiguously. */
324 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
326 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
327 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
332 bh = inode_getblk(inode, block, &err, &phys, &new);
341 set_buffer_new(bh_result);
342 map_bh(bh_result, inode->i_sb, phys);
348 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk
 *
 * Map (and optionally allocate) logical block 'block' of the inode using
 * a dummy buffer_head, then return the real buffer_head for the mapped
 * physical block.  Newly allocated blocks are zero-filled and marked
 * uptodate.  Returns NULL on failure (elided path).
 */
352 struct buffer_head * udf_getblk(struct inode * inode, long block,
353 int create, int * err)
355 struct buffer_head dummy;
/* Sentinel so we can tell whether udf_get_block() actually mapped it. */
358 dummy.b_blocknr = -1000;
359 *err = udf_get_block(inode, block, &dummy, create);
360 if (!*err && buffer_mapped(&dummy))
362 struct buffer_head *bh;
363 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
364 if (buffer_new(&dummy))
367 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
368 set_buffer_uptodate(bh);
370 mark_buffer_dirty_inode(bh, inode);
/*
 * inode_getblk
 *
 * Core block-allocation routine.  Walks the inode's allocation extents to
 * find (or create) the extent containing logical file block 'block',
 * allocating a new physical block when needed.  The affected extents are
 * staged in laarr[] (at most EXTENT_MERGE_SIZE entries), then split,
 * preallocated, merged and written back via the udf_*_extents helpers.
 * On success *phys is the physical block and *new is set for fresh
 * allocations.  Several branches/returns are elided in this view.
 */
377 static struct buffer_head * inode_getblk(struct inode * inode, long block,
378 int *err, long *phys, int *new)
380 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
381 long_ad laarr[EXTENT_MERGE_SIZE];
382 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
383 int count = 0, startnum = 0, endnum = 0;
385 lb_addr eloc, pbloc, cbloc, nbloc;
387 uint64_t lbcount = 0, b_off = 0;
388 uint32_t newblocknum, newblock, offset = 0;
390 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
393 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
394 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
395 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
397 /* find the extent which contains the block we are looking for.
398 alternate between laarr[0] and laarr[1] for locations of the
399 current extent, and the previous extent */
/* Rotate the previous/current/next descriptor-buffer references as we
 * advance one extent per iteration. */
404 udf_release_data(pbh);
405 atomic_inc(&cbh->b_count);
410 udf_release_data(cbh);
411 atomic_inc(&nbh->b_count);
420 pextoffset = cextoffset;
421 cextoffset = nextoffset;
423 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
428 laarr[c].extLength = (etype << 30) | elen;
429 laarr[c].extLocation = eloc;
/* Remember the block just past this extent as an allocation goal. */
431 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
432 pgoal = eloc.logicalBlockNum +
433 ((elen + inode->i_sb->s_blocksize - 1) >>
434 inode->i_sb->s_blocksize_bits);
437 } while (lbcount + elen <= b_off);
/* Offset of the requested block within the found extent. */
440 offset = b_off >> inode->i_sb->s_blocksize_bits;
442 /* if the extent is allocated and recorded, return the block
443 if the extent is not a multiple of the blocksize, round up */
445 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
447 if (elen & (inode->i_sb->s_blocksize - 1))
449 elen = EXT_RECORDED_ALLOCATED |
450 ((elen + inode->i_sb->s_blocksize - 1) &
451 ~(inode->i_sb->s_blocksize - 1));
452 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
454 udf_release_data(pbh);
455 udf_release_data(cbh);
456 udf_release_data(nbh);
457 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Block lies past the last extent: round the tail extent up to a whole
 * block and append a not-recorded extent covering the gap. */
464 endnum = startnum = ((count > 1) ? 1 : count);
465 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
468 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
469 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
470 inode->i_sb->s_blocksize - 1) &
471 ~(inode->i_sb->s_blocksize - 1));
472 UDF_I_LENEXTENTS(inode) =
473 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
474 ~(inode->i_sb->s_blocksize - 1);
477 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
478 ((offset + 1) << inode->i_sb->s_blocksize_bits);
479 memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
485 endnum = startnum = ((count > 2) ? 2 : count);
487 /* if the current extent is in position 0, swap it with the previous */
488 if (!c && count != 1)
496 /* if the current block is located in a extent, read the next extent */
499 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
501 laarr[c+1].extLength = (etype << 30) | elen;
502 laarr[c+1].extLocation = eloc;
510 udf_release_data(cbh);
511 udf_release_data(nbh);
513 /* if the current extent is not recorded but allocated, get the
514 block in the extent corresponding to the requested block */
515 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
516 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
517 else /* otherwise, allocate a new block */
/* Prefer the sequential-write goal hint; fall back to the inode's own
 * location as the allocation goal. */
519 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
520 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
525 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
528 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
529 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
531 udf_release_data(pbh);
535 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
538 /* if the extent the requsted block is located in contains multiple blocks,
539 split the extent into at most three extents. blocks prior to requested
540 block, requested block, and blocks after requested block */
541 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
543 #ifdef UDF_PREALLOCATE
544 /* preallocate blocks */
545 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
548 /* merge any continuous blocks in laarr */
549 udf_merge_extents(inode, laarr, &endnum);
551 /* write back the new extents, inserting new extents if the new number
552 of extents is greater than the old number, and deleting extents if
553 the new number of extents is less than the old number */
554 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
556 udf_release_data(pbh);
558 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
559 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Record the new hints so the next sequential write allocates after us. */
566 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
567 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
568 inode->i_ctime = CURRENT_TIME;
571 udf_sync_inode(inode);
573 mark_inode_dirty(inode);
/*
 * udf_split_extents
 *
 * Split the extent laarr[*c] (which contains the requested block at
 * 'offset' blocks in) into up to three extents: blocks before the
 * requested one, the requested block itself (recorded/allocated at
 * newblocknum), and the blocks after it.  *c and *endnum are updated to
 * reflect the new layout.  Only applies when the extent is not yet
 * recorded (allocated-or-not).
 */
577 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
578 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
580 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
581 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
584 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
585 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
586 int8_t etype = (laarr[curr].extLength >> 30);
/* Requested block at the start or end of the extent: split into two;
 * otherwise split into three (before / block / after). */
590 else if (!offset || blen == offset + 1)
592 laarr[curr+2] = laarr[curr+1];
593 laarr[curr+1] = laarr[curr];
597 laarr[curr+3] = laarr[curr+1];
598 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading part: if it was allocated-but-unrecorded, free those blocks
 * and turn it into a not-recorded-not-allocated hole. */
603 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
605 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
606 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
607 (offset << inode->i_sb->s_blocksize_bits);
608 laarr[curr].extLocation.logicalBlockNum = 0;
609 laarr[curr].extLocation.partitionReferenceNum = 0;
612 laarr[curr].extLength = (etype << 30) |
613 (offset << inode->i_sb->s_blocksize_bits);
/* Middle part: the requested block, now recorded and allocated. */
619 laarr[curr].extLocation.logicalBlockNum = newblocknum;
620 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
621 laarr[curr].extLocation.partitionReferenceNum =
622 UDF_I_LOCATION(inode).partitionReferenceNum;
623 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
624 inode->i_sb->s_blocksize;
/* Trailing part: the remainder of the original extent, if any. */
627 if (blen != offset + 1)
629 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
630 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
631 laarr[curr].extLength = (etype << 30) |
632 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents
 *
 * After a block allocation, try to preallocate up to
 * UDF_DEFAULT_PREALLOC_BLOCKS blocks contiguously after the just-written
 * extent laarr[c], so sequential writes stay contiguous.  Newly
 * preallocated blocks are inserted into laarr[] as a not-recorded-
 * allocated extent and charged against any following unallocated
 * extents; *endnum is adjusted accordingly.
 */
639 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
640 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
642 int start, length = 0, currlength = 0, i;
644 if (*endnum >= (c+1))
/* An allocated-but-unrecorded extent already follows: count it as
 * existing preallocation. */
653 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
656 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
657 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Sum the room available in following unallocated extents. */
663 for (i=start+1; i<=*endnum; i++)
668 length += UDF_DEFAULT_PREALLOC_BLOCKS;
670 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
671 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
672 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Try to grab the blocks immediately after the current extent. */
679 int next = laarr[start].extLocation.logicalBlockNum +
680 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
681 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
682 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
683 laarr[start].extLocation.partitionReferenceNum,
684 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
685 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either extend the existing preallocation extent, or insert a new
 * not-recorded-allocated extent after laarr[c]. */
690 laarr[start].extLength +=
691 (numalloc << inode->i_sb->s_blocksize_bits);
694 memmove(&laarr[c+2], &laarr[c+1],
695 sizeof(long_ad) * (*endnum - (c+1)));
697 laarr[c+1].extLocation.logicalBlockNum = next;
698 laarr[c+1].extLocation.partitionReferenceNum =
699 laarr[c].extLocation.partitionReferenceNum;
700 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
701 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink/remove the following unallocated extents by the amount we
 * just preallocated. */
705 for (i=start+1; numalloc && i<*endnum; i++)
707 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
708 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
712 laarr[i].extLength -=
713 (numalloc << inode->i_sb->s_blocksize_bits);
720 memmove(&laarr[i], &laarr[i+1],
721 sizeof(long_ad) * (*endnum - (i+1)));
726 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr[] entries: merge neighbouring extents of the
 * same type when they are logically contiguous (or both unallocated
 * holes), splitting at UDF_EXTENT_LENGTH_MASK when the combined length
 * would overflow the 30-bit extent length field.  Allocated-but-
 * unrecorded extents adjacent to holes have their blocks freed and are
 * converted to holes.  *endnum shrinks as entries are merged away.
 */
731 static void udf_merge_extents(struct inode *inode,
732 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
736 for (i=0; i<(*endnum-1); i++)
738 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Same type: mergeable if both are holes, or physically contiguous. */
740 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
741 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
742 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
743 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length overflows the extent-length field: grow extent i
 * to the maximum and push the remainder into extent i+1. */
745 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
746 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
747 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
749 laarr[i+1].extLength = (laarr[i+1].extLength -
750 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
751 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
752 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
753 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
754 laarr[i+1].extLocation.logicalBlockNum =
755 laarr[i].extLocation.logicalBlockNum +
756 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
757 inode->i_sb->s_blocksize_bits);
/* Fits: absorb extent i+1 into extent i and close the gap. */
761 laarr[i].extLength = laarr[i+1].extLength +
762 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
763 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
765 memmove(&laarr[i+1], &laarr[i+2],
766 sizeof(long_ad) * (*endnum - (i+2)));
/* Allocated-unrecorded extent followed by a hole: free its blocks and
 * merge the two into one (possibly split) hole. */
772 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
773 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
775 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
776 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
777 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
778 laarr[i].extLocation.logicalBlockNum = 0;
779 laarr[i].extLocation.partitionReferenceNum = 0;
781 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
782 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
783 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
785 laarr[i+1].extLength = (laarr[i+1].extLength -
786 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
787 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
788 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
789 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
793 laarr[i].extLength = laarr[i+1].extLength +
794 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
795 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
797 memmove(&laarr[i+1], &laarr[i+2],
798 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone allocated-unrecorded extent: free the blocks and downgrade it
 * to a not-recorded-not-allocated hole of the same length. */
803 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
805 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
806 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
807 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
808 laarr[i].extLocation.logicalBlockNum = 0;
809 laarr[i].extLocation.partitionReferenceNum = 0;
810 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
811 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents
 *
 * Write the staged laarr[] extents back to the inode's allocation
 * descriptors starting at (pbloc, pextoffset).  When the new extent
 * count differs from the old one, surplus descriptors are deleted or
 * missing ones inserted first, then each entry is written in place.
 */
816 static void udf_update_extents(struct inode *inode,
817 long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
818 lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
/* Fewer extents than before: delete the surplus descriptors. */
824 if (startnum > endnum)
826 for (i=0; i<(startnum-endnum); i++)
828 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
829 laarr[i].extLength, *pbh);
/* More extents than before: insert the additional descriptors. */
832 else if (startnum < endnum)
834 for (i=0; i<(endnum-startnum); i++)
836 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
837 laarr[i].extLength, *pbh);
838 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
839 &laarr[i].extLength, pbh, 1);
/* Overwrite the remaining descriptors in place. */
844 for (i=start; i<endnum; i++)
846 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
847 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
848 laarr[i].extLength, *pbh, 1);
/*
 * udf_bread
 *
 * Like udf_getblk() but also reads the block from disk if it is not yet
 * uptodate.  Returns the buffer_head, or NULL on failure (elided paths).
 */
852 struct buffer_head * udf_bread(struct inode * inode, int block,
853 int create, int * err)
855 struct buffer_head * bh = NULL;
857 bh = udf_getblk(inode, block, create, err);
861 if (buffer_uptodate(bh))
863 ll_rw_block(READ, 1, &bh);
/* Re-check after the synchronous read completes. */
865 if (buffer_uptodate(bh))
/*
 * udf_truncate
 *
 * Truncate the inode to i_size.  Only regular files, directories and
 * symlinks are truncated; append-only/immutable inodes are refused.
 * In-ICB inodes that no longer fit are first expanded to extent form;
 * ones that still fit just have their in-ICB tail zeroed.  Extent-mapped
 * inodes get the partial last page zeroed and their extents trimmed.
 */
872 void udf_truncate(struct inode * inode)
877 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
878 S_ISLNK(inode->i_mode)))
880 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
884 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits in the ICB: convert to extent form first. */
886 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
889 udf_expand_file_adinicb(inode, inode->i_size, &err);
890 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* Expansion failed; clamp i_size to what is actually stored. */
892 inode->i_size = UDF_I_LENALLOC(inode);
897 udf_truncate_extents(inode);
/* Still in-ICB: zero the tail of the in-ICB data past the new size. */
901 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
902 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
903 UDF_I_LENALLOC(inode) = inode->i_size;
908 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
909 udf_truncate_extents(inode);
912 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
914 udf_sync_inode (inode);
916 mark_inode_dirty(inode);
927 * This routine is called by iget() [which is called by udf_iget()]
928 * (clean_inode() will have been called first)
929 * when an inode is first read into memory.
932 * July 1, 1997 - Andrew E. Mileski
933 * Written, tested, and released.
935 * 12/19/98 dgb Updated to fix size problems.
/* Entry point for reading an inode: poison the location so
 * __udf_read_inode() / udf_fill_inode() can detect an uninitialized one.
 * (Return type and the call into __udf_read_inode are elided here.) */
939 udf_read_inode(struct inode *inode)
941 memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
/*
 * __udf_read_inode
 *
 * Read the (extended) file entry for the inode from disk, validate its
 * tag, follow strategy-4096 indirect ICBs (recursing with the new
 * location), reject unsupported strategy types, and hand the buffer to
 * udf_fill_inode().  Marks the inode bad on any failure.
 */
945 __udf_read_inode(struct inode *inode)
947 struct buffer_head *bh = NULL;
948 struct fileEntry *fe;
952 * Set defaults, but the inode is still incomplete!
953 * Note: get_new_inode() sets the following on a new inode:
956 * i_flags = sb->s_flags
958 * clean_inode(): zero fills and sets
963 inode->i_blksize = PAGE_SIZE;
965 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
969 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
971 make_bad_inode(inode);
/* Only file entries, extended file entries and unallocated-space
 * entries are acceptable here. */
975 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
976 ident != TAG_IDENT_USE)
978 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
979 inode->i_ino, ident);
980 udf_release_data(bh);
981 make_bad_inode(inode);
985 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the ICB may be an indirect entry pointing at the real
 * file entry; follow it and re-read from the new location. */
987 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
989 struct buffer_head *ibh = NULL, *nbh = NULL;
990 struct indirectEntry *ie;
992 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
993 if (ident == TAG_IDENT_IE)
998 ie = (struct indirectEntry *)ibh->b_data;
1000 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1002 if (ie->indirectICB.extLength &&
1003 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1005 if (ident == TAG_IDENT_FE ||
1006 ident == TAG_IDENT_EFE)
1008 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr))
1009 udf_release_data(bh);
1010 udf_release_data(ibh);
1011 udf_release_data(nbh);
1012 __udf_read_inode(inode);
1017 udf_release_data(nbh);
1018 udf_release_data(ibh);
1022 udf_release_data(ibh);
1026 udf_release_data(ibh);
/* Only strategy types 4 and 4096 are supported. */
1028 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1030 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1031 le16_to_cpu(fe->icbTag.strategyType));
1032 udf_release_data(bh);
1033 make_bad_inode(inode);
1036 udf_fill_inode(inode, bh);
1037 udf_release_data(bh);
/*
 * udf_fill_inode
 *
 * Populate the in-core inode from a validated (extended) file entry
 * buffer: cache the descriptor tail in UDF_I_DATA, convert ownership,
 * permissions, link count, sizes and timestamps, then set up the
 * inode/file/address-space operations according to the ICB file type.
 * Marks the inode bad on unknown file types or malformed device EAs.
 */
1040 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1042 struct fileEntry *fe;
1043 struct extendedFileEntry *efe;
1048 fe = (struct fileEntry *)bh->b_data;
1049 efe = (struct extendedFileEntry *)bh->b_data;
1051 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1052 UDF_I_STRAT4096(inode) = 0;
1053 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1054 UDF_I_STRAT4096(inode) = 1;
1056 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1057 UDF_I_UNIQUE(inode) = 0;
1058 UDF_I_LENEATTR(inode) = 0;
1059 UDF_I_LENEXTENTS(inode) = 0;
1060 UDF_I_LENALLOC(inode) = 0;
1061 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1062 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy the data that follows the fixed descriptor header (extended
 * attributes + allocation descriptors) into UDF_I_DATA.  The size of
 * the fixed header differs between EFE, FE and USE descriptors.
 * NOTE(review): kmalloc results are used unchecked here. */
1063 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1065 UDF_I_EFE(inode) = 1;
1066 UDF_I_USE(inode) = 0;
1067 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1068 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1070 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1072 UDF_I_EFE(inode) = 0;
1073 UDF_I_USE(inode) = 0;
1074 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1075 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1077 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1079 UDF_I_EFE(inode) = 0;
1080 UDF_I_USE(inode) = 1;
1081 UDF_I_LENALLOC(inode) =
1083 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1084 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1085 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* Ownership: a stored uid/gid of -1 means "use the mount default". */
1089 inode->i_uid = le32_to_cpu(fe->uid);
1090 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1092 inode->i_gid = le32_to_cpu(fe->gid);
1093 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1095 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1096 if (!inode->i_nlink)
1099 inode->i_size = le64_to_cpu(fe->informationLength);
1100 UDF_I_LENEXTENTS(inode) = inode->i_size;
1102 inode->i_mode = udf_convert_permissions(fe);
1103 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* Block counts and timestamps live at different offsets in FE vs EFE,
 * so the two descriptor layouts are decoded separately. */
1105 if (UDF_I_EFE(inode) == 0)
1107 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1108 (inode->i_sb->s_blocksize_bits - 9);
1110 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1111 lets_to_cpu(fe->accessTime)) )
1113 inode->i_atime.tv_sec = convtime;
1114 inode->i_atime.tv_nsec = convtime_usec * 1000;
1118 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1121 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1122 lets_to_cpu(fe->modificationTime)) )
1124 inode->i_mtime.tv_sec = convtime;
1125 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1129 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1132 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1133 lets_to_cpu(fe->attrTime)) )
1135 inode->i_ctime.tv_sec = convtime;
1136 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1140 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1143 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1144 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1145 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1146 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1150 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1151 (inode->i_sb->s_blocksize_bits - 9);
1153 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1154 lets_to_cpu(efe->accessTime)) )
1156 inode->i_atime.tv_sec = convtime;
1157 inode->i_atime.tv_nsec = convtime_usec * 1000;
1161 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1164 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1165 lets_to_cpu(efe->modificationTime)) )
1167 inode->i_mtime.tv_sec = convtime;
1168 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1172 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1175 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1176 lets_to_cpu(efe->createTime)) )
1178 UDF_I_CRTIME(inode).tv_sec = convtime;
1179 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1183 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1186 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1187 lets_to_cpu(efe->attrTime)) )
1189 inode->i_ctime.tv_sec = convtime;
1190 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1194 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1197 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1198 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1199 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1200 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Dispatch on the ICB file type: wire up ops and mode bits. */
1203 switch (fe->icbTag.fileType)
1205 case ICBTAG_FILE_TYPE_DIRECTORY:
1207 inode->i_op = &udf_dir_inode_operations;
1208 inode->i_fop = &udf_dir_operations;
1209 inode->i_mode |= S_IFDIR;
1213 case ICBTAG_FILE_TYPE_REALTIME:
1214 case ICBTAG_FILE_TYPE_REGULAR:
1215 case ICBTAG_FILE_TYPE_UNDEF:
1217 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1218 inode->i_data.a_ops = &udf_adinicb_aops;
1220 inode->i_data.a_ops = &udf_aops;
1221 inode->i_op = &udf_file_inode_operations;
1222 inode->i_fop = &udf_file_operations;
1223 inode->i_mode |= S_IFREG;
1226 case ICBTAG_FILE_TYPE_BLOCK:
1228 inode->i_mode |= S_IFBLK;
1231 case ICBTAG_FILE_TYPE_CHAR:
1233 inode->i_mode |= S_IFCHR;
1236 case ICBTAG_FILE_TYPE_FIFO:
1238 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1241 case ICBTAG_FILE_TYPE_SOCKET:
1243 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1246 case ICBTAG_FILE_TYPE_SYMLINK:
1248 inode->i_data.a_ops = &udf_symlink_aops;
1249 inode->i_op = &page_symlink_inode_operations;
1250 inode->i_mode = S_IFLNK|S_IRWXUGO;
1255 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1256 inode->i_ino, fe->icbTag.fileType);
1257 make_bad_inode(inode);
/* Char/block devices store their major/minor in a device-spec
 * extended attribute (type 12). */
1261 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1263 struct deviceSpec *dsea =
1264 (struct deviceSpec *)
1265 udf_get_extendedattr(inode, 12, 1);
1269 init_special_inode(inode, inode->i_mode, MKDEV(
1270 le32_to_cpu(dsea->majorDeviceIdent),
1271 le32_to_cpu(dsea->minorDeviceIdent)));
1272 /* Developer ID ??? */
1276 make_bad_inode(inode);
/*
 * udf_convert_permissions
 *
 * Translate the on-disk UDF permission word (other/group/owner groups of
 * five bits each) plus the ICB setuid/setgid/sticky flags into a POSIX
 * mode_t.  (Return-type line and the final return are elided here.)
 */
1282 udf_convert_permissions(struct fileEntry *fe)
1285 uint32_t permissions;
1288 permissions = le32_to_cpu(fe->permissions);
1289 flags = le16_to_cpu(fe->icbTag.flags);
/* Each UDF permission group is 5 bits wide, POSIX groups are 3, hence
 * the shifts of 2 and 4 rather than 3 and 6. */
1291 mode = (( permissions ) & S_IRWXO) |
1292 (( permissions >> 2 ) & S_IRWXG) |
1293 (( permissions >> 4 ) & S_IRWXU) |
1294 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1295 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1296 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1305 * Write out the specified inode.
1308 * This routine is called whenever an inode is synced.
1309 * Currently this routine is just a placeholder.
1312 * July 1, 1997 - Andrew E. Mileski
1313 * Written, tested, and released.
/* VFS write_inode hook: delegate to udf_update_inode() (locking lines
 * are elided in this view). */
1316 void udf_write_inode(struct inode * inode, int sync)
1319 udf_update_inode(inode, sync);
/* Synchronously write the inode to disk. */
1323 int udf_sync_inode(struct inode * inode)
1325 return udf_update_inode(inode, 1);
/*
 * udf_update_inode
 *
 * Serialize an in-core inode back into its on-disk descriptor: either an
 * Unallocated Space Entry (early-return path), a File Entry, or an
 * Extended File Entry (selected by UDF_I_EFE()), then recompute the
 * descriptor CRC and tag checksum, mark the buffer dirty and, if do_sync
 * is set, wait for the write.
 *
 * NOTE(review): original line numbers are non-contiguous — braces, else
 * arms, some declarations (crclen, i, udfperms, icbflags, cpu_time, eid,
 * err) and several statements were dropped by the extraction; comments
 * below describe only the visible code.
 */
1329 udf_update_inode(struct inode *inode, int do_sync)
1331 struct buffer_head *bh = NULL;
1332 struct fileEntry *fe;
1333 struct extendedFileEntry *efe;
/* Read the block holding this inode's (extended) file entry. */
1341 bh = udf_tread(inode->i_sb,
1342 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1346 udf_debug("bread failure\n");
1350 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
/* fe and efe alias the same buffer; the tag decides which layout applies. */
1352 fe = (struct fileEntry *)bh->b_data;
1353 efe = (struct extendedFileEntry *)bh->b_data;
/*
 * Unallocated Space Entry: copy the allocation descriptors, restamp the
 * tag (location, CRC, checksum) and write it out — no file metadata.
 */
1355 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1357 struct unallocSpaceEntry *use =
1358 (struct unallocSpaceEntry *)bh->b_data;
1360 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1361 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1362 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1364 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1365 use->descTag.descCRCLength = cpu_to_le16(crclen);
1366 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
/* Tag checksum: byte-sum of the 16-byte tag (checksum field zeroed first). */
1368 use->descTag.tagChecksum = 0;
1369 for (i=0; i<16; i++)
1371 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1373 mark_buffer_dirty(bh);
1374 udf_release_data(bh);
/*
 * Regular FE/EFE path.  uid/gid are recorded only when they differ from
 * the mount defaults — presumably the sentinel "invalid" id is written
 * otherwise on a dropped line; TODO confirm against the full source.
 */
1378 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1379 fe->uid = cpu_to_le32(inode->i_uid);
1381 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1382 fe->gid = cpu_to_le32(inode->i_gid);
/* Inverse of udf_convert_permissions(): spread rwx triples to UDF layout. */
1384 udfperms = ((inode->i_mode & S_IRWXO) ) |
1385 ((inode->i_mode & S_IRWXG) << 2) |
1386 ((inode->i_mode & S_IRWXU) << 4);
/* Preserve the UDF-only delete/chattr permission bits already on disk. */
1388 udfperms |= (le32_to_cpu(fe->permissions) &
1389 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1390 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1391 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1392 fe->permissions = cpu_to_le32(udfperms);
/* UDF directories do not count the "." self-link, so subtract one. */
1394 if (S_ISDIR(inode->i_mode))
1395 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1397 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1399 fe->informationLength = cpu_to_le64(inode->i_size);
/*
 * Device nodes: store major/minor in a type-12 (Device Specification)
 * extended attribute, creating it if absent.
 */
1401 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1404 struct deviceSpec *dsea =
1405 (struct deviceSpec *)
1406 udf_get_extendedattr(inode, 12, 1);
1410 dsea = (struct deviceSpec *)
1411 udf_add_extendedattr(inode,
1412 sizeof(struct deviceSpec) +
1413 sizeof(regid), 12, 0x3);
1414 dsea->attrType = 12;
1415 dsea->attrSubtype = 1;
1416 dsea->attrLength = sizeof(struct deviceSpec) +
1418 dsea->impUseLength = sizeof(regid);
/* Stamp the implementation-use area with the Linux developer regid. */
1420 eid = (regid *)dsea->impUse;
1421 memset(eid, 0, sizeof(regid));
1422 strcpy(eid->ident, UDF_ID_DEVELOPER);
1423 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1424 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1425 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1426 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* Plain File Entry layout (no extended fields). */
1429 if (UDF_I_EFE(inode) == 0)
1431 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry))
1432 fe->logicalBlocksRecorded = cpu_to_le64(
/* Round 512-byte i_blocks up to whole filesystem blocks. */
1433 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1434 (inode->i_sb->s_blocksize_bits - 9));
1436 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1437 fe->accessTime = cpu_to_lets(cpu_time);
1438 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1439 fe->modificationTime = cpu_to_lets(cpu_time);
1440 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1441 fe->attrTime = cpu_to_lets(cpu_time);
1442 memset(&(fe->impIdent), 0, sizeof(regid));
1443 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1444 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1445 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1446 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1447 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1448 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
/*
 * NOTE(review): le16_to_cpu() on a store to an on-disk field — should
 * semantically be cpu_to_le16() (same bytes either way for a 16-bit swap,
 * but the conversion direction is inverted; sparse would flag this).
 */
1449 fe->descTag.tagIdent = le16_to_cpu(TAG_IDENT_FE);
1450 crclen = sizeof(struct fileEntry);
/* Extended File Entry layout (else arm dropped by extraction). */
1454 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1455 efe->objectSize = cpu_to_le64(inode->i_size);
1456 efe->logicalBlocksRecorded = cpu_to_le64(
1457 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1458 (inode->i_sb->s_blocksize_bits - 9));
/*
 * Clamp the cached creation time so it never postdates atime, mtime or
 * ctime (EFE records a createTime that plain FEs lack).
 */
1460 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1461 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1462 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1464 UDF_I_CRTIME(inode) = inode->i_atime;
1466 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1467 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1468 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1470 UDF_I_CRTIME(inode) = inode->i_mtime;
1472 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1473 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1474 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1476 UDF_I_CRTIME(inode) = inode->i_ctime;
1479 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1480 efe->accessTime = cpu_to_lets(cpu_time);
1481 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1482 efe->modificationTime = cpu_to_lets(cpu_time);
1483 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1484 efe->createTime = cpu_to_lets(cpu_time);
1485 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1486 efe->attrTime = cpu_to_lets(cpu_time);
1488 memset(&(efe->impIdent), 0, sizeof(regid));
1489 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1490 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1491 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1492 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1493 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1494 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
/* NOTE(review): same inverted conversion direction as TAG_IDENT_FE above. */
1495 efe->descTag.tagIdent = le16_to_cpu(TAG_IDENT_EFE);
1496 crclen = sizeof(struct extendedFileEntry);
/* Strategy 4096 (rewritable ICB chains) vs. the usual strategy 4. */
1498 if (UDF_I_STRAT4096(inode))
1500 fe->icbTag.strategyType = cpu_to_le16(4096);
1501 fe->icbTag.strategyParameter = cpu_to_le16(1);
1502 fe->icbTag.numEntries = cpu_to_le16(2);
1506 fe->icbTag.strategyType = cpu_to_le16(4);
1507 fe->icbTag.numEntries = cpu_to_le16(1);
/* Translate the Unix file type into the ICB tag file type. */
1510 if (S_ISDIR(inode->i_mode))
1511 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1512 else if (S_ISREG(inode->i_mode))
1513 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1514 else if (S_ISLNK(inode->i_mode))
1515 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1516 else if (S_ISBLK(inode->i_mode))
1517 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1518 else if (S_ISCHR(inode->i_mode))
1519 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1520 else if (S_ISFIFO(inode->i_mode))
1521 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1522 else if (S_ISSOCK(inode->i_mode))
1523 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/*
 * Rebuild the flags word: allocation type and suid/sgid/sticky from the
 * in-core inode, every other flag preserved from the on-disk value.
 */
1525 icbflags = UDF_I_ALLOCTYPE(inode) |
1526 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1527 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1528 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1529 (le16_to_cpu(fe->icbTag.flags) &
1530 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1531 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1533 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor version 3 for UDF >= 2.00 media, 2 otherwise. */
1534 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1535 fe->descTag.descVersion = cpu_to_le16(3);
1537 fe->descTag.descVersion = cpu_to_le16(2);
1538 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1539 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
/* CRC covers everything after the 16-byte tag, incl. EAs and alloc descs. */
1540 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1541 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1542 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1544 fe->descTag.tagChecksum = 0;
1545 for (i=0; i<16; i++)
1547 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1549 /* write the data blocks */
1550 mark_buffer_dirty(bh);
/* do_sync path: wait for the write and report I/O errors. */
1553 sync_dirty_buffer(bh);
1554 if (buffer_req(bh) && !buffer_uptodate(bh))
1556 printk("IO error syncing udf inode [%s:%08lx]\n",
1557 inode->i_sb->s_id, inode->i_ino);
1561 udf_release_data(bh);
/*
 * udf_iget
 *
 * PURPOSE
 *	Get an inode by its lb_addr; replaces a bare iget() + read_inode()
 *	pair.  Maps the lb_addr to a physical block, lets iget() trigger
 *	udf_read_inode(), and validates the result.
 *
 * NOTE(review): original line numbers are non-contiguous — braces, the
 * return statements and the error-unwind (iput and NULL returns,
 * presumably) were dropped by the extraction.
 *
 * HISTORY
 *	October 3, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *	12/19/98 dgb  Added semaphore and changed to be a wrapper of iget.
 */
1581 udf_iget(struct super_block *sb, lb_addr ino)
1583 struct inode *inode;
1584 unsigned long block;
1586 block = udf_get_lb_pblock(sb, ino, 0);
1590 inode = iget(sb, block);
1591 /* calls udf_read_inode() ! */
1595 printk(KERN_ERR "udf: iget() failed\n");
1598 else if (is_bad_inode(inode))
/*
 * 0xFFFFFFFF/0xFFFF location marks a freshly-allocated in-core inode that
 * has not been read yet: record the real location and read it now.
 */
1603 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1604 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1606 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1607 __udf_read_inode(inode);
1608 if (is_bad_inode(inode))
/* Reject addresses beyond the partition length recorded in the superblock. */
1615 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1617 udf_debug("block=%d, partition=%d out of range\n",
1618 ino.logicalBlockNum, ino.partitionReferenceNum);
1619 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append an extent (eloc/elen) at the current descriptor position
 * (*bloc/*extoffset).  If the current block cannot hold two more
 * descriptors, allocate a new Allocation Extent Descriptor block, chain
 * to it with an EXT_NEXT_EXTENT_ALLOCDECS pointer, and continue there.
 * On success updates *bloc/*extoffset/*bh to the write position and
 * returns the extent type; -1 on failure.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (ptr, adsize, etype, err, loffset, sptr, dptr), braces, else arms,
 * breaks and the early returns were dropped by the extraction.
 */
1627 int8_t udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1628 lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1631 short_ad *sad = NULL;
1632 long_ad *lad = NULL;
1633 struct allocExtDesc *aed;
/* No external bh => descriptors live inline in the in-core FE data area. */
1638 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1640 ptr = (*bh)->b_data + *extoffset;
1642 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1643 adsize = sizeof(short_ad);
1644 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1645 adsize = sizeof(long_ad);
/* Need room for the new descriptor AND a chaining pointer: grow the chain. */
1649 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1652 struct buffer_head *nbh;
1654 lb_addr obloc = *bloc;
/* Allocate the new AED block near the old one, in the same partition. */
1656 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1657 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1661 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1667 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1668 set_buffer_uptodate(nbh);
1670 mark_buffer_dirty_inode(nbh, inode);
1672 aed = (struct allocExtDesc *)(nbh->b_data);
/* Back-pointer to the previous AED block (non-strict mode only). */
1673 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1674 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/*
 * If even one more descriptor would overflow the old block, migrate the
 * last descriptor there into the new AED; otherwise start the new AED
 * empty and only plant the chain pointer in the old block.
 */
1675 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1677 loffset = *extoffset;
1678 aed->lengthAllocDescs = cpu_to_le32(adsize);
1679 sptr = ptr - adsize;
1680 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1681 memcpy(dptr, sptr, adsize);
1682 *extoffset = sizeof(struct allocExtDesc) + adsize;
1686 loffset = *extoffset + adsize;
1687 aed->lengthAllocDescs = cpu_to_le32(0);
1689 *extoffset = sizeof(struct allocExtDesc);
/* Account for the chain pointer in the previous AED / inline area. */
1693 aed = (struct allocExtDesc *)(*bh)->b_data;
1694 aed->lengthAllocDescs =
1695 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1699 UDF_I_LENALLOC(inode) += adsize;
1700 mark_inode_dirty(inode);
/* Tag version 3 for UDF >= 2.00, else 2 (mirrors udf_update_inode). */
1703 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1704 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1705 bloc->logicalBlockNum, sizeof(tag));
1707 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1708 bloc->logicalBlockNum, sizeof(tag));
/* Write the EXT_NEXT_EXTENT_ALLOCDECS chain pointer at sptr. */
1709 switch (UDF_I_ALLOCTYPE(inode))
1711 case ICBTAG_FLAG_AD_SHORT:
1713 sad = (short_ad *)sptr;
1714 sad->extLength = cpu_to_le32(
1715 EXT_NEXT_EXTENT_ALLOCDECS |
1716 inode->i_sb->s_blocksize);
1717 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1720 case ICBTAG_FLAG_AD_LONG:
1722 lad = (long_ad *)sptr;
1723 lad->extLength = cpu_to_le32(
1724 EXT_NEXT_EXTENT_ALLOCDECS |
1725 inode->i_sb->s_blocksize);
1726 lad->extLocation = cpu_to_lelb(*bloc);
1727 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Restamp the old AED's tag/CRC now that its contents changed. */
1733 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1734 udf_update_tag((*bh)->b_data, loffset);
1736 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1737 mark_buffer_dirty_inode(*bh, inode);
1738 udf_release_data(*bh);
1741 mark_inode_dirty(inode);
/* Finally write the caller's extent at the (possibly new) position. */
1745 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
1749 UDF_I_LENALLOC(inode) += adsize;
1750 mark_inode_dirty(inode);
1754 aed = (struct allocExtDesc *)(*bh)->b_data;
1755 aed->lengthAllocDescs =
1756 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1757 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1758 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1760 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1761 mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Overwrite the allocation descriptor at *extoffset with eloc/elen —
 * either inline in the file entry (bh == NULL path) or inside an AED
 * block.  If inc is set, advances *extoffset past the written descriptor.
 * Returns the extent type encoded in elen's top two bits.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (ptr, adsize), braces, breaks and the default case were dropped by the
 * extraction.
 */
1767 int8_t udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
1768 lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
/* Inline descriptors live in the in-core copy of the FE data area. */
1774 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1777 ptr = bh->b_data + *extoffset;
/* Grab a reference; released below after the tag update. */
1778 atomic_inc(&bh->b_count);
1781 switch (UDF_I_ALLOCTYPE(inode))
1783 case ICBTAG_FLAG_AD_SHORT:
1785 short_ad *sad = (short_ad *)ptr;
1786 sad->extLength = cpu_to_le32(elen);
1787 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1788 adsize = sizeof(short_ad);
1791 case ICBTAG_FLAG_AD_LONG:
1793 long_ad *lad = (long_ad *)ptr;
1794 lad->extLength = cpu_to_le32(elen);
1795 lad->extLocation = cpu_to_lelb(eloc);
1796 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1797 adsize = sizeof(long_ad);
/* AED path: recompute the descriptor tag CRC over the changed data. */
1806 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1808 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1809 udf_update_tag((bh)->b_data,
1810 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1812 mark_buffer_dirty_inode(bh, inode);
1813 udf_release_data(bh);
/* Inline path: the inode itself holds the descriptors. */
1816 mark_inode_dirty(inode);
1819 *extoffset += adsize;
1820 return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the next data extent, transparently following
 * EXT_NEXT_EXTENT_ALLOCDECS chain pointers: whenever udf_current_aext()
 * returns a chain-pointer extent, read the next AED block it names and
 * keep going until a real extent (or end, -1) is found.
 *
 * NOTE(review): original line numbers are non-contiguous — the etype
 * declaration, braces, the *bloc = *eloc step and the returns were
 * dropped by the extraction.
 */
1823 int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1824 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1828 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1829 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
/* Descend into the next AED block; descriptors start right after its tag. */
1832 *extoffset = sizeof(struct allocExtDesc);
1833 udf_release_data(*bh);
1834 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1836 udf_debug("reading block %d failed!\n",
1837 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/*
 * udf_current_aext
 *
 * Decode the allocation descriptor at *extoffset (inline in the file
 * entry when *bh is NULL, otherwise inside an AED block) into *eloc/*elen
 * and return its 2-bit extent type; -1 at end of descriptors.  When inc
 * is set the get_fileshortad/longad helpers advance *extoffset.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (ptr, alen, etype, sad, lad), the *extoffset-initialized-check, braces,
 * breaks, returns and the alen bound checks' returns were dropped by the
 * extraction.
 */
1845 int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1846 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* Inline case: descriptors follow the EAs in the in-core FE data copy. */
1855 *extoffset = udf_file_entry_alloc_offset(inode);
1856 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1857 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
/* AED case: descriptors start after the AED header, length from header. */
1862 *extoffset = sizeof(struct allocExtDesc);
1863 ptr = (*bh)->b_data + *extoffset;
1864 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1867 switch (UDF_I_ALLOCTYPE(inode))
1869 case ICBTAG_FLAG_AD_SHORT:
1873 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
/* Top 2 bits of extLength carry the extent type; rest is the byte length. */
1876 etype = le32_to_cpu(sad->extLength) >> 30;
1877 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
/* short_ad has no partition field — it inherits the inode's partition. */
1878 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1879 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1882 case ICBTAG_FLAG_AD_LONG:
1886 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1889 etype = le32_to_cpu(lad->extLength) >> 30;
1890 *eloc = lelb_to_cpu(lad->extLocation);
1891 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1896 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext
 *
 * Insert extent neloc/nelen at position bloc/extoffset by rippling every
 * following extent one slot forward: read the old extent, write the new
 * one in its place, then carry the displaced extent to the next slot,
 * appending the last one with udf_add_aext().  Returns the final extent's
 * type.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (oeloc, oelen, etype), braces and the oeloc carry assignment were
 * dropped by the extraction.
 */
1904 int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
1905 lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
/* Extra reference: the walk below consumes/releases bh as it advances. */
1912 atomic_inc(&bh->b_count);
1914 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1916 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
/* The displaced extent (type re-encoded in bits 30-31) moves up next. */
1919 nelen = (etype << 30) | oelen;
1921 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1922 udf_release_data(bh);
1923 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the extent at nbloc/nextoffset by shifting all following
 * extents back one slot (two cursors: "n" reads ahead, "o" writes
 * behind), then blanking the freed trailing slot(s) and shrinking the
 * descriptor accounting.  If the walk ended on an
 * EXT_NEXT_EXTENT_ALLOCDECS pointer, the now-unreferenced AED block is
 * freed and two slots (the deleted extent plus the chain pointer) are
 * cleared.  Returns the removed extent's type.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (obloc, etype), braces, else arms and several cursor-maintenance
 * statements were dropped by the extraction; comments describe only the
 * visible code.
 */
1926 int8_t udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
1927 lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1929 struct buffer_head *obh;
1931 int oextoffset, adsize;
1933 struct allocExtDesc *aed;
/* Two references: one for each cursor (obh starts aliased to nbh). */
1937 atomic_inc(&nbh->b_count);
1938 atomic_inc(&nbh->b_count);
1941 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1942 adsize = sizeof(short_ad);
1943 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1944 adsize = sizeof(long_ad);
1950 oextoffset = nextoffset;
/* Skip past the extent being deleted; nothing to do if at end. */
1952 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Copy each subsequent extent back into the previous slot. */
1955 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1957 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* The read cursor crossed into a new AED block: move the write cursor too. */
1961 udf_release_data(obh);
1962 atomic_inc(&nbh->b_count);
1964 oextoffset = nextoffset - adsize;
/* Zeroed lb_addr/elen used to blank the now-unused trailing slot(s). */
1967 memset(&eloc, 0x00, sizeof(lb_addr));
/*
 * Walk ended on a chain pointer: the trailing AED block is no longer
 * referenced — free it and clear two slots (deleted extent + pointer;
 * the two identical writes are intentional, oextoffset advances).
 */
1972 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1973 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1974 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
/* Inline descriptors: shrink the inode's allocation-descriptor length. */
1977 UDF_I_LENALLOC(inode) -= (adsize * 2);
1978 mark_inode_dirty(inode);
/* AED descriptors: shrink the AED header count and restamp its tag. */
1982 aed = (struct allocExtDesc *)(obh)->b_data;
1983 aed->lengthAllocDescs =
1984 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1985 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1986 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1988 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1989 mark_buffer_dirty_inode(obh, inode);
/* Ordinary case: only one slot was freed — blank it and shrink by one. */
1994 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1997 UDF_I_LENALLOC(inode) -= adsize;
1998 mark_inode_dirty(inode);
2002 aed = (struct allocExtDesc *)(obh)->b_data;
2003 aed->lengthAllocDescs =
2004 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2005 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2006 udf_update_tag((obh)->b_data, oextoffset - adsize);
2008 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
2009 mark_buffer_dirty_inode(obh, inode);
2013 udf_release_data(nbh);
2014 udf_release_data(obh);
2015 return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list until the cumulative extent length covers
 * the byte offset of logical block `block`.  On success the out
 * parameters name the extent (*bloc/*extoffset/*eloc/*elen/*bh) and
 * *offset is the byte offset of the target within that extent; returns
 * the extent type.  Falling off the end records the mapped length in
 * UDF_I_LENEXTENTS and (per the dropped lines, presumably) returns -1.
 *
 * NOTE(review): original line numbers are non-contiguous — declarations
 * (etype), the do { / lbcount += *elen accumulation, braces and the
 * returns were dropped by the extraction.
 */
2018 int8_t inode_bmap(struct inode *inode, int block, lb_addr *bloc, uint32_t *extoffset,
2019 lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
/* Target byte position of the requested logical block. */
2021 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
2026 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2031 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
/* Start the walk at the inode's own ICB location. */
2037 *bloc = UDF_I_LOCATION(inode);
2041 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
/* Ran out of extents before reaching bcount. */
2043 *offset = bcount - lbcount;
2044 UDF_I_LENEXTENTS(inode) = lbcount;
2048 } while (lbcount <= bcount);
/* lbcount has passed bcount: back up to the offset within this extent. */
2050 *offset = bcount + *elen - lbcount;
2055 long udf_block_map(struct inode *inode, long block)
2058 uint32_t offset, extoffset, elen;
2059 struct buffer_head *bh = NULL;
2064 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2065 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2070 udf_release_data(bh);
2072 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2073 return udf_fixed_to_variable(ret);