2 * Squashfs - a compressed read only filesystem for Linux
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006
5 * Phillip Lougher <phillip@lougher.org.uk>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 #include <linux/types.h>
25 #include <linux/squashfs_fs.h>
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/zlib.h>
31 #include <linux/smp_lock.h>
32 #include <linux/slab.h>
33 #include <linux/squashfs_fs_sb.h>
34 #include <linux/squashfs_fs_i.h>
35 #include <linux/buffer_head.h>
36 #include <linux/vfs.h>
37 #include <linux/init.h>
38 #include <linux/dcache.h>
39 #include <linux/wait.h>
40 #include <linux/blkdev.h>
41 #include <linux/vmalloc.h>
42 #include <asm/uaccess.h>
43 #include <asm/semaphore.h>
/*
 * Forward declarations for the VFS entry points and internal helpers
 * defined later in this file (super ops, address-space ops, directory
 * ops and the metadata readers they depend on).
 *
 * NOTE(review): this listing has lines dropped by extraction -- the
 * squashfs_lookup and squashfs_get_sb prototypes below are visibly
 * truncated mid-parameter-list.  Compare against the original source.
 */
47 static void squashfs_put_super(struct super_block *);
48 static int squashfs_statfs(struct dentry *, struct kstatfs *);
49 static int squashfs_symlink_readpage(struct file *file, struct page *page);
50 static int squashfs_readpage(struct file *file, struct page *page);
51 static int squashfs_readpage4K(struct file *file, struct page *page);
52 static int squashfs_readdir(struct file *, void *, filldir_t);
53 static struct inode *squashfs_alloc_inode(struct super_block *sb);
54 static void squashfs_destroy_inode(struct inode *inode);
55 static int init_inodecache(void);
56 static void destroy_inodecache(void);
57 static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
59 static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
60 static long long read_blocklist(struct inode *inode, int index,
61 int readahead_blks, char *block_list,
62 unsigned short **block_p, unsigned int *bsize);
63 static int squashfs_get_sb(struct file_system_type *,int, const char *, void *,
65 static void vfs_read_inode(struct inode *i);
66 static struct dentry *squashfs_get_parent(struct dentry *child);
/*
 * Filesystem registration record: squashfs requires a backing block
 * device and is torn down with the generic kill_block_super().
 * NOTE(review): the .owner/.name initialisers and the closing "};" of
 * each table below were dropped by extraction.
 */
68 static struct file_system_type squashfs_fs_type = {
71 .get_sb = squashfs_get_sb,
72 .kill_sb = kill_block_super,
73 .fs_flags = FS_REQUIRES_DEV
/* Maps on-disk squashfs file-type codes to VFS DT_* dirent types. */
76 static unsigned char squashfs_filetype_table[] = {
77 DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
/* Super operations for non-exportable (no NFS) mounts. */
80 static struct super_operations squashfs_super_ops = {
81 .alloc_inode = squashfs_alloc_inode,
82 .destroy_inode = squashfs_destroy_inode,
83 .statfs = squashfs_statfs,
84 .put_super = squashfs_put_super,
/*
 * Super operations used when the image carries an inode lookup table:
 * adds .read_inode so NFS file handles can be decoded (see
 * vfs_read_inode below).
 */
87 static struct super_operations squashfs_export_super_ops = {
88 .alloc_inode = squashfs_alloc_inode,
89 .destroy_inode = squashfs_destroy_inode,
90 .statfs = squashfs_statfs,
91 .put_super = squashfs_put_super,
92 .read_inode = vfs_read_inode
/* NFS export support: only parent lookup is provided. */
95 struct export_operations squashfs_export_ops = {
96 .get_parent = squashfs_get_parent
/* Address-space ops: symlink target pages. */
99 SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
100 .readpage = squashfs_symlink_readpage
/* Address-space ops for regular files with block size > 4K. */
103 SQSH_EXTERN struct address_space_operations squashfs_aops = {
104 .readpage = squashfs_readpage
/* Address-space ops for regular files with block size <= 4K. */
107 SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
108 .readpage = squashfs_readpage4K
/* Directory file operations (readdir only; read is the generic stub). */
111 static struct file_operations squashfs_dir_ops = {
112 .read = generic_read_dir,
113 .readdir = squashfs_readdir
/* Directory inode operations: name lookup only (read-only fs). */
116 SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
117 .lookup = squashfs_lookup
/*
 * Read the two-byte length field that precedes a compressed metadata
 * block, advancing *cur_index/*offset past it and returning the
 * buffer_head holding the following data.  The field may straddle a
 * device-block boundary, requiring a second sb_bread(); both the
 * byte-swapped and native-endian layouts are handled (presumably the
 * dropped if/else around each pair selects on msblk->swap -- TODO
 * confirm against original source; this listing is missing lines).
 */
121 static struct buffer_head *get_block_length(struct super_block *s,
122 int *cur_index, int *offset, int *c_byte)
124 struct squashfs_sb_info *msblk = s->s_fs_info;
126 struct buffer_head *bh;
128 if (!(bh = sb_bread(s, *cur_index)))
/* Only one byte left in this device block: the field is split. */
131 if (msblk->devblksize - *offset == 1) {
133 ((unsigned char *) &temp)[1] = *((unsigned char *)
134 (bh->b_data + *offset));
136 ((unsigned char *) &temp)[0] = *((unsigned char *)
137 (bh->b_data + *offset));
/* Second byte of the length lives in the next device block. */
139 if (!(bh = sb_bread(s, ++(*cur_index))))
142 ((unsigned char *) &temp)[0] = *((unsigned char *)
145 ((unsigned char *) &temp)[1] = *((unsigned char *)
/* Both bytes available in the current device block. */
151 ((unsigned char *) &temp)[1] = *((unsigned char *)
152 (bh->b_data + *offset));
153 ((unsigned char *) &temp)[0] = *((unsigned char *)
154 (bh->b_data + *offset + 1));
156 ((unsigned char *) &temp)[0] = *((unsigned char *)
157 (bh->b_data + *offset));
158 ((unsigned char *) &temp)[1] = *((unsigned char *)
159 (bh->b_data + *offset + 1));
/*
 * check_data filesystems append a marker byte after the length field;
 * a mismatch indicates metadata corruption.
 */
165 if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
166 if (*offset == msblk->devblksize) {
168 if (!(bh = sb_bread(s, ++(*cur_index))))
172 if (*((unsigned char *) (bh->b_data + *offset)) !=
173 SQUASHFS_MARKER_BYTE) {
174 ERROR("Metadata block marker corrupt @ %x\n",
/*
 * Core low-level reader: fetch a (possibly compressed) block starting
 * at byte offset "index" on the device into "buffer" (at most
 * "srclength" bytes of output).  A non-zero "length" means the caller
 * already knows the on-disk size (data blocks / raw table reads with
 * the length encoded in "length"); length == 0 means a metadata block
 * whose size must first be read via get_block_length().  On success
 * *next_index (if non-NULL) receives the offset of the following
 * block.  Returns the number of bytes produced; the dropped failure
 * paths presumably return 0 -- TODO confirm against original source.
 */
188 SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
189 long long index, unsigned int length,
190 long long *next_index, int srclength)
192 struct squashfs_sb_info *msblk = s->s_fs_info;
193 struct squashfs_super_block *sblk = &msblk->sblk;
194 struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
195 msblk->devblksize_log2) + 2];
196 unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
197 unsigned int cur_index = index >> msblk->devblksize_log2;
198 int bytes, avail_bytes, b = 0, k = 0;
200 unsigned int compressed;
201 unsigned int c_byte = length;
/* Caller-supplied length: compressed flag and size are encoded in it. */
204 bytes = msblk->devblksize - offset;
205 compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
/* Compressed data is staged in msblk->read_data before inflation. */
206 c_buffer = compressed ? msblk->read_data : buffer;
207 c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
209 TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed
210 ? "" : "un", (unsigned int) c_byte, srclength);
/* Bounds-check the read against the filesystem image. */
212 if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
215 if (!(bh[0] = sb_getblk(s, cur_index)))
218 for (b = 1; bytes < c_byte; b++) {
219 if (!(bh[b] = sb_getblk(s, ++cur_index)))
221 bytes += msblk->devblksize;
/* Submit all buffer_heads for read in one go. */
223 ll_rw_block(READ, b, bh);
/* length == 0 path: metadata block, size read from its 2-byte header. */
225 if (index < 0 || (index + 2) > sblk->bytes_used)
228 if (!(bh[0] = get_block_length(s, &cur_index, &offset,
232 bytes = msblk->devblksize - offset;
233 compressed = SQUASHFS_COMPRESSED(c_byte);
234 c_buffer = compressed ? msblk->read_data : buffer;
235 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
237 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
238 ? "" : "un", (unsigned int) c_byte);
240 if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
243 for (b = 1; bytes < c_byte; b++) {
244 if (!(bh[b] = sb_getblk(s, ++cur_index)))
246 bytes += msblk->devblksize;
/* bh[0] was already read by get_block_length; submit the rest. */
248 ll_rw_block(READ, b - 1, bh + 1);
/* Serialise use of the shared read_data buffer and zlib stream. */
252 down(&msblk->read_data_mutex);
/* Gather the device blocks into a contiguous buffer. */
254 for (bytes = 0; k < b; k++) {
255 avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
256 msblk->devblksize - offset :
258 wait_on_buffer(bh[k]);
259 if (!buffer_uptodate(bh[k]))
261 memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
262 bytes += avail_bytes;
/* Decompress the staged data straight into the caller's buffer. */
273 msblk->stream.next_in = c_buffer;
274 msblk->stream.avail_in = c_byte;
275 msblk->stream.next_out = buffer;
276 //msblk->stream.avail_out = msblk->read_size;//srclength;
277 msblk->stream.avail_out = srclength;
279 if (((zlib_err = zlib_inflateInit(&msblk->stream)) != Z_OK) ||
280 ((zlib_err = zlib_inflate(&msblk->stream, Z_FINISH))
281 != Z_STREAM_END) || ((zlib_err =
282 zlib_inflateEnd(&msblk->stream)) != Z_OK)) {
283 //ERROR("zlib_fs returned unexpected result 0x%x\n",
285 ERROR("zlib_fs returned unexpected result 0x%x, srclength %d\n",
286 zlib_err, srclength);
289 bytes = msblk->stream.total_out;
291 up(&msblk->read_data_mutex);
/*
 * For metadata blocks (length == 0) skip the 2-byte length field, and
 * the extra marker byte on check_data filesystems, when reporting the
 * next block's position.
 */
295 *next_index = index + c_byte + (length ? 0 :
296 (SQUASHFS_CHECK_DATA(msblk->sblk.flags)
305 ERROR("sb_bread failed reading block 0x%x\n", cur_index);
/*
 * Copy "length" bytes of metadata starting at [block:offset] into
 * "buffer", reading through a small LRU-ish cache of decompressed
 * metadata blocks.  A request may span several metadata blocks; on
 * return *next_block/*next_offset point just past the data consumed.
 * Returns return_length (the requested length) on success; the dropped
 * error paths presumably return 0 -- TODO confirm against original.
 *
 * Concurrency (as visible here): block_cache_mutex guards the cache
 * array; SQUASHFS_USED_BLK marks an entry being filled so other
 * readers sleep on msblk->waitq until it is published or invalidated.
 */
310 SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
311 long long block, unsigned int offset,
312 int length, long long *next_block,
313 unsigned int *next_offset)
315 struct squashfs_sb_info *msblk = s->s_fs_info;
316 int n, i, bytes, return_length = length;
317 long long next_index;
319 TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
/* Fast path: is the block already cached? */
322 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
323 if (msblk->block_cache[i].block == block)
326 down(&msblk->block_cache_mutex);
328 if (i == SQUASHFS_CACHED_BLKS) {
329 /* read inode header block */
/* Find a victim slot that is not currently being filled. */
330 for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
331 n ; n --, i = (i + 1) %
332 SQUASHFS_CACHED_BLKS)
333 if (msblk->block_cache[i].block !=
/* All slots busy: sleep until another reader finishes. */
340 init_waitqueue_entry(&wait, current);
341 add_wait_queue(&msblk->waitq, &wait);
342 set_current_state(TASK_UNINTERRUPTIBLE);
343 up(&msblk->block_cache_mutex);
345 set_current_state(TASK_RUNNING);
346 remove_wait_queue(&msblk->waitq, &wait);
349 msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
/* Lazily allocate the slot's data buffer on first use. */
351 if (msblk->block_cache[i].block ==
352 SQUASHFS_INVALID_BLK) {
353 if (!(msblk->block_cache[i].data =
354 kmalloc(SQUASHFS_METADATA_SIZE,
356 ERROR("Failed to allocate cache"
358 up(&msblk->block_cache_mutex);
/* Claim the slot, then read+decompress outside the mutex. */
363 msblk->block_cache[i].block = SQUASHFS_USED_BLK;
364 up(&msblk->block_cache_mutex);
366 msblk->block_cache[i].length = squashfs_read_data(s,
367 msblk->block_cache[i].data, block, 0, &next_index, SQUASHFS_METADATA_SIZE);
368 if (msblk->block_cache[i].length == 0) {
369 ERROR("Unable to read cache block [%llx:%x]\n",
/* Read failed: invalidate the slot and wake any waiters. */
371 down(&msblk->block_cache_mutex);
372 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
373 kfree(msblk->block_cache[i].data);
374 wake_up(&msblk->waitq);
375 up(&msblk->block_cache_mutex);
/* Publish the freshly-read block and wake waiters. */
379 down(&msblk->block_cache_mutex);
380 wake_up(&msblk->waitq);
381 msblk->block_cache[i].block = block;
382 msblk->block_cache[i].next_index = next_index;
383 TRACE("Read cache block [%llx:%x]\n", block, offset);
/* Entry was evicted/raced away while we slept: retry (dropped goto). */
386 if (msblk->block_cache[i].block != block) {
387 up(&msblk->block_cache_mutex);
391 bytes = msblk->block_cache[i].length - offset;
394 up(&msblk->block_cache_mutex);
/* Request satisfied entirely from this cached block. */
396 } else if (bytes >= length) {
398 memcpy(buffer, msblk->block_cache[i].data +
400 if (msblk->block_cache[i].length - offset == length) {
401 *next_block = msblk->block_cache[i].next_index;
405 *next_offset = offset + length;
407 up(&msblk->block_cache_mutex);
/* Partial copy: continue into the next metadata block. */
411 memcpy(buffer, msblk->block_cache[i].data +
415 block = msblk->block_cache[i].next_index;
416 up(&msblk->block_cache_mutex);
423 return return_length;
/*
 * Look up fragment "fragment" in the fragment index table and return
 * its on-disk start block and (compressed-size-encoded) length via the
 * out parameters.  The two squashfs_get_cached_block() calls are
 * presumably the swapped/native-endian alternatives of a dropped
 * if (msblk->swap) -- TODO confirm against original source.
 */
429 static int get_fragment_location(struct super_block *s, unsigned int fragment,
430 long long *fragment_start_block,
431 unsigned int *fragment_size)
433 struct squashfs_sb_info *msblk = s->s_fs_info;
434 long long start_block =
435 msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
436 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
437 struct squashfs_fragment_entry fragment_entry;
440 struct squashfs_fragment_entry sfragment_entry;
/* Opposite-endian image: read raw then byte-swap the entry. */
442 if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
444 sizeof(sfragment_entry), &start_block,
447 SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
/* Native-endian image: read the entry directly. */
449 if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
451 sizeof(fragment_entry), &start_block,
455 *fragment_start_block = fragment_entry.start_block;
456 *fragment_size = fragment_entry.size;
/*
 * Drop a reference on a cached fragment and wake anyone waiting for a
 * free fragment-cache slot.  NOTE(review): the line that decrements
 * fragment->locked was dropped by extraction (original line 469).
 */
465 SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
466 squashfs_fragment_cache *fragment)
468 down(&msblk->fragment_mutex);
470 wake_up(&msblk->fragment_wait_queue);
471 up(&msblk->fragment_mutex);
/*
 * Return a locked (reference-counted) entry from the fragment cache
 * for the fragment starting at start_block, reading and decompressing
 * it if not already cached.  Callers pair this with
 * release_cached_fragment().  Mirrors the metadata block cache above:
 * fragment_mutex guards the array, fragment_wait_queue handles the
 * all-slots-locked case.
 */
475 SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
476 *s, long long start_block,
480 struct squashfs_sb_info *msblk = s->s_fs_info;
481 struct squashfs_super_block *sblk = &msblk->sblk;
484 down(&msblk->fragment_mutex);
/* Fast path: fragment already cached? */
486 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
487 msblk->fragment[i].block != start_block; i++);
489 if (i == SQUASHFS_CACHED_FRAGMENTS) {
/* Find an unlocked victim slot. */
490 for (i = msblk->next_fragment, n =
491 SQUASHFS_CACHED_FRAGMENTS; n &&
492 msblk->fragment[i].locked; n--, i = (i + 1) %
493 SQUASHFS_CACHED_FRAGMENTS);
/* Every slot is locked: sleep until one is released. */
498 init_waitqueue_entry(&wait, current);
499 add_wait_queue(&msblk->fragment_wait_queue,
501 set_current_state(TASK_UNINTERRUPTIBLE);
502 up(&msblk->fragment_mutex);
504 set_current_state(TASK_RUNNING);
505 remove_wait_queue(&msblk->fragment_wait_queue,
509 msblk->next_fragment = (msblk->next_fragment + 1) %
510 SQUASHFS_CACHED_FRAGMENTS;
/* Lazily allocate the slot's decompressed-data buffer. */
512 if (msblk->fragment[i].data == NULL)
513 if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
514 (SQUASHFS_FILE_MAX_SIZE))) {
515 ERROR("Failed to allocate fragment "
517 up(&msblk->fragment_mutex);
/* Claim the slot, then read outside the mutex. */
521 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
522 msblk->fragment[i].locked = 1;
523 up(&msblk->fragment_mutex);
525 if (!(msblk->fragment[i].length = squashfs_read_data(s,
526 msblk->fragment[i].data,
527 start_block, length, NULL, sblk->block_size))) {
528 ERROR("Unable to read fragment cache block "
529 "[%llx]\n", start_block);
530 msblk->fragment[i].locked = 0;
534 msblk->fragment[i].block = start_block;
535 TRACE("New fragment %d, start block %lld, locked %d\n",
536 i, msblk->fragment[i].block,
537 msblk->fragment[i].locked);
/* Cache hit: take an extra reference. */
541 msblk->fragment[i].locked++;
542 up(&msblk->fragment_mutex);
543 TRACE("Got fragment %d, start block %lld, locked %d\n", i,
544 msblk->fragment[i].block,
545 msblk->fragment[i].locked);
549 return &msblk->fragment[i];
/*
 * Fill in the generic VFS inode fields common to every squashfs inode
 * type from the on-disk base header: inode number, timestamps (squashfs
 * stores a single mtime used for atime/ctime too), uid/gid via the
 * in-memory id tables, and the base mode bits (type bits are OR-ed in
 * later by squashfs_read_inode).
 */
556 static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
557 struct squashfs_base_inode_header *inodeb)
559 i->i_ino = inodeb->inode_number;
560 i->i_mtime.tv_sec = inodeb->mtime;
561 i->i_atime.tv_sec = inodeb->mtime;
562 i->i_ctime.tv_sec = inodeb->mtime;
563 i->i_uid = msblk->uid[inodeb->uid];
564 i->i_mode = inodeb->mode;
/* SQUASHFS_GUIDS is the "no guid" sentinel; the dropped branch
 * presumably falls back to the uid -- TODO confirm against original. */
566 if (inodeb->guid == SQUASHFS_GUIDS)
569 i->i_gid = msblk->guid[inodeb->guid];
/*
 * Translate an inode number into the on-disk squashfs_inode_t location
 * via the export lookup table (used for NFS file-handle decoding).
 * Inode numbers are 1-based, hence the (ino - 1) indexing.  Returns
 * SQUASHFS_INVALID_BLK if the table read fails.
 */
573 static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino)
575 struct squashfs_sb_info *msblk = s->s_fs_info;
576 long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
577 int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1);
578 squashfs_inode_t inode;
580 TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino);
/* Opposite-endian image: read raw, then byte-swap. */
583 squashfs_inode_t sinode;
585 if (!squashfs_get_cached_block(s, (char *) &sinode, start, offset,
586 sizeof(sinode), &start, &offset))
588 SQUASHFS_SWAP_INODE_T((&inode), &sinode);
589 } else if (!squashfs_get_cached_block(s, (char *) &inode, start, offset,
590 sizeof(inode), &start, &offset))
593 TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode);
598 return SQUASHFS_INVALID_BLK;
/*
 * .read_inode hook for exportable mounts: map the VFS inode number to
 * its on-disk location via the lookup table, then fill the inode with
 * the version-specific reader (msblk->read_inode).
 */
602 static void vfs_read_inode(struct inode *i)
604 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
605 squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino);
607 TRACE("Entered vfs_read_inode\n");
609 if(inode != SQUASHFS_INVALID_BLK)
610 (msblk->read_inode)(i, inode);
/*
 * export_operations.get_parent: obtain the parent directory's dentry
 * using the parent inode number stored in each directory inode
 * (u.s2.parent_inode).  Returns ERR_PTR on iget/d_alloc_anon failure.
 */
614 static struct dentry *squashfs_get_parent(struct dentry *child)
616 struct inode *i = child->d_inode;
617 struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode);
620 TRACE("Entered squashfs_get_parent\n");
623 rv = ERR_PTR(-EACCES);
627 rv = d_alloc_anon(parent);
629 rv = ERR_PTR(-ENOMEM);
/*
 * Get (or create) the VFS inode for the given on-disk squashfs inode.
 * iget_locked() returns an existing inode or a new locked one; only a
 * new inode (I_NEW) is filled via the version-specific reader.
 */
636 SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number)
638 struct squashfs_sb_info *msblk = s->s_fs_info;
639 struct inode *i = iget_locked(s, inode_number);
641 TRACE("Entered squashfs_iget\n");
643 if(i && (i->i_state & I_NEW)) {
644 (msblk->read_inode)(i, inode);
/*
 * Fill a VFS inode from its on-disk representation (squashfs 3.x
 * layout).  The base header is read first to learn the inode type,
 * then the full type-specific header is re-read from the same
 * location and the inode ops/aops are wired up per type.  Throughout,
 * the paired get_cached_block calls are the swapped/native-endian
 * alternatives of dropped "if (msblk->swap)" tests, and the dropped
 * lines after each failed read presumably "goto failed_read" -- this
 * listing is missing many lines; compare against the original source.
 */
652 static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
654 struct super_block *s = i->i_sb;
655 struct squashfs_sb_info *msblk = s->s_fs_info;
656 struct squashfs_super_block *sblk = &msblk->sblk;
657 long long block = SQUASHFS_INODE_BLK(inode) +
658 sblk->inode_table_start;
659 unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
660 long long next_block;
661 unsigned int next_offset;
662 union squashfs_inode_header id, sid;
663 struct squashfs_base_inode_header *inodeb = &id.base,
664 *sinodeb = &sid.base;
666 TRACE("Entered squashfs_read_inode\n");
/* Read the common base header (endian-swapped or native). */
669 if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
670 offset, sizeof(*sinodeb), &next_block,
673 SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
676 if (!squashfs_get_cached_block(s, (char *) inodeb, block,
677 offset, sizeof(*inodeb), &next_block,
/* Common VFS fields (ino, times, uid/gid, mode). */
681 squashfs_new_inode(msblk, i, inodeb);
683 switch(inodeb->inode_type) {
/* Regular file (compact form, no nlink field). */
684 case SQUASHFS_FILE_TYPE: {
685 unsigned int frag_size;
687 struct squashfs_reg_inode_header *inodep = &id.reg;
688 struct squashfs_reg_inode_header *sinodep = &sid.reg;
691 if (!squashfs_get_cached_block(s, (char *)
692 sinodep, block, offset,
693 sizeof(*sinodep), &next_block,
696 SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
698 if (!squashfs_get_cached_block(s, (char *)
699 inodep, block, offset,
700 sizeof(*inodep), &next_block,
/* Resolve the tail-end fragment, if the file has one. */
704 frag_blk = SQUASHFS_INVALID_BLK;
705 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
706 !get_fragment_location(s,
707 inodep->fragment, &frag_blk, &frag_size))
711 i->i_size = inodep->file_size;
712 i->i_fop = &generic_ro_fops;
713 i->i_mode |= S_IFREG;
714 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
715 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
716 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
717 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
718 SQUASHFS_I(i)->start_block = inodep->start_block;
/* The block list immediately follows the inode header. */
719 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
720 SQUASHFS_I(i)->offset = next_offset;
/* Choose readpage implementation by filesystem block size. */
721 if (sblk->block_size > 4096)
722 i->i_data.a_ops = &squashfs_aops;
724 i->i_data.a_ops = &squashfs_aops_4K;
726 TRACE("File inode %x:%x, start_block %llx, "
727 "block_list_start %llx, offset %x\n",
728 SQUASHFS_INODE_BLK(inode), offset,
729 inodep->start_block, next_block,
/* Regular file (long form, carries an nlink count). */
733 case SQUASHFS_LREG_TYPE: {
734 unsigned int frag_size;
736 struct squashfs_lreg_inode_header *inodep = &id.lreg;
737 struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
740 if (!squashfs_get_cached_block(s, (char *)
741 sinodep, block, offset,
742 sizeof(*sinodep), &next_block,
745 SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
747 if (!squashfs_get_cached_block(s, (char *)
748 inodep, block, offset,
749 sizeof(*inodep), &next_block,
753 frag_blk = SQUASHFS_INVALID_BLK;
754 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
755 !get_fragment_location(s,
756 inodep->fragment, &frag_blk, &frag_size))
759 i->i_nlink = inodep->nlink;
760 i->i_size = inodep->file_size;
761 i->i_fop = &generic_ro_fops;
762 i->i_mode |= S_IFREG;
763 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
764 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
765 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
766 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
767 SQUASHFS_I(i)->start_block = inodep->start_block;
768 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
769 SQUASHFS_I(i)->offset = next_offset;
770 if (sblk->block_size > 4096)
771 i->i_data.a_ops = &squashfs_aops;
773 i->i_data.a_ops = &squashfs_aops_4K;
775 TRACE("File inode %x:%x, start_block %llx, "
776 "block_list_start %llx, offset %x\n",
777 SQUASHFS_INODE_BLK(inode), offset,
778 inodep->start_block, next_block,
/* Directory (compact form, no index). */
782 case SQUASHFS_DIR_TYPE: {
783 struct squashfs_dir_inode_header *inodep = &id.dir;
784 struct squashfs_dir_inode_header *sinodep = &sid.dir;
787 if (!squashfs_get_cached_block(s, (char *)
788 sinodep, block, offset,
789 sizeof(*sinodep), &next_block,
792 SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
794 if (!squashfs_get_cached_block(s, (char *)
795 inodep, block, offset,
796 sizeof(*inodep), &next_block,
800 i->i_nlink = inodep->nlink;
801 i->i_size = inodep->file_size;
802 i->i_op = &squashfs_dir_inode_ops;
803 i->i_fop = &squashfs_dir_ops;
804 i->i_mode |= S_IFDIR;
805 SQUASHFS_I(i)->start_block = inodep->start_block;
806 SQUASHFS_I(i)->offset = inodep->offset;
807 SQUASHFS_I(i)->u.s2.directory_index_count = 0;
808 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
810 TRACE("Directory inode %x:%x, start_block %x, offset "
811 "%x\n", SQUASHFS_INODE_BLK(inode),
812 offset, inodep->start_block,
/* Large directory with a lookup index following the header. */
816 case SQUASHFS_LDIR_TYPE: {
817 struct squashfs_ldir_inode_header *inodep = &id.ldir;
818 struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
821 if (!squashfs_get_cached_block(s, (char *)
822 sinodep, block, offset,
823 sizeof(*sinodep), &next_block,
826 SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
829 if (!squashfs_get_cached_block(s, (char *)
830 inodep, block, offset,
831 sizeof(*inodep), &next_block,
835 i->i_nlink = inodep->nlink;
836 i->i_size = inodep->file_size;
837 i->i_op = &squashfs_dir_inode_ops;
838 i->i_fop = &squashfs_dir_ops;
839 i->i_mode |= S_IFDIR;
840 SQUASHFS_I(i)->start_block = inodep->start_block;
841 SQUASHFS_I(i)->offset = inodep->offset;
/* Index entries follow the header in the metadata stream. */
842 SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
843 SQUASHFS_I(i)->u.s2.directory_index_offset =
845 SQUASHFS_I(i)->u.s2.directory_index_count =
847 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
849 TRACE("Long directory inode %x:%x, start_block %x, "
851 SQUASHFS_INODE_BLK(inode), offset,
852 inodep->start_block, inodep->offset);
/* Symbolic link; target bytes follow the header. */
855 case SQUASHFS_SYMLINK_TYPE: {
856 struct squashfs_symlink_inode_header *inodep =
858 struct squashfs_symlink_inode_header *sinodep =
862 if (!squashfs_get_cached_block(s, (char *)
863 sinodep, block, offset,
864 sizeof(*sinodep), &next_block,
867 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
870 if (!squashfs_get_cached_block(s, (char *)
871 inodep, block, offset,
872 sizeof(*inodep), &next_block,
876 i->i_nlink = inodep->nlink;
877 i->i_size = inodep->symlink_size;
878 i->i_op = &page_symlink_inode_operations;
879 i->i_data.a_ops = &squashfs_symlink_aops;
880 i->i_mode |= S_IFLNK;
881 SQUASHFS_I(i)->start_block = next_block;
882 SQUASHFS_I(i)->offset = next_offset;
884 TRACE("Symbolic link inode %x:%x, start_block %llx, "
886 SQUASHFS_INODE_BLK(inode), offset,
887 next_block, next_offset);
/* Block and character device nodes. */
890 case SQUASHFS_BLKDEV_TYPE:
891 case SQUASHFS_CHRDEV_TYPE: {
892 struct squashfs_dev_inode_header *inodep = &id.dev;
893 struct squashfs_dev_inode_header *sinodep = &sid.dev;
896 if (!squashfs_get_cached_block(s, (char *)
897 sinodep, block, offset,
898 sizeof(*sinodep), &next_block,
901 SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
903 if (!squashfs_get_cached_block(s, (char *)
904 inodep, block, offset,
905 sizeof(*inodep), &next_block,
909 i->i_nlink = inodep->nlink;
910 i->i_mode |= (inodeb->inode_type ==
911 SQUASHFS_CHRDEV_TYPE) ? S_IFCHR :
913 init_special_inode(i, i->i_mode,
914 old_decode_dev(inodep->rdev));
916 TRACE("Device inode %x:%x, rdev %x\n",
917 SQUASHFS_INODE_BLK(inode), offset,
/* FIFOs and sockets carry no payload beyond the ipc header. */
921 case SQUASHFS_FIFO_TYPE:
922 case SQUASHFS_SOCKET_TYPE: {
923 struct squashfs_ipc_inode_header *inodep = &id.ipc;
924 struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
927 if (!squashfs_get_cached_block(s, (char *)
928 sinodep, block, offset,
929 sizeof(*sinodep), &next_block,
932 SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
934 if (!squashfs_get_cached_block(s, (char *)
935 inodep, block, offset,
936 sizeof(*inodep), &next_block,
940 i->i_nlink = inodep->nlink;
941 i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
942 ? S_IFIFO : S_IFSOCK;
943 init_special_inode(i, i->i_mode, 0);
/* Unknown type: corrupt or newer-format image. */
947 ERROR("Unknown inode type %d in squashfs_iget!\n",
/* Shared failure path (label dropped by extraction). */
955 ERROR("Unable to read inode [%llx:%x]\n", block, offset);
/*
 * Allocate and read the NFS-export inode lookup table into
 * msblk->inode_lookup_table.  The table is stored uncompressed
 * (SQUASHFS_COMPRESSED_BIT_BLOCK forces a raw read of "length" bytes).
 * The trailing loop byte-swaps each entry for opposite-endian images
 * (dropped "if (msblk->swap)" presumed).  Returns 0 on failure,
 * presumably 1 on success -- TODO confirm against original source.
 */
963 static int read_inode_lookup_table(struct super_block *s)
965 struct squashfs_sb_info *msblk = s->s_fs_info;
966 struct squashfs_super_block *sblk = &msblk->sblk;
967 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);
969 TRACE("In read_inode_lookup_table, length %d\n", length);
971 /* Allocate inode lookup table */
972 if (!(msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL))) {
973 ERROR("Failed to allocate inode lookup table\n");
977 if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
978 sblk->lookup_table_start, length |
979 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
980 ERROR("unable to read inode lookup table\n");
/* Opposite-endian image: swap each table entry in place. */
988 for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
989 SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
990 &msblk->inode_lookup_table[i], 1);
991 msblk->inode_lookup_table[i] = block;
/*
 * Allocate and read the fragment index table (one entry per group of
 * fragment entries) into msblk->fragment_index.  Same raw-read and
 * endian-swap pattern as read_inode_lookup_table above; the dropped
 * "if (msblk->swap)" around the swap loop is presumed.
 */
999 static int read_fragment_index_table(struct super_block *s)
1001 struct squashfs_sb_info *msblk = s->s_fs_info;
1002 struct squashfs_super_block *sblk = &msblk->sblk;
1003 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
1008 /* Allocate fragment index table */
1009 if (!(msblk->fragment_index = kmalloc(length, GFP_KERNEL))) {
1010 ERROR("Failed to allocate fragment index table\n");
1014 if (!squashfs_read_data(s, (char *) msblk->fragment_index,
1015 sblk->fragment_table_start, length |
1016 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
1017 ERROR("unable to read fragment index table\n");
/* Opposite-endian image: swap each index entry in place. */
1025 for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
1026 SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
1027 &msblk->fragment_index[i], 1);
1028 msblk->fragment_index[i] = fragment;
/*
 * Check the on-disk major/minor version and install the matching
 * reader callbacks in msblk.  The 3.x readers are set as defaults,
 * then overridden (in dropped lines) by the 1.0/2.0 compatibility
 * hooks when those are compiled in.  Returns 0 for unsupported
 * versions, presumably 1 otherwise -- TODO confirm against original.
 */
1036 static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
1038 struct squashfs_super_block *sblk = &msblk->sblk;
1040 msblk->read_inode = squashfs_read_inode;
1041 msblk->read_blocklist = read_blocklist;
1042 msblk->read_fragment_index_table = read_fragment_index_table;
1044 if (sblk->s_major == 1) {
1045 if (!squashfs_1_0_supported(msblk)) {
1046 SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
1047 "are unsupported\n");
1048 SERROR("Please recompile with "
1049 "Squashfs 1.0 support enabled\n");
1052 } else if (sblk->s_major == 2) {
1053 if (!squashfs_2_0_supported(msblk)) {
1054 SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
1055 "are unsupported\n");
1056 SERROR("Please recompile with "
1057 "Squashfs 2.0 support enabled\n");
/* Newer than this driver understands: refuse the mount. */
1060 } else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
1062 SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
1063 "filesystem\n", sblk->s_major, sblk->s_minor);
1064 SERROR("Please update your kernel\n");
/*
 * Mount-time superblock setup: allocate per-mount state, read and
 * validate the on-disk superblock (handling opposite endianness),
 * allocate the metadata/fragment caches and uid/gid tables, read the
 * fragment index and (optional) export lookup table, and instantiate
 * the root inode.  The kfree/vfree ladder at the bottom is the
 * goto-cleanup error path; its labels and return statements were
 * dropped by extraction -- compare against the original source.
 */
1072 static int squashfs_fill_super(struct super_block *s, void *data, int silent)
1074 struct squashfs_sb_info *msblk;
1075 struct squashfs_super_block *sblk;
1077 char b[BDEVNAME_SIZE];
1080 TRACE("Entered squashfs_read_superblock\n");
1082 if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
1084 ERROR("Failed to allocate superblock\n");
1087 memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
1088 msblk = s->s_fs_info;
/* zlib inflate workspace is shared by all reads on this mount. */
1089 if (!(msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
1090 ERROR("Failed to allocate zlib workspace\n");
1093 sblk = &msblk->sblk;
1095 msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
/* log2 of the device block size (devblksize is a power of two). */
1096 msblk->devblksize_log2 = ffz(~msblk->devblksize);
1098 init_MUTEX(&msblk->read_data_mutex);
1099 init_MUTEX(&msblk->read_page_mutex);
1100 init_MUTEX(&msblk->block_cache_mutex);
1101 init_MUTEX(&msblk->fragment_mutex);
1102 init_MUTEX(&msblk->meta_index_mutex);
1104 init_waitqueue_head(&msblk->waitq);
1105 init_waitqueue_head(&msblk->fragment_wait_queue);
/* Seed bytes_used so the superblock read passes its bounds check. */
1107 sblk->bytes_used = sizeof(struct squashfs_super_block);
1108 if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
1109 sizeof(struct squashfs_super_block) |
1110 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) {
1111 SERROR("unable to read superblock\n");
1115 /* Check it is a SQUASHFS superblock */
1117 if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
/* Swapped magic: image was built on an opposite-endian host. */
1118 if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
1119 struct squashfs_super_block ssblk;
1121 WARNING("Mounting a different endian SQUASHFS "
1122 "filesystem on %s\n", bdevname(s->s_bdev, b));
1124 SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
1125 memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
1128 SERROR("Can't find a SQUASHFS superblock on %s\n",
1129 bdevname(s->s_bdev, b));
1134 /* Check the MAJOR & MINOR versions */
1135 if(!supported_squashfs_filesystem(msblk, silent))
1138 /* Check the filesystem does not extend beyond the end of the
1140 if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
1143 /* Check the root inode for sanity */
1144 if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
1147 TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b))
1148 TRACE("Inodes are %scompressed\n",
1149 SQUASHFS_UNCOMPRESSED_INODES
1150 (sblk->flags) ? "un" : "");
1151 TRACE("Data is %scompressed\n",
1152 SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
1154 TRACE("Check data is %s present in the filesystem\n",
1155 SQUASHFS_CHECK_DATA(sblk->flags) ?
1157 TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
1158 TRACE("Block size %d\n", sblk->block_size);
1159 TRACE("Number of inodes %d\n", sblk->inodes);
1160 if (sblk->s_major > 1)
1161 TRACE("Number of fragments %d\n", sblk->fragments);
1162 TRACE("Number of uids %d\n", sblk->no_uids);
1163 TRACE("Number of gids %d\n", sblk->no_guids);
1164 TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
1165 TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
1166 if (sblk->s_major > 1)
1167 TRACE("sblk->fragment_table_start %llx\n",
1168 sblk->fragment_table_start);
1169 TRACE("sblk->uid_start %llx\n", sblk->uid_start);
/* Squashfs is read-only by definition. */
1171 s->s_flags |= MS_RDONLY;
1172 s->s_op = &squashfs_super_ops;
1174 /* Init inode_table block pointer array */
1175 if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1176 SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
1177 ERROR("Failed to allocate block cache\n");
1181 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
1182 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1184 msblk->next_cache = 0;
1186 /* Allocate read_data block */
1187 msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
1188 SQUASHFS_METADATA_SIZE :
1191 if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
1192 ERROR("Failed to allocate read_data block\n");
1196 /* Allocate read_page block */
1197 if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
1198 ERROR("Failed to allocate read_page block\n");
1202 /* Allocate uid and gid tables */
1203 if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1204 sizeof(unsigned int), GFP_KERNEL))) {
1205 ERROR("Failed to allocate uid/gid table\n");
/* gid table shares the uid allocation, offset past the uids. */
1208 msblk->guid = msblk->uid + sblk->no_uids;
/* Opposite-endian image: read raw into a VLA then swap into place.
 * NOTE(review): on-stack VLA sized by on-disk counts -- stack risk. */
1211 unsigned int suid[sblk->no_uids + sblk->no_guids];
1213 if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
1214 ((sblk->no_uids + sblk->no_guids) *
1215 sizeof(unsigned int)) |
1216 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1217 ERROR("unable to read uid/gid table\n");
1221 SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1222 sblk->no_guids), (sizeof(unsigned int) * 8));
/* Native-endian image: read the table directly. */
1224 if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1225 ((sblk->no_uids + sblk->no_guids) *
1226 sizeof(unsigned int)) |
1227 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
1228 ERROR("unable to read uid/gid table\n");
/* 1.0 images have no fragments; skip fragment cache setup. */
1233 if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1236 if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
1237 SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
1238 ERROR("Failed to allocate fragment block cache\n");
1242 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
1243 msblk->fragment[i].locked = 0;
1244 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1245 msblk->fragment[i].data = NULL;
1248 msblk->next_fragment = 0;
1250 /* Allocate and read fragment index table */
1251 if (msblk->read_fragment_index_table(s) == 0)
/* No lookup table: mount without NFS export support. */
1254 if(sblk->lookup_table_start == SQUASHFS_INVALID_BLK)
1257 /* Allocate and read inode lookup table */
1258 if (read_inode_lookup_table(s) == 0)
1261 s->s_op = &squashfs_export_super_ops;
1262 s->s_export_op = &squashfs_export_ops;
/* Instantiate the root inode and root dentry. */
1265 root = new_inode(s);
1266 if ((msblk->read_inode)(root, sblk->root_inode) == 0)
1268 insert_inode_hash(root);
1270 if ((s->s_root = d_alloc_root(root)) == NULL) {
1271 ERROR("Root inode create failed\n");
1276 TRACE("Leaving squashfs_read_super\n");
/* Error-path cleanup ladder (goto labels dropped by extraction). */
1280 kfree(msblk->inode_lookup_table);
1281 kfree(msblk->fragment_index);
1282 kfree(msblk->fragment);
1284 kfree(msblk->read_page);
1285 kfree(msblk->read_data);
1286 kfree(msblk->block_cache);
1287 kfree(msblk->fragment_index_2);
1288 vfree(msblk->stream.workspace);
1289 kfree(s->s_fs_info);
1290 s->s_fs_info = NULL;
/*
 * Report filesystem statistics for statfs(2).  All values come straight
 * from the cached on-disk superblock; the fs is read-only so free
 * counts are hard-wired to zero.
 */
1298 static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1300 struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
1301 struct squashfs_super_block *sblk = &msblk->sblk;
1303 TRACE("Entered squashfs_statfs\n");
1305 buf->f_type = SQUASHFS_MAGIC;
1306 buf->f_bsize = sblk->block_size;
/* Round bytes_used up to whole blocks. */
1307 buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
1308 buf->f_bfree = buf->f_bavail = 0;
1309 buf->f_files = sblk->inodes;
1311 buf->f_namelen = SQUASHFS_NAME_LEN;
/*
 * Read one page of a symlink target.  The target string lives in the
 * metadata cache, so the function first walks (and discards) the bytes
 * belonging to earlier pages, then copies this page's portion into the
 * kmapped page and zero-fills the remainder.
 */
1317 static int squashfs_symlink_readpage(struct file *file, struct page *page)
1319 struct inode *inode = page->mapping->host;
1320 int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
1321 long long block = SQUASHFS_I(inode)->start_block;
1322 int offset = SQUASHFS_I(inode)->offset;
1323 void *pageaddr = kmap(page);
1325 TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
1326 "%llx, offset %x\n", page->index,
1327 SQUASHFS_I(inode)->start_block,
1328 SQUASHFS_I(inode)->offset);
/* Skip over the bytes that belong to pages before this one (NULL dest discards). */
1330 for (length = 0; length < index; length += bytes) {
1331 if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
1332 block, offset, PAGE_CACHE_SIZE, &block,
1334 ERROR("Unable to read symbolic link [%llx:%x]\n", block,
1340 if (length != index) {
1341 ERROR("(squashfs_symlink_readpage) length != index\n");
/* Copy at most one page of the remaining target into the page. */
1346 bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
1347 i_size_read(inode) - length;
1349 if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
1350 offset, bytes, &block, &offset)))
1351 ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
/* Zero the tail so no stale data leaks into the page cache. */
1354 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1356 SetPageUptodate(page);
/*
 * Find a cached meta-index entry for this inode covering block `index`,
 * preferring the entry whose offset is closest to (but not past) index.
 * Runs under meta_index_mutex (2.6-era semaphore, down/up).  Returns
 * NULL when nothing usable is cached.
 */
1363 struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
1365 struct meta_index *meta = NULL;
1366 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1369 down(&msblk->meta_index_mutex);
1371 TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
/* Table is allocated lazily by empty_meta_index(); may not exist yet. */
1373 if(msblk->meta_index == NULL)
1376 for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
1377 if (msblk->meta_index[i].inode_number == inode->i_ino &&
1378 msblk->meta_index[i].offset >= offset &&
1379 msblk->meta_index[i].offset <= index &&
1380 msblk->meta_index[i].locked == 0) {
1381 TRACE("locate_meta_index: entry %d, offset %d\n", i,
1382 msblk->meta_index[i].offset);
1383 meta = &msblk->meta_index[i];
/* Raise the search floor so later matches must be at least this close. */
1384 offset = meta->offset;
1391 up(&msblk->meta_index_mutex);
/*
 * Claim a free meta-index cache slot for this inode.  Allocates the
 * SQUASHFS_META_NUMBER-entry table on first use, then scans round-robin
 * from next_meta_index for an unlocked slot; fails (elided branch) when
 * every slot is locked.  Runs under meta_index_mutex.
 */
1397 struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
1399 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1400 struct meta_index *meta = NULL;
1403 down(&msblk->meta_index_mutex);
1405 TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
/* Lazy first-use allocation of the whole slot table. */
1407 if(msblk->meta_index == NULL) {
1408 if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1409 SQUASHFS_META_NUMBER, GFP_KERNEL))) {
1410 ERROR("Failed to allocate meta_index\n");
1413 for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
1414 msblk->meta_index[i].inode_number = 0;
1415 msblk->meta_index[i].locked = 0;
1417 msblk->next_meta_index = 0;
/* Advance past locked slots; i counts down so the scan terminates. */
1420 for(i = SQUASHFS_META_NUMBER; i &&
1421 msblk->meta_index[msblk->next_meta_index].locked; i --)
1422 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1423 SQUASHFS_META_NUMBER;
1426 TRACE("empty_meta_index: failed!\n");
1430 TRACE("empty_meta_index: returned meta entry %d, %p\n",
1431 msblk->next_meta_index,
1432 &msblk->meta_index[msblk->next_meta_index]);
1434 meta = &msblk->meta_index[msblk->next_meta_index];
1435 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1436 SQUASHFS_META_NUMBER;
/* Tag the slot with the owning inode and starting offset. */
1438 meta->inode_number = inode->i_ino;
1439 meta->offset = offset;
1445 up(&msblk->meta_index_mutex);
1450 void release_meta_index(struct inode *inode, struct meta_index *meta)
/*
 * Read `blocks` block-list entries (4 bytes each, hence << 2) from the
 * metadata at *start_block/*offset into block_list, advancing the
 * cursor.  Two read paths are visible: a byte-swapping one via a stack
 * copy, and a direct one — the selecting conditional (likely an
 * endianness/swap check) is elided in this extract.  The final loop
 * sums the compressed sizes of the entries into `block` (the amount the
 * data pointer advances).
 */
1456 static int read_block_index(struct super_block *s, int blocks, char *block_list,
1457 long long *start_block, int *offset)
1459 struct squashfs_sb_info *msblk = s->s_fs_info;
1460 unsigned int *block_listp;
/* NOTE(review): VLA sized by caller-supplied `blocks`; callers cap it at SIZE >> 2. */
1464 char sblock_list[blocks << 2];
1466 if (!squashfs_get_cached_block(s, sblock_list, *start_block,
1467 *offset, blocks << 2, start_block, offset)) {
1468 ERROR("Unable to read block list [%llx:%x]\n",
1469 *start_block, *offset);
1472 SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
1473 ((unsigned int *)sblock_list), blocks);
/* Non-swapped path: read entries straight into the caller's buffer. */
1475 if (!squashfs_get_cached_block(s, block_list, *start_block,
1476 *offset, blocks << 2, start_block, offset)) {
1477 ERROR("Unable to read block list [%llx:%x]\n",
1478 *start_block, *offset);
/* Accumulate the on-disk compressed length covered by these entries. */
1482 for (block_listp = (unsigned int *) block_list; blocks;
1483 block_listp++, blocks --)
1484 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
/*
 * Choose the meta-index stride for a file of `blocks` data blocks:
 * scales with file size but is clamped to at most 8 (skip+1 where
 * skip caps at 7), so the index stays bounded for huge files.
 */
1495 static inline int calculate_skip(int blocks) {
1496 int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
1497 return skip >= 7 ? 7 : skip + 1;
/*
 * Resolve the metadata cursor (index block + offset) and data-block
 * address for data block `index` of a regular file, using the cached
 * meta-index to avoid re-walking the whole block list each time.
 * Walks forward from the nearest cached entry, reading block-list
 * chunks with read_block_index() and recording new meta entries as it
 * goes.  Returns the block number the resolved cursor corresponds to
 * (offset * SQUASHFS_META_INDEXES * skip); the caller walks the rest.
 */
1501 static int get_meta_index(struct inode *inode, int index,
1502 long long *index_block, int *index_offset,
1503 long long *data_block, char *block_list)
1505 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1506 struct squashfs_super_block *sblk = &msblk->sblk;
1507 int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
1509 struct meta_index *meta;
1510 struct meta_entry *meta_entry;
/* Start from the inode's own block-list cursor. */
1511 long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
1512 int cur_offset = SQUASHFS_I(inode)->offset;
1513 long long cur_data_block = SQUASHFS_I(inode)->start_block;
/* Convert the data-block index into meta-index granularity. */
1516 index /= SQUASHFS_META_INDEXES * skip;
1518 while ( offset < index ) {
1519 meta = locate_meta_index(inode, index, offset + 1);
/* Cache miss: claim a fresh slot to fill as we walk forward. */
1522 if ((meta = empty_meta_index(inode, offset + 1,
1526 if(meta->entries == 0)
/* Jump to the closest cached entry at or before the target. */
1528 offset = index < meta->offset + meta->entries ? index :
1529 meta->offset + meta->entries - 1;
1530 meta_entry = &meta->meta_entry[offset - meta->offset];
/* index_block is stored relative to inode_table_start. */
1531 cur_index_block = meta_entry->index_block + sblk->inode_table_start;
1532 cur_offset = meta_entry->offset;
1533 cur_data_block = meta_entry->data_block;
1534 TRACE("get_meta_index: offset %d, meta->offset %d, "
1535 "meta->entries %d\n", offset, meta->offset,
1537 TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
1538 " data_block 0x%llx\n", cur_index_block,
1539 cur_offset, cur_data_block);
/* Extend the meta entry, one stride of block-list entries at a time. */
1542 for (i = meta->offset + meta->entries; i <= index &&
1543 i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
1544 int blocks = skip * SQUASHFS_META_INDEXES;
/* block_list buffer is SIZE bytes, so read at most SIZE >> 2 entries per call. */
1547 int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
1549 int res = read_block_index(inode->i_sb, block,
1550 block_list, &cur_index_block,
/* res is the compressed bytes covered; advance the data cursor. */
1556 cur_data_block += res;
/* Record the newly computed position in the meta-index cache. */
1560 meta_entry = &meta->meta_entry[i - meta->offset];
1561 meta_entry->index_block = cur_index_block - sblk->inode_table_start;
1562 meta_entry->offset = cur_offset;
1563 meta_entry->data_block = cur_data_block;
1568 TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
1569 meta->offset, meta->entries);
1571 release_meta_index(inode, meta);
1575 *index_block = cur_index_block;
1576 *index_offset = cur_offset;
1577 *data_block = cur_data_block;
1579 return offset * SQUASHFS_META_INDEXES * skip;
/* Error path: drop the slot lock before bailing out. */
1582 release_meta_index(inode, meta);
/*
 * Look up the on-disk location and compressed size (*bsize) of data
 * block `index` of a file.  Uses get_meta_index() to get close, then
 * walks the remaining block-list entries in SIZE-byte chunks and reads
 * the final entry into block_list to extract its size word.
 */
1587 static long long read_blocklist(struct inode *inode, int index,
1588 int readahead_blks, char *block_list,
1589 unsigned short **block_p, unsigned int *bsize)
1591 long long block_ptr;
1594 int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
1597 TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
1598 " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
/* Walk the remaining entries, bounded by the block_list buffer size. */
1607 int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
1608 int res = read_block_index(inode->i_sb, blocks, block_list,
1609 &block_ptr, &offset);
/* Read the target block's own entry to obtain its compressed size. */
1616 if (read_block_index(inode->i_sb, 1, block_list,
1617 &block_ptr, &offset) == -1)
1619 *bsize = *((unsigned int *) block_list);
/*
 * address_space readpage for block_size > PAGE_CACHE_SIZE images.  A
 * single compressed block decompresses into msblk->read_page (guarded
 * by read_page_mutex) or is served from the fragment cache for a file
 * tail, then the loop pushes every page covered by that block into the
 * page cache in one go (grab_cache_page_nowait for the siblings).
 */
1628 static int squashfs_readpage(struct file *file, struct page *page)
1630 struct inode *inode = page->mapping->host;
1631 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1632 struct squashfs_super_block *sblk = &msblk->sblk;
1633 unsigned char *block_list;
1635 unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
/* Which data block of the file this page falls in. */
1636 int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
1638 struct squashfs_fragment_cache *fragment = NULL;
1639 char *data_ptr = msblk->read_page;
/* Page range [start_index, end_index] covered by the same data block. */
1641 int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
1642 int start_index = page->index & ~mask;
1643 int end_index = start_index | mask;
1645 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
1647 SQUASHFS_I(inode)->start_block);
1649 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1650 ERROR("Failed to allocate block_list\n");
/* Page beyond EOF — nothing to read. */
1654 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
/* Regular block (not the fragment tail): locate and decompress it. */
1658 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1659 || index < (i_size_read(inode) >>
1661 if ((block = (msblk->read_blocklist)(inode, index, 1,
1662 block_list, NULL, &bsize)) == 0)
/* read_page is a single shared buffer — serialise decompression into it. */
1665 down(&msblk->read_page_mutex);
1667 if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
1668 block, bsize, NULL, sblk->block_size))) {
1669 ERROR("Unable to read page, block %llx, size %x\n", block,
1671 up(&msblk->read_page_mutex);
/* Tail-end fragment: serve the data from the fragment cache instead. */
1675 if ((fragment = get_cached_fragment(inode->i_sb,
1677 u.s1.fragment_start_block,
1678 SQUASHFS_I(inode)->u.s1.fragment_size))
1680 ERROR("Unable to read page, block %llx, size %x\n",
1682 u.s1.fragment_start_block,
1683 (int) SQUASHFS_I(inode)->
1684 u.s1.fragment_size);
1687 bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
1688 (i_size_read(inode) & (sblk->block_size
1690 byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
1691 data_ptr = fragment->data;
/* Fan the decompressed block out to every page it covers. */
1694 for (i = start_index; i <= end_index && byte_offset < bytes;
1695 i++, byte_offset += PAGE_CACHE_SIZE) {
1696 struct page *push_page;
1697 int avail = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
1698 PAGE_CACHE_SIZE : bytes - byte_offset;
1700 TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
1701 bytes, i, byte_offset, avail);
/* The faulting page is already locked; siblings are grabbed opportunistically. */
1703 push_page = (i == page->index) ? page :
1704 grab_cache_page_nowait(page->mapping, i);
1709 if (PageUptodate(push_page))
1712 pageaddr = kmap_atomic(push_page, KM_USER0);
1713 memcpy(pageaddr, data_ptr + byte_offset, avail);
1714 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
1715 kunmap_atomic(pageaddr, KM_USER0);
1716 flush_dcache_page(push_page);
1717 SetPageUptodate(push_page);
1719 unlock_page(push_page);
1720 if(i != page->index)
1721 page_cache_release(push_page);
/* Release whichever source we used: the shared buffer or the fragment. */
1724 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1725 || index < (i_size_read(inode) >>
1727 up(&msblk->read_page_mutex);
1729 release_cached_fragment(msblk, fragment);
/* Error/EOF path: hand back a zero-filled, uptodate page. */
1735 pageaddr = kmap_atomic(page, KM_USER0);
1736 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1737 kunmap_atomic(pageaddr, KM_USER0);
1738 flush_dcache_page(page);
1739 SetPageUptodate(page);
/*
 * address_space readpage for block_size == PAGE_CACHE_SIZE (4K) images:
 * one data block maps to exactly one page, so no sibling-page fan-out
 * is needed.  Regular blocks decompress via the shared read_page buffer
 * (read_page_mutex); the file tail comes from the fragment cache.
 */
1747 static int squashfs_readpage4K(struct file *file, struct page *page)
1749 struct inode *inode = page->mapping->host;
1750 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1751 struct squashfs_super_block *sblk = &msblk->sblk;
1752 unsigned char *block_list;
1754 unsigned int bsize, bytes = 0;
1757 TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
1759 SQUASHFS_I(inode)->start_block);
/* Beyond EOF: skip straight to the zero-fill tail. */
1761 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1762 PAGE_CACHE_SHIFT)) {
1767 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1768 ERROR("Failed to allocate block_list\n");
/* Regular block (not the fragment tail). */
1772 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1773 || page->index < (i_size_read(inode) >>
1775 block = (msblk->read_blocklist)(inode, page->index, 1,
1776 block_list, NULL, &bsize);
1780 down(&msblk->read_page_mutex);
1781 bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1782 bsize, NULL, sblk->block_size);
1784 pageaddr = kmap_atomic(page, KM_USER0);
1785 memcpy(pageaddr, msblk->read_page, bytes);
1786 kunmap_atomic(pageaddr, KM_USER0);
1788 ERROR("Unable to read page, block %llx, size %x\n",
1790 up(&msblk->read_page_mutex);
/* Fragment path: copy the tail bytes out of the cached fragment. */
1792 struct squashfs_fragment_cache *fragment =
1793 get_cached_fragment(inode->i_sb,
1795 u.s1.fragment_start_block,
1796 SQUASHFS_I(inode)-> u.s1.fragment_size);
1798 bytes = i_size_read(inode) & (sblk->block_size - 1);
1799 pageaddr = kmap_atomic(page, KM_USER0);
1800 memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
1801 u.s1.fragment_offset, bytes);
1802 kunmap_atomic(pageaddr, KM_USER0);
1803 release_cached_fragment(msblk, fragment);
1805 ERROR("Unable to read page, block %llx, size %x\n",
1807 u.s1.fragment_start_block, (int)
1808 SQUASHFS_I(inode)-> u.s1.fragment_size);
/* Zero the unread tail and publish the page. */
1812 pageaddr = kmap_atomic(page, KM_USER0);
1813 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1814 kunmap_atomic(pageaddr, KM_USER0);
1815 flush_dcache_page(page);
1816 SetPageUptodate(page);
/*
 * Fast-forward a directory cursor (*next_block/*next_offset) using the
 * directory's index entries, stopping at the last index whose position
 * is <= f_pos.  Two read paths are visible (byte-swapping via a stack
 * copy, and direct); the selecting conditional is elided in this
 * extract.  Returns the directory offset actually skipped to.
 */
1824 static int get_dir_index_using_offset(struct super_block *s, long long
1825 *next_block, unsigned int *next_offset,
1826 long long index_start,
1827 unsigned int index_offset, int i_count,
1830 struct squashfs_sb_info *msblk = s->s_fs_info;
1831 struct squashfs_super_block *sblk = &msblk->sblk;
1833 struct squashfs_dir_index index;
1835 TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
1836 i_count, (unsigned int) f_pos);
1842 for (i = 0; i < i_count; i++) {
/* Swapped path: read the raw entry, then byte-swap into `index`. */
1844 struct squashfs_dir_index sindex;
1845 squashfs_get_cached_block(s, (char *) &sindex,
1846 index_start, index_offset,
1847 sizeof(sindex), &index_start,
1849 SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
/* Native-endian path: read the entry directly. */
1851 squashfs_get_cached_block(s, (char *) &index,
1852 index_start, index_offset,
1853 sizeof(index), &index_start,
/* Stop before the first index past the requested position. */
1856 if (index.index > f_pos)
/* Skip the index's name string (size is stored minus one). */
1859 squashfs_get_cached_block(s, NULL, index_start, index_offset,
1860 index.size + 1, &index_start,
1863 length = index.index;
1864 *next_block = index.start_block + sblk->directory_table_start;
1867 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * Fast-forward a directory cursor using the index entries, stopping at
 * the last index whose name sorts <= the target name (directory entries
 * are stored sorted).  One kmalloc holds both the NUL-terminated copy
 * of the target (`str`) and the index scratch struct.  As above, both
 * the swapped and direct read paths are visible; the selector is elided.
 */
1874 static int get_dir_index_using_name(struct super_block *s, long long
1875 *next_block, unsigned int *next_offset,
1876 long long index_start,
1877 unsigned int index_offset, int i_count,
1878 const char *name, int size)
1880 struct squashfs_sb_info *msblk = s->s_fs_info;
1881 struct squashfs_super_block *sblk = &msblk->sblk;
1883 struct squashfs_dir_index *index;
1886 TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
1888 if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
1889 (SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
1890 ERROR("Failed to allocate squashfs_dir_index\n");
/* Carve the single allocation: name copy first, index struct after it. */
1894 index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
/* NOTE(review): strncpy with `size` — the NUL terminator presumably comes
 * from an elided line (e.g. str[size] = '\0'); TODO confirm. */
1895 strncpy(str, name, size);
1898 for (i = 0; i < i_count; i++) {
1900 struct squashfs_dir_index sindex;
1901 squashfs_get_cached_block(s, (char *) &sindex,
1902 index_start, index_offset,
1903 sizeof(sindex), &index_start,
1905 SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
1907 squashfs_get_cached_block(s, (char *) index,
1908 index_start, index_offset,
1909 sizeof(struct squashfs_dir_index),
1910 &index_start, &index_offset);
/* Read the index's name (on-disk size is stored minus one). */
1912 squashfs_get_cached_block(s, index->name, index_start,
1913 index_offset, index->size + 1,
1914 &index_start, &index_offset);
1916 index->name[index->size + 1] = '\0';
/* Stop before the first index name sorting past the target. */
1918 if (strcmp(index->name, str) > 0)
1921 length = index->index;
1922 *next_block = index->start_block + sblk->directory_table_start;
1925 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * readdir: emits "." and ".." synthetically (squashfs does not store
 * them; f_pos 0-2 covers them), fast-forwards with the directory index,
 * then walks directory headers and entries, calling filldir for each
 * entry at or past f_pos.  Both swapped and direct metadata read paths
 * are visible; the selecting conditionals are elided in this extract.
 */
1932 static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
1934 struct inode *i = file->f_dentry->d_inode;
1935 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1936 struct squashfs_super_block *sblk = &msblk->sblk;
1937 long long next_block = SQUASHFS_I(i)->start_block +
1938 sblk->directory_table_start;
1939 int next_offset = SQUASHFS_I(i)->offset, length = 0,
1941 struct squashfs_dir_header dirh;
1942 struct squashfs_dir_entry *dire;
1944 TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
/* Scratch entry with room for the longest possible name + NUL. */
1946 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1947 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1948 ERROR("Failed to allocate squashfs_dir_entry\n");
/* Synthesize "." and ".." — f_pos < 3 covers both pseudo entries. */
1952 while(file->f_pos < 3) {
1956 if(file->f_pos == 0) {
1963 i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
1965 TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
1966 (unsigned int) dirent, name, size, (int)
1968 squashfs_filetype_table[1]);
1970 if (filldir(dirent, name, size,
1972 squashfs_filetype_table[1]) < 0) {
1973 TRACE("Filldir returned less than 0\n");
1976 file->f_pos += size;
/* Use the directory index to skip straight to f_pos. */
1979 length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
1980 SQUASHFS_I(i)->u.s2.directory_index_start,
1981 SQUASHFS_I(i)->u.s2.directory_index_offset,
1982 SQUASHFS_I(i)->u.s2.directory_index_count,
1985 while (length < i_size_read(i)) {
1986 /* read directory header */
1988 struct squashfs_dir_header sdirh;
1990 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
1991 next_block, next_offset, sizeof(sdirh),
1992 &next_block, &next_offset))
1995 length += sizeof(sdirh);
1996 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
1998 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
1999 next_block, next_offset, sizeof(dirh),
2000 &next_block, &next_offset))
2003 length += sizeof(dirh);
/* On-disk count is stored minus one. */
2006 dir_count = dirh.count + 1;
2007 while (dir_count--) {
2009 struct squashfs_dir_entry sdire;
2010 if (!squashfs_get_cached_block(i->i_sb, (char *)
2011 &sdire, next_block, next_offset,
2012 sizeof(sdire), &next_block,
2016 length += sizeof(sdire);
2017 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2019 if (!squashfs_get_cached_block(i->i_sb, (char *)
2020 dire, next_block, next_offset,
2021 sizeof(*dire), &next_block,
2025 length += sizeof(*dire);
2028 if (!squashfs_get_cached_block(i->i_sb, dire->name,
2029 next_block, next_offset,
2030 dire->size + 1, &next_block,
2034 length += dire->size + 1;
/* Entry precedes f_pos (already emitted on an earlier call) — skip it. */
2036 if (file->f_pos >= length)
2039 dire->name[dire->size + 1] = '\0';
2041 TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
2042 (unsigned int) dirent, dire->name,
2043 dire->size + 1, (int) file->f_pos,
2044 dirh.start_block, dire->offset,
2045 dirh.inode_number + dire->inode_number,
2046 squashfs_filetype_table[dire->type]);
2048 if (filldir(dirent, dire->name, dire->size + 1,
2050 dirh.inode_number + dire->inode_number,
2051 squashfs_filetype_table[dire->type])
2053 TRACE("Filldir returned less than 0\n");
2056 file->f_pos = length;
2065 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * Directory lookup: fast-forwards with the name-sorted directory index,
 * then scans entries until the name matches (entries are sorted, so a
 * first-character comparison allows early exit).  On a match, builds
 * the inode number from the directory header + entry and instantiates
 * the inode with squashfs_iget().  Swapped/direct read paths both
 * visible; their selecting conditionals are elided in this extract.
 */
2072 static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
2073 struct nameidata *nd)
2075 const unsigned char *name = dentry->d_name.name;
2076 int len = dentry->d_name.len;
2077 struct inode *inode = NULL;
2078 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
2079 struct squashfs_super_block *sblk = &msblk->sblk;
2080 long long next_block = SQUASHFS_I(i)->start_block +
2081 sblk->directory_table_start;
2082 int next_offset = SQUASHFS_I(i)->offset, length = 0,
2084 struct squashfs_dir_header dirh;
2085 struct squashfs_dir_entry *dire;
2087 TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
2089 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
2090 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
2091 ERROR("Failed to allocate squashfs_dir_entry\n");
/* Names longer than the format allows cannot exist — fail fast. */
2095 if (len > SQUASHFS_NAME_LEN)
2098 length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
2099 SQUASHFS_I(i)->u.s2.directory_index_start,
2100 SQUASHFS_I(i)->u.s2.directory_index_offset,
2101 SQUASHFS_I(i)->u.s2.directory_index_count, name,
2104 while (length < i_size_read(i)) {
2105 /* read directory header */
2107 struct squashfs_dir_header sdirh;
2108 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
2109 next_block, next_offset, sizeof(sdirh),
2110 &next_block, &next_offset))
2113 length += sizeof(sdirh);
2114 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
2116 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
2117 next_block, next_offset, sizeof(dirh),
2118 &next_block, &next_offset))
2121 length += sizeof(dirh);
/* On-disk count is stored minus one. */
2124 dir_count = dirh.count + 1;
2125 while (dir_count--) {
2127 struct squashfs_dir_entry sdire;
2128 if (!squashfs_get_cached_block(i->i_sb, (char *)
2129 &sdire, next_block,next_offset,
2130 sizeof(sdire), &next_block,
2134 length += sizeof(sdire);
2135 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2137 if (!squashfs_get_cached_block(i->i_sb, (char *)
2138 dire, next_block,next_offset,
2139 sizeof(*dire), &next_block,
2143 length += sizeof(*dire);
2146 if (!squashfs_get_cached_block(i->i_sb, dire->name,
2147 next_block, next_offset, dire->size + 1,
2148 &next_block, &next_offset))
2151 length += dire->size + 1;
/* Entries are sorted: once past the target's first byte, stop early. */
2153 if (name[0] < dire->name[0])
2156 if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
2157 squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
2160 TRACE("calling squashfs_iget for directory "
2161 "entry %s, inode %x:%x, %d\n", name,
2162 dirh.start_block, dire->offset,
2163 dirh.inode_number + dire->inode_number);
2165 inode = squashfs_iget(i->i_sb, ino, dirh.inode_number + dire->inode_number);
/* Two dentry-instantiation variants visible (export vs. plain); selector elided. */
2175 return d_splice_alias(inode, dentry);
2176 d_add(dentry, inode);
2180 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * Unmount: free every per-mount structure hung off s_fs_info — cached
 * block/fragment data, scratch buffers, index tables, the meta-index
 * cache and the zlib workspace — then the sb_info itself.  kfree(NULL)
 * is a no-op, so partially-populated mounts are safe here.
 */
2186 static void squashfs_put_super(struct super_block *s)
2191 struct squashfs_sb_info *sbi = s->s_fs_info;
/* Only slots holding a valid block own a data buffer. */
2192 if (sbi->block_cache)
2193 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
2194 if (sbi->block_cache[i].block !=
2195 SQUASHFS_INVALID_BLK)
2196 kfree(sbi->block_cache[i].data);
2198 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
2199 SQUASHFS_FREE(sbi->fragment[i].data);
2200 kfree(sbi->fragment);
2201 kfree(sbi->block_cache);
2202 kfree(sbi->read_data);
2203 kfree(sbi->read_page);
2205 kfree(sbi->fragment_index);
2206 kfree(sbi->fragment_index_2);
2207 kfree(sbi->meta_index);
/* Workspace was vmalloc'ed, so vfree — not kfree. */
2208 vfree(sbi->stream.workspace);
2209 kfree(s->s_fs_info);
2210 s->s_fs_info = NULL;
/*
 * get_sb hook (pre-mount-API kernels): delegate to get_sb_bdev, which
 * opens the block device and calls squashfs_fill_super.
 */
2215 static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
2216 const char *dev_name, void *data,
2217 struct vfsmount *mnt)
2219 return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
/*
 * Module init: create the inode slab cache, then register the
 * filesystem; tear the cache down again if registration fails.
 */
2224 static int __init init_squashfs_fs(void)
2226 int err = init_inodecache();
2230 printk(KERN_INFO "squashfs: version 3.2-alpha (2006/12/12) "
2231 "Phillip Lougher\n");
2233 if ((err = register_filesystem(&squashfs_fs_type)))
2234 destroy_inodecache();
/* Module exit: unregister the filesystem and destroy the inode cache. */
2241 static void __exit exit_squashfs_fs(void)
2243 unregister_filesystem(&squashfs_fs_type);
2244 destroy_inodecache();
/* Slab cache backing all squashfs in-memory inodes. */
2248 static struct kmem_cache *squashfs_inode_cachep;
/*
 * super_operations.alloc_inode: carve a squashfs_inode_info from the
 * slab cache and hand the VFS its embedded vfs_inode.
 */
2251 static struct inode *squashfs_alloc_inode(struct super_block *sb)
2253 struct squashfs_inode_info *ei;
2254 ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
2257 return &ei->vfs_inode;
/* super_operations.destroy_inode: return the containing info struct to the slab. */
2261 static void squashfs_destroy_inode(struct inode *inode)
2263 kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
/*
 * Slab constructor: initialise the embedded VFS inode exactly once per
 * object (the SLAB_CTOR_CONSTRUCTOR check is the pre-2.6.22 idiom for
 * distinguishing construction from verification passes).
 */
2267 static void init_once(void * foo, struct kmem_cache *cachep, unsigned long flags)
2269 struct squashfs_inode_info *ei = foo;
2271 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2272 SLAB_CTOR_CONSTRUCTOR)
2273 inode_init_once(&ei->vfs_inode);
/*
 * Create the squashfs inode slab cache (init_once as constructor,
 * presumably — the ctor argument line is elided in this extract).
 */
2277 static int __init init_inodecache(void)
2279 squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
2280 sizeof(struct squashfs_inode_info),
2281 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
2283 if (squashfs_inode_cachep == NULL)
/* Destroy the inode slab cache on module unload / failed init. */
2289 static void destroy_inodecache(void)
2291 kmem_cache_destroy(squashfs_inode_cachep);
/* Module entry/exit hooks and metadata. */
2295 module_init(init_squashfs_fs);
2296 module_exit(exit_squashfs_fs);
2297 MODULE_DESCRIPTION("squashfs 3.2, a compressed read-only filesystem");
2298 MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
2299 MODULE_LICENSE("GPL");