2 * Squashfs - a compressed read only filesystem for Linux
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006
5 * Phillip Lougher <phillip@lougher.org.uk>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24 #include <linux/types.h>
25 #include <linux/squashfs_fs.h>
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/zlib.h>
31 #include <linux/smp_lock.h>
32 #include <linux/slab.h>
33 #include <linux/squashfs_fs_sb.h>
34 #include <linux/squashfs_fs_i.h>
35 #include <linux/buffer_head.h>
36 #include <linux/vfs.h>
37 #include <linux/init.h>
38 #include <linux/dcache.h>
39 #include <linux/wait.h>
40 #include <linux/blkdev.h>
41 #include <linux/vmalloc.h>
42 #include <asm/uaccess.h>
43 #include <asm/semaphore.h>
/*
 * Forward declarations for the VFS entry points (super/inode/file/address
 * space operations) and internal helpers defined later in this file.
 */
47 static void squashfs_put_super(struct super_block *);
48 static int squashfs_statfs(struct dentry *, struct kstatfs *);
49 static int squashfs_symlink_readpage(struct file *file, struct page *page);
50 static int squashfs_readpage(struct file *file, struct page *page);
51 static int squashfs_readpage4K(struct file *file, struct page *page);
52 static int squashfs_readdir(struct file *, void *, filldir_t);
53 static struct inode *squashfs_alloc_inode(struct super_block *sb);
54 static void squashfs_destroy_inode(struct inode *inode);
55 static int init_inodecache(void);
56 static void destroy_inodecache(void);
57 static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
59 static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode);
60 static long long read_blocklist(struct inode *inode, int index,
61 int readahead_blks, char *block_list,
62 unsigned short **block_p, unsigned int *bsize);
63 static int squashfs_get_sb(struct file_system_type *, int,
64 const char *, void *, struct vfsmount *);
/* Filesystem type registration: read-only, backed by a block device. */
66 static struct file_system_type squashfs_fs_type = {
69 .get_sb = squashfs_get_sb,
70 .kill_sb = kill_block_super,
71 .fs_flags = FS_REQUIRES_DEV
/*
 * Maps on-disk squashfs inode type codes to the DT_* dirent type values
 * returned by readdir.  Index order must match the SQUASHFS_*_TYPE codes.
 */
74 static unsigned char squashfs_filetype_table[] = {
75 DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
/* Superblock operations — read-only, so no write/sync hooks. */
78 static struct super_operations squashfs_ops = {
79 .alloc_inode = squashfs_alloc_inode,
80 .destroy_inode = squashfs_destroy_inode,
81 .statfs = squashfs_statfs,
82 .put_super = squashfs_put_super,
/* Address space ops for symlink target pages. */
85 SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
86 .readpage = squashfs_symlink_readpage
/* Address space ops for regular files with block size > 4K. */
89 SQSH_EXTERN struct address_space_operations squashfs_aops = {
90 .readpage = squashfs_readpage
/* Address space ops for regular files with block size <= 4K (fast path). */
93 SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
94 .readpage = squashfs_readpage4K
/* Directory file operations: readdir only, reads go through generic stub. */
97 static struct file_operations squashfs_dir_ops = {
98 .read = generic_read_dir,
99 .readdir = squashfs_readdir
/* Directory inode operations: name lookup only (read-only fs). */
102 SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
103 .lookup = squashfs_lookup
/*
 * Read the two-byte length field that precedes a metadata block on the
 * block device at *cur_index/*offset, assembling it byte-by-byte into
 * *c_byte.  Handles the case where the two bytes straddle a device block
 * boundary (second sb_bread() needed) and both byte orders.
 * Returns the buffer_head holding the following data, or NULL on failure.
 * NOTE(review): several branch/else lines are not visible in this listing;
 * the byte-order selection presumably keys off msblk->swap — confirm
 * against the full source.
 */
107 static struct buffer_head *get_block_length(struct super_block *s,
108 int *cur_index, int *offset, int *c_byte)
110 struct squashfs_sb_info *msblk = s->s_fs_info;
112 struct buffer_head *bh;
114 if (!(bh = sb_bread(s, *cur_index)))
/* Only one byte left in this device block: the two-byte length field
 * straddles a block boundary, so read the second byte from the next
 * device block. */
117 if (msblk->devblksize - *offset == 1) {
119 ((unsigned char *) &temp)[1] = *((unsigned char *)
120 (bh->b_data + *offset));
122 ((unsigned char *) &temp)[0] = *((unsigned char *)
123 (bh->b_data + *offset));
125 if (!(bh = sb_bread(s, ++(*cur_index))))
128 ((unsigned char *) &temp)[0] = *((unsigned char *)
131 ((unsigned char *) &temp)[1] = *((unsigned char *)
/* Both bytes available in the current device block; assemble in the
 * appropriate byte order. */
137 ((unsigned char *) &temp)[1] = *((unsigned char *)
138 (bh->b_data + *offset));
139 ((unsigned char *) &temp)[0] = *((unsigned char *)
140 (bh->b_data + *offset + 1));
142 ((unsigned char *) &temp)[0] = *((unsigned char *)
143 (bh->b_data + *offset));
144 ((unsigned char *) &temp)[1] = *((unsigned char *)
145 (bh->b_data + *offset + 1));
/* With CHECK_DATA set, each metadata block is followed by a marker
 * byte; a mismatch indicates on-disk corruption. */
151 if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
152 if (*offset == msblk->devblksize) {
154 if (!(bh = sb_bread(s, ++(*cur_index))))
158 if (*((unsigned char *) (bh->b_data + *offset)) !=
159 SQUASHFS_MARKER_BYTE) {
160 ERROR("Metadata block marker corrupt @ %x\n",
/*
 * Core block-device read routine.  Reads a (possibly compressed) block at
 * byte offset 'index' into 'buffer' (at most 'srclength' bytes of output).
 * A non-zero 'length' means the caller already knows the on-disk size
 * (data block path); length == 0 means the size must first be read from
 * the two-byte field preceding the block (metadata path, via
 * get_block_length()).  Compressed blocks are staged in msblk->read_data
 * and inflated with zlib under read_data_mutex.  On success the number of
 * output bytes is returned and, if next_index is non-NULL, it is set to
 * the byte offset just past this block.  Returns 0 on failure.
 * NOTE(review): error-path labels and several returns are not visible in
 * this listing.
 */
174 SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
175 long long index, unsigned int length,
176 long long *next_index, int srclength)
178 struct squashfs_sb_info *msblk = s->s_fs_info;
179 struct squashfs_super_block *sblk = &msblk->sblk;
/* Worst-case number of device blocks a squashfs block can span. */
180 struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
181 msblk->devblksize_log2) + 2];
182 unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
183 unsigned int cur_index = index >> msblk->devblksize_log2;
184 int bytes, avail_bytes, b = 0, k = 0;
186 unsigned int compressed;
187 unsigned int c_byte = length;
/* Data-block path: the length word encodes both size and compressed flag. */
190 bytes = msblk->devblksize - offset;
191 compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
/* Compressed data is staged in the scratch buffer; uncompressed data is
 * read straight into the caller's buffer. */
192 c_buffer = compressed ? msblk->read_data : buffer;
193 c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
195 TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed
196 ? "" : "un", (unsigned int) c_byte, srclength);
/* Sanity-check against the output buffer and the filesystem extent. */
198 if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
201 if (!(bh[0] = sb_getblk(s, cur_index)))
204 for (b = 1; bytes < c_byte; b++) {
205 if (!(bh[b] = sb_getblk(s, ++cur_index)))
207 bytes += msblk->devblksize;
/* Kick off the reads for all device blocks at once. */
209 ll_rw_block(READ, b, bh);
/* Metadata path: the block size must first be read from disk. */
211 if (index < 0 || (index + 2) > sblk->bytes_used)
214 if (!(bh[0] = get_block_length(s, &cur_index, &offset,
218 bytes = msblk->devblksize - offset;
219 compressed = SQUASHFS_COMPRESSED(c_byte);
220 c_buffer = compressed ? msblk->read_data : buffer;
221 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
223 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
224 ? "" : "un", (unsigned int) c_byte);
226 if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
229 for (b = 1; bytes < c_byte; b++) {
230 if (!(bh[b] = sb_getblk(s, ++cur_index)))
232 bytes += msblk->devblksize;
/* bh[0] was already read by get_block_length(); submit the rest. */
234 ll_rw_block(READ, b - 1, bh + 1);
/* Serialize use of the shared scratch buffer and zlib stream. */
238 down(&msblk->read_data_mutex);
/* Gather the device blocks into one contiguous buffer. */
240 for (bytes = 0; k < b; k++) {
241 avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
242 msblk->devblksize - offset :
244 wait_on_buffer(bh[k]);
245 if (!buffer_uptodate(bh[k]))
247 memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
248 bytes += avail_bytes;
/* Inflate the staged compressed data into the caller's buffer. */
259 msblk->stream.next_in = c_buffer;
260 msblk->stream.avail_in = c_byte;
261 msblk->stream.next_out = buffer;
262 msblk->stream.avail_out = srclength;
264 if (((zlib_err = zlib_inflateInit(&msblk->stream)) != Z_OK) ||
265 ((zlib_err = zlib_inflate(&msblk->stream, Z_FINISH))
266 != Z_STREAM_END) || ((zlib_err =
267 zlib_inflateEnd(&msblk->stream)) != Z_OK)) {
268 ERROR("zlib_fs returned unexpected result 0x%x\n",
272 bytes = msblk->stream.total_out;
274 up(&msblk->read_data_mutex);
/* For metadata blocks (length == 0), skip the 2-byte length field and,
 * when CHECK_DATA is set, presumably the marker byte as well. */
278 *next_index = index + c_byte + (length ? 0 :
279 (SQUASHFS_CHECK_DATA(msblk->sblk.flags)
288 ERROR("sb_bread failed reading block 0x%x\n", cur_index);
/*
 * Read 'length' bytes of metadata starting at [block:offset], through the
 * small LRU-ish metadata block cache (msblk->block_cache).  A read may
 * span the end of one cached metadata block into the next.  On success
 * returns 'length' and updates *next_block/*next_offset to point just past
 * the data read; a NULL 'buffer' just advances the position without
 * copying (used by callers to skip over data).  Returns 0 on failure.
 * Concurrency: block_cache_mutex guards the cache; an entry being filled
 * is marked SQUASHFS_USED_BLK and other readers sleep on msblk->waitq.
 */
293 SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
294 long long block, unsigned int offset,
295 int length, long long *next_block,
296 unsigned int *next_offset)
298 struct squashfs_sb_info *msblk = s->s_fs_info;
299 int n, i, bytes, return_length = length;
300 long long next_index;
302 TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
/* Fast path: is the wanted block already cached? */
305 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
306 if (msblk->block_cache[i].block == block)
309 down(&msblk->block_cache_mutex);
311 if (i == SQUASHFS_CACHED_BLKS) {
312 /* read inode header block */
/* Find a cache slot not currently being filled by another reader. */
313 for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
314 n ; n --, i = (i + 1) %
315 SQUASHFS_CACHED_BLKS)
316 if (msblk->block_cache[i].block !=
/* All slots busy: sleep until another reader finishes and wakes us. */
323 init_waitqueue_entry(&wait, current);
324 add_wait_queue(&msblk->waitq, &wait);
325 set_current_state(TASK_UNINTERRUPTIBLE);
326 up(&msblk->block_cache_mutex);
328 set_current_state(TASK_RUNNING);
329 remove_wait_queue(&msblk->waitq, &wait);
332 msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
/* Lazily allocate the slot's data buffer on first use. */
334 if (msblk->block_cache[i].block ==
335 SQUASHFS_INVALID_BLK) {
336 if (!(msblk->block_cache[i].data =
337 kmalloc(SQUASHFS_METADATA_SIZE,
339 ERROR("Failed to allocate cache"
341 up(&msblk->block_cache_mutex);
/* Mark the slot in-flight, then drop the lock for the disk read. */
346 msblk->block_cache[i].block = SQUASHFS_USED_BLK;
347 up(&msblk->block_cache_mutex);
349 msblk->block_cache[i].length = squashfs_read_data(s,
350 msblk->block_cache[i].data, block, 0, &next_index,
351 SQUASHFS_METADATA_SIZE);
353 if (msblk->block_cache[i].length == 0) {
354 ERROR("Unable to read cache block [%llx:%x]\n",
/* Read failed: invalidate the slot and wake any waiters. */
356 down(&msblk->block_cache_mutex);
357 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
358 kfree(msblk->block_cache[i].data);
359 wake_up(&msblk->waitq);
360 up(&msblk->block_cache_mutex);
/* Publish the filled slot and wake waiters. */
364 down(&msblk->block_cache_mutex);
365 wake_up(&msblk->waitq);
366 msblk->block_cache[i].block = block;
367 msblk->block_cache[i].next_index = next_index;
368 TRACE("Read cache block [%llx:%x]\n", block, offset);
/* Slot may have been recycled while we slept — recheck under lock. */
371 if (msblk->block_cache[i].block != block) {
372 up(&msblk->block_cache_mutex);
376 bytes = msblk->block_cache[i].length - offset;
/* Request satisfied entirely from this cached block. */
380 else if (bytes >= length) {
382 memcpy(buffer, msblk->block_cache[i].data +
/* Consumed the block exactly: advance to the next metadata block. */
384 if (msblk->block_cache[i].length - offset == length) {
385 *next_block = msblk->block_cache[i].next_index;
389 *next_offset = offset + length;
391 up(&msblk->block_cache_mutex);
/* Partial copy: take what this block has, then loop to the next one. */
395 memcpy(buffer, msblk->block_cache[i].data +
399 block = msblk->block_cache[i].next_index;
400 up(&msblk->block_cache_mutex);
407 return return_length;
/*
 * Look up fragment number 'fragment' in the in-memory fragment index and
 * read its on-disk fragment table entry, returning the fragment's start
 * block and (compressed-size-encoded) size via the out parameters.
 * The swapped variant handles opposite-endian filesystems.
 * NOTE(review): the branch selecting swapped vs native paths and the
 * return statements are not visible in this listing.
 */
413 static int get_fragment_location(struct super_block *s, unsigned int fragment,
414 long long *fragment_start_block,
415 unsigned int *fragment_size)
417 struct squashfs_sb_info *msblk = s->s_fs_info;
/* The index maps fragment number -> metadata block holding its entry. */
418 long long start_block =
419 msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
420 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
421 struct squashfs_fragment_entry fragment_entry;
/* Opposite-endian filesystem: read raw entry then byte-swap it. */
424 struct squashfs_fragment_entry sfragment_entry;
426 if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
428 sizeof(sfragment_entry), &start_block,
431 SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
/* Native-endian: read the entry directly. */
433 if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
435 sizeof(fragment_entry), &start_block,
439 *fragment_start_block = fragment_entry.start_block;
440 *fragment_size = fragment_entry.size;
/*
 * Drop a reference on a cached fragment obtained from
 * get_cached_fragment() and wake anyone waiting for a free cache slot.
 * NOTE(review): the line decrementing fragment->locked is not visible in
 * this listing but is implied by the lock/wake pairing.
 */
449 SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
450 squashfs_fragment_cache *fragment)
452 down(&msblk->fragment_mutex);
454 wake_up(&msblk->fragment_wait_queue);
455 up(&msblk->fragment_mutex);
/*
 * Return a locked (reference-counted) entry from the fragment cache for
 * the fragment block starting at 'start_block', reading and decompressing
 * it from disk on a cache miss.  Callers must pair with
 * release_cached_fragment().  fragment_mutex guards the cache; when every
 * slot is locked the caller sleeps on fragment_wait_queue.
 */
459 SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
460 *s, long long start_block,
464 struct squashfs_sb_info *msblk = s->s_fs_info;
465 struct squashfs_super_block *sblk = &msblk->sblk;
468 down(&msblk->fragment_mutex);
/* Cache hit scan. */
470 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
471 msblk->fragment[i].block != start_block; i++);
473 if (i == SQUASHFS_CACHED_FRAGMENTS) {
/* Miss: find an unlocked slot to evict, round-robin from next_fragment. */
474 for (i = msblk->next_fragment, n =
475 SQUASHFS_CACHED_FRAGMENTS; n &&
476 msblk->fragment[i].locked; n--, i = (i + 1) %
477 SQUASHFS_CACHED_FRAGMENTS);
/* Every slot locked: sleep until a fragment is released. */
482 init_waitqueue_entry(&wait, current);
483 add_wait_queue(&msblk->fragment_wait_queue,
485 set_current_state(TASK_UNINTERRUPTIBLE);
486 up(&msblk->fragment_mutex);
488 set_current_state(TASK_RUNNING);
489 remove_wait_queue(&msblk->fragment_wait_queue,
493 msblk->next_fragment = (msblk->next_fragment + 1) %
494 SQUASHFS_CACHED_FRAGMENTS;
/* Lazily allocate the slot's data buffer on first use. */
496 if (msblk->fragment[i].data == NULL)
497 if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
498 (SQUASHFS_FILE_MAX_SIZE))) {
499 ERROR("Failed to allocate fragment "
501 up(&msblk->fragment_mutex);
/* Mark in-flight and drop the lock for the (slow) disk read. */
505 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
506 msblk->fragment[i].locked = 1;
507 up(&msblk->fragment_mutex);
509 if (!(msblk->fragment[i].length = squashfs_read_data(s,
510 msblk->fragment[i].data,
511 start_block, length, NULL,
512 sblk->block_size))) {
513 ERROR("Unable to read fragment cache block "
514 "[%llx]\n", start_block);
515 msblk->fragment[i].locked = 0;
/* Publish the filled slot under its start block. */
519 msblk->fragment[i].block = start_block;
520 TRACE("New fragment %d, start block %lld, locked %d\n",
521 i, msblk->fragment[i].block,
522 msblk->fragment[i].locked);
/* Cache hit: just take another reference. */
526 msblk->fragment[i].locked++;
527 up(&msblk->fragment_mutex);
528 TRACE("Got fragment %d, start block %lld, locked %d\n", i,
529 msblk->fragment[i].block,
530 msblk->fragment[i].locked);
534 return &msblk->fragment[i];
/*
 * Allocate a new VFS inode for this superblock and populate the fields
 * common to all squashfs inode types (inode number, times, uid/gid, mode)
 * from the on-disk base inode header.  uid/guid fields are indices into
 * the tables loaded at mount time.  Returns NULL if new_inode() fails
 * (NULL check not visible in this listing).
 */
541 static struct inode *squashfs_new_inode(struct super_block *s,
542 struct squashfs_base_inode_header *inodeb)
544 struct squashfs_sb_info *msblk = s->s_fs_info;
545 struct inode *i = new_inode(s);
548 i->i_ino = inodeb->inode_number;
/* Squashfs stores a single timestamp; use it for mtime/atime/ctime. */
549 i->i_mtime.tv_sec = inodeb->mtime;
550 i->i_atime.tv_sec = inodeb->mtime;
551 i->i_ctime.tv_sec = inodeb->mtime;
552 i->i_uid = msblk->uid[inodeb->uid];
553 i->i_mode = inodeb->mode;
/* SQUASHFS_GUIDS is the "no separate gid" sentinel — presumably the gid
 * then falls back to the uid (fallback branch not visible here). */
555 if (inodeb->guid == SQUASHFS_GUIDS)
558 i->i_gid = msblk->guid[inodeb->guid];
/*
 * Read an inode from the on-disk inode table and build the corresponding
 * VFS inode.  The 64-bit 'inode' cookie encodes the metadata block and
 * the offset within it.  The base header is read first to learn the inode
 * type, then the full type-specific header is re-read from the same
 * position and dispatched on in the switch below.  For each type the pair
 * of squashfs_get_cached_block() calls are the opposite-endian (read raw,
 * then SQUASHFS_SWAP_*) and native-endian paths; the selecting branch
 * lines are not visible in this listing.  Returns the inode or NULL on
 * failure.
 */
565 static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode)
568 struct squashfs_sb_info *msblk = s->s_fs_info;
569 struct squashfs_super_block *sblk = &msblk->sblk;
570 long long block = SQUASHFS_INODE_BLK(inode) +
571 sblk->inode_table_start;
572 unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
573 long long next_block;
574 unsigned int next_offset;
/* 'id' holds the native header, 'sid' the raw (possibly swapped) one. */
575 union squashfs_inode_header id, sid;
576 struct squashfs_base_inode_header *inodeb = &id.base,
577 *sinodeb = &sid.base;
579 TRACE("Entered squashfs_iget\n");
/* Read the common base header to discover the inode type. */
582 if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
583 offset, sizeof(*sinodeb), &next_block,
586 SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
589 if (!squashfs_get_cached_block(s, (char *) inodeb, block,
590 offset, sizeof(*inodeb), &next_block,
594 switch(inodeb->inode_type) {
/* Regular file (compact form). */
595 case SQUASHFS_FILE_TYPE: {
596 unsigned int frag_size;
598 struct squashfs_reg_inode_header *inodep = &id.reg;
599 struct squashfs_reg_inode_header *sinodep = &sid.reg;
602 if (!squashfs_get_cached_block(s, (char *)
603 sinodep, block, offset,
604 sizeof(*sinodep), &next_block,
607 SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
609 if (!squashfs_get_cached_block(s, (char *)
610 inodep, block, offset,
611 sizeof(*inodep), &next_block,
/* Resolve the tail-end fragment location, if the file has one. */
615 frag_blk = SQUASHFS_INVALID_BLK;
616 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
617 !get_fragment_location(s,
618 inodep->fragment, &frag_blk, &frag_size))
621 if((i = squashfs_new_inode(s, inodeb)) == NULL)
625 i->i_size = inodep->file_size;
626 i->i_fop = &generic_ro_fops;
627 i->i_mode |= S_IFREG;
/* i_blocks is in 512-byte units. */
628 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
629 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
630 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
631 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
632 SQUASHFS_I(i)->start_block = inodep->start_block;
/* The block list follows the header; next_block/next_offset point at it. */
633 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
634 SQUASHFS_I(i)->offset = next_offset;
/* Choose the readpage variant by filesystem block size. */
635 if (sblk->block_size > 4096)
636 i->i_data.a_ops = &squashfs_aops;
638 i->i_data.a_ops = &squashfs_aops_4K;
640 TRACE("File inode %x:%x, start_block %llx, "
641 "block_list_start %llx, offset %x\n",
642 SQUASHFS_INODE_BLK(inode), offset,
643 inodep->start_block, next_block,
/* Regular file (large form — adds nlink; otherwise same as above). */
647 case SQUASHFS_LREG_TYPE: {
648 unsigned int frag_size;
650 struct squashfs_lreg_inode_header *inodep = &id.lreg;
651 struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
654 if (!squashfs_get_cached_block(s, (char *)
655 sinodep, block, offset,
656 sizeof(*sinodep), &next_block,
659 SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
661 if (!squashfs_get_cached_block(s, (char *)
662 inodep, block, offset,
663 sizeof(*inodep), &next_block,
667 frag_blk = SQUASHFS_INVALID_BLK;
668 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
669 !get_fragment_location(s,
670 inodep->fragment, &frag_blk, &frag_size))
673 if((i = squashfs_new_inode(s, inodeb)) == NULL)
676 i->i_nlink = inodep->nlink;
677 i->i_size = inodep->file_size;
678 i->i_fop = &generic_ro_fops;
679 i->i_mode |= S_IFREG;
680 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
681 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
682 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
683 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
684 SQUASHFS_I(i)->start_block = inodep->start_block;
685 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
686 SQUASHFS_I(i)->offset = next_offset;
687 if (sblk->block_size > 4096)
688 i->i_data.a_ops = &squashfs_aops;
690 i->i_data.a_ops = &squashfs_aops_4K;
692 TRACE("File inode %x:%x, start_block %llx, "
693 "block_list_start %llx, offset %x\n",
694 SQUASHFS_INODE_BLK(inode), offset,
695 inodep->start_block, next_block,
/* Directory (compact form, no index). */
699 case SQUASHFS_DIR_TYPE: {
700 struct squashfs_dir_inode_header *inodep = &id.dir;
701 struct squashfs_dir_inode_header *sinodep = &sid.dir;
704 if (!squashfs_get_cached_block(s, (char *)
705 sinodep, block, offset,
706 sizeof(*sinodep), &next_block,
709 SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
711 if (!squashfs_get_cached_block(s, (char *)
712 inodep, block, offset,
713 sizeof(*inodep), &next_block,
717 if((i = squashfs_new_inode(s, inodeb)) == NULL)
720 i->i_nlink = inodep->nlink;
721 i->i_size = inodep->file_size;
722 i->i_op = &squashfs_dir_inode_ops;
723 i->i_fop = &squashfs_dir_ops;
724 i->i_mode |= S_IFDIR;
725 SQUASHFS_I(i)->start_block = inodep->start_block;
726 SQUASHFS_I(i)->offset = inodep->offset;
/* Compact directories carry no lookup index. */
727 SQUASHFS_I(i)->u.s2.directory_index_count = 0;
728 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
730 TRACE("Directory inode %x:%x, start_block %x, offset "
731 "%x\n", SQUASHFS_INODE_BLK(inode),
732 offset, inodep->start_block,
/* Directory (long form, with an index for fast name lookup). */
736 case SQUASHFS_LDIR_TYPE: {
737 struct squashfs_ldir_inode_header *inodep = &id.ldir;
738 struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
741 if (!squashfs_get_cached_block(s, (char *)
742 sinodep, block, offset,
743 sizeof(*sinodep), &next_block,
746 SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
749 if (!squashfs_get_cached_block(s, (char *)
750 inodep, block, offset,
751 sizeof(*inodep), &next_block,
755 if((i = squashfs_new_inode(s, inodeb)) == NULL)
758 i->i_nlink = inodep->nlink;
759 i->i_size = inodep->file_size;
760 i->i_op = &squashfs_dir_inode_ops;
761 i->i_fop = &squashfs_dir_ops;
762 i->i_mode |= S_IFDIR;
763 SQUASHFS_I(i)->start_block = inodep->start_block;
764 SQUASHFS_I(i)->offset = inodep->offset;
/* The directory index immediately follows the inode header. */
765 SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
766 SQUASHFS_I(i)->u.s2.directory_index_offset =
768 SQUASHFS_I(i)->u.s2.directory_index_count =
770 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
772 TRACE("Long directory inode %x:%x, start_block %x, "
774 SQUASHFS_INODE_BLK(inode), offset,
775 inodep->start_block, inodep->offset);
/* Symbolic link: target string follows the header in the inode table. */
778 case SQUASHFS_SYMLINK_TYPE: {
779 struct squashfs_symlink_inode_header *inodep =
781 struct squashfs_symlink_inode_header *sinodep =
785 if (!squashfs_get_cached_block(s, (char *)
786 sinodep, block, offset,
787 sizeof(*sinodep), &next_block,
790 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
793 if (!squashfs_get_cached_block(s, (char *)
794 inodep, block, offset,
795 sizeof(*inodep), &next_block,
799 if((i = squashfs_new_inode(s, inodeb)) == NULL)
802 i->i_nlink = inodep->nlink;
803 i->i_size = inodep->symlink_size;
804 i->i_op = &page_symlink_inode_operations;
805 i->i_data.a_ops = &squashfs_symlink_aops;
806 i->i_mode |= S_IFLNK;
/* Remember where the target string starts for readpage. */
807 SQUASHFS_I(i)->start_block = next_block;
808 SQUASHFS_I(i)->offset = next_offset;
810 TRACE("Symbolic link inode %x:%x, start_block %llx, "
812 SQUASHFS_INODE_BLK(inode), offset,
813 next_block, next_offset);
/* Block and character device nodes. */
816 case SQUASHFS_BLKDEV_TYPE:
817 case SQUASHFS_CHRDEV_TYPE: {
818 struct squashfs_dev_inode_header *inodep = &id.dev;
819 struct squashfs_dev_inode_header *sinodep = &sid.dev;
822 if (!squashfs_get_cached_block(s, (char *)
823 sinodep, block, offset,
824 sizeof(*sinodep), &next_block,
827 SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
829 if (!squashfs_get_cached_block(s, (char *)
830 inodep, block, offset,
831 sizeof(*inodep), &next_block,
835 if ((i = squashfs_new_inode(s, inodeb)) == NULL)
838 i->i_nlink = inodep->nlink;
839 i->i_mode |= (inodeb->inode_type ==
840 SQUASHFS_CHRDEV_TYPE) ? S_IFCHR :
/* rdev is stored in the old 16-bit major:minor encoding. */
842 init_special_inode(i, i->i_mode,
843 old_decode_dev(inodep->rdev));
845 TRACE("Device inode %x:%x, rdev %x\n",
846 SQUASHFS_INODE_BLK(inode), offset,
/* FIFOs and UNIX sockets — no device number. */
850 case SQUASHFS_FIFO_TYPE:
851 case SQUASHFS_SOCKET_TYPE: {
852 struct squashfs_ipc_inode_header *inodep = &id.ipc;
853 struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
856 if (!squashfs_get_cached_block(s, (char *)
857 sinodep, block, offset,
858 sizeof(*sinodep), &next_block,
861 SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
863 if (!squashfs_get_cached_block(s, (char *)
864 inodep, block, offset,
865 sizeof(*inodep), &next_block,
869 if ((i = squashfs_new_inode(s, inodeb)) == NULL)
872 i->i_nlink = inodep->nlink;
873 i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
874 ? S_IFIFO : S_IFSOCK;
875 init_special_inode(i, i->i_mode, 0);
/* Unrecognised type code — corrupt or future-format filesystem. */
879 ERROR("Unknown inode type %d in squashfs_iget!\n",
884 insert_inode_hash(i);
888 ERROR("Unable to read inode [%llx:%x]\n", block, offset);
/*
 * Allocate and read the fragment index table (the array mapping fragment
 * numbers to the metadata blocks holding their entries) from disk at
 * mount time, byte-swapping each entry on opposite-endian filesystems.
 * Returns 0 on failure (success return not visible in this listing).
 */
895 static int read_fragment_index_table(struct super_block *s)
897 struct squashfs_sb_info *msblk = s->s_fs_info;
898 struct squashfs_super_block *sblk = &msblk->sblk;
899 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
904 /* Allocate fragment index table */
905 if (!(msblk->fragment_index = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES
906 (sblk->fragments), GFP_KERNEL))) {
/* NOTE(review): copy-paste error — this allocates the fragment index
 * table, not the uid/gid table; the message is misleading. */
907 ERROR("Failed to allocate uid/gid table\n");
/* The table is stored uncompressed; the COMPRESSED_BIT_BLOCK flag in the
 * length word tells squashfs_read_data() the exact on-disk size. */
911 if (!squashfs_read_data(s, (char *) msblk->fragment_index,
912 sblk->fragment_table_start, length |
913 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
914 ERROR("unable to read fragment index table\n");
/* Opposite-endian filesystem: swap each index entry in place. */
922 for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments);
924 SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
925 &msblk->fragment_index[i], 1);
926 msblk->fragment_index[i] = fragment;
/*
 * Check the on-disk major/minor version against what this driver was
 * built to support, and install the version-appropriate operation
 * pointers (defaults are the current-format handlers; the 1.0/2.0
 * support hooks presumably override them).  Returns zero when the
 * filesystem cannot be mounted (return statements not visible here).
 * SERROR() presumably honours 'silent' so probing mounts stay quiet.
 */
934 static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
936 struct squashfs_super_block *sblk = &msblk->sblk;
/* Default to current-format handlers; older formats may replace them. */
938 msblk->iget = squashfs_iget;
939 msblk->read_blocklist = read_blocklist;
940 msblk->read_fragment_index_table = read_fragment_index_table;
942 if (sblk->s_major == 1) {
943 if (!squashfs_1_0_supported(msblk)) {
944 SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
945 "are unsupported\n");
946 SERROR("Please recompile with "
947 "Squashfs 1.0 support enabled\n");
950 } else if (sblk->s_major == 2) {
951 if (!squashfs_2_0_supported(msblk)) {
952 SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
953 "are unsupported\n");
954 SERROR("Please recompile with "
955 "Squashfs 2.0 support enabled\n");
/* Filesystem newer than this driver understands. */
958 } else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
960 SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
961 "filesystem\n", sblk->s_major, sblk->s_minor);
962 SERROR("Please update your kernel\n");
/*
 * Mount-time entry point: read and validate the superblock, allocate the
 * per-mount squashfs_sb_info and its caches/tables, and instantiate the
 * root inode/dentry.  The trailing kfree/vfree sequence is the shared
 * failure-unwind path (its goto labels are not visible in this listing).
 */
970 static int squashfs_fill_super(struct super_block *s, void *data, int silent)
972 struct squashfs_sb_info *msblk;
973 struct squashfs_super_block *sblk;
975 char b[BDEVNAME_SIZE];
978 TRACE("Entered squashfs_read_superblock\n");
980 if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
982 ERROR("Failed to allocate superblock\n");
985 memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
986 msblk = s->s_fs_info;
/* zlib inflate needs a preallocated workspace; shared per-mount. */
987 if (!(msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
988 ERROR("Failed to allocate zlib workspace\n");
993 msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
/* devblksize is a power of two; ffz(~x) == log2(x). */
994 msblk->devblksize_log2 = ffz(~msblk->devblksize);
996 init_MUTEX(&msblk->read_data_mutex);
997 init_MUTEX(&msblk->read_page_mutex);
998 init_MUTEX(&msblk->block_cache_mutex);
999 init_MUTEX(&msblk->fragment_mutex);
1000 init_MUTEX(&msblk->meta_index_mutex);
1002 init_waitqueue_head(&msblk->waitq);
1003 init_waitqueue_head(&msblk->fragment_wait_queue);
/* NOTE(review): the 'sblk = &msblk->sblk;' assignment is not visible in
 * this listing but must precede this use — confirm in the full source.
 * bytes_used is seeded so the bounds check inside squashfs_read_data()
 * permits reading the superblock itself. */
1005 sblk->bytes_used = sizeof(struct squashfs_super_block);
1006 if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
1007 sizeof(struct squashfs_super_block) |
1008 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL,
1009 sizeof(struct squashfs_super_block))) {
1010 SERROR("unable to read superblock\n");
1014 /* Check it is a SQUASHFS superblock */
1016 if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
/* A byte-swapped magic means an opposite-endian image: swap the whole
 * superblock in place and carry on. */
1017 if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
1018 struct squashfs_super_block ssblk;
1020 WARNING("Mounting a different endian SQUASHFS "
1021 "filesystem on %s\n", bdevname(s->s_bdev, b));
1023 SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
1024 memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
1027 SERROR("Can't find a SQUASHFS superblock on %s\n",
1028 bdevname(s->s_bdev, b));
1033 /* Check the MAJOR & MINOR versions */
1034 if(!supported_squashfs_filesystem(msblk, silent))
1037 /* Check the filesystem does not extend beyond the end of the
1039 if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
1042 /* Check the root inode for sanity */
1043 if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
1046 TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b))
1047 TRACE("Inodes are %scompressed\n",
1048 SQUASHFS_UNCOMPRESSED_INODES
1049 (sblk->flags) ? "un" : "");
1050 TRACE("Data is %scompressed\n",
1051 SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
1053 TRACE("Check data is %s present in the filesystem\n",
1054 SQUASHFS_CHECK_DATA(sblk->flags) ?
1056 TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
1057 TRACE("Block size %d\n", sblk->block_size);
1058 TRACE("Number of inodes %d\n", sblk->inodes);
1059 if (sblk->s_major > 1)
1060 TRACE("Number of fragments %d\n", sblk->fragments);
1061 TRACE("Number of uids %d\n", sblk->no_uids);
1062 TRACE("Number of gids %d\n", sblk->no_guids);
1063 TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
1064 TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
1065 if (sblk->s_major > 1)
1066 TRACE("sblk->fragment_table_start %llx\n",
1067 sblk->fragment_table_start);
1068 TRACE("sblk->uid_start %llx\n", sblk->uid_start);
/* Squashfs is strictly read-only. */
1070 s->s_flags |= MS_RDONLY;
1071 s->s_op = &squashfs_ops;
1073 /* Init inode_table block pointer array */
1074 if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1075 SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
1076 ERROR("Failed to allocate block cache\n");
1080 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
1081 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1083 msblk->next_cache = 0;
1085 /* Allocate read_data block */
/* Scratch buffer must hold either a metadata or a data block, whichever
 * is larger. */
1086 msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
1087 SQUASHFS_METADATA_SIZE :
1090 if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
1091 ERROR("Failed to allocate read_data block\n");
1095 /* Allocate read_page block */
1096 if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
1097 ERROR("Failed to allocate read_page block\n");
1101 /* Allocate uid and gid tables */
/* uid and guid tables share one allocation; guid starts after the uids. */
1102 if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1103 sizeof(unsigned int), GFP_KERNEL))) {
1104 ERROR("Failed to allocate uid/gid table\n");
1107 msblk->guid = msblk->uid + sblk->no_uids;
/* Opposite-endian path: read raw then byte-swap into msblk->uid. */
1110 unsigned int suid[sblk->no_uids + sblk->no_guids];
1112 if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
1113 ((sblk->no_uids + sblk->no_guids) *
1114 sizeof(unsigned int)) |
1115 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL,
1116 (sblk->no_uids + sblk->no_guids) *
1117 sizeof(unsigned int))) {
1118 ERROR("unable to read uid/gid table\n");
1122 SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1123 sblk->no_guids), (sizeof(unsigned int) * 8));
/* Native-endian path: read the table directly. */
1125 if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1126 ((sblk->no_uids + sblk->no_guids) *
1127 sizeof(unsigned int)) |
1128 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL,
1129 (sblk->no_uids + sblk->no_guids) *
1130 sizeof(unsigned int))) {
1131 ERROR("unable to read uid/gid table\n");
/* 1.x filesystems have no fragments; skip the fragment setup below. */
1136 if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1139 if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
1140 SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
1141 ERROR("Failed to allocate fragment block cache\n");
1145 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
1146 msblk->fragment[i].locked = 0;
1147 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1148 msblk->fragment[i].data = NULL;
1151 msblk->next_fragment = 0;
1153 /* Allocate fragment index table */
1154 if (msblk->read_fragment_index_table(s) == 0)
/* Instantiate the root inode via the version-specific iget. */
1158 if ((root = (msblk->iget)(s, sblk->root_inode)) == NULL)
1161 if ((s->s_root = d_alloc_root(root)) == NULL) {
1162 ERROR("Root inode create failed\n");
1167 TRACE("Leaving squashfs_read_super\n");
/* Failure unwind: free everything allocated above (kfree/vfree are
 * NULL-safe, so earlier failures fall through harmlessly). */
1171 kfree(msblk->fragment_index);
1172 kfree(msblk->fragment);
1174 kfree(msblk->read_page);
1175 kfree(msblk->read_data);
1176 kfree(msblk->block_cache);
1177 kfree(msblk->fragment_index_2);
1178 vfree(msblk->stream.workspace);
1179 kfree(s->s_fs_info);
1180 s->s_fs_info = NULL;
/*
 * statfs(2) handler: report filesystem geometry from the cached
 * superblock.  Free-space counts are zero — the filesystem is read-only.
 */
1188 static int squashfs_statfs(struct dentry *s, struct kstatfs *buf)
1190 struct squashfs_sb_info *msblk = s->d_sb->s_fs_info;
1191 struct squashfs_super_block *sblk = &msblk->sblk;
1193 TRACE("Entered squashfs_statfs\n");
1195 buf->f_type = SQUASHFS_MAGIC;
1196 buf->f_bsize = sblk->block_size;
/* Round the byte count up to whole filesystem blocks. */
1197 buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
1198 buf->f_bfree = buf->f_bavail = 0;
1199 buf->f_files = sblk->inodes;
1201 buf->f_namelen = SQUASHFS_NAME_LEN;
/*
 * readpage handler for symlinks: read the target string (stored inline in
 * the inode table after the symlink inode header) into the page.  For
 * pages past the first, earlier bytes are skipped by calling
 * squashfs_get_cached_block() with a NULL buffer.  The remainder of the
 * page is zero-filled.  kunmap/unlock lines are not visible here.
 */
1207 static int squashfs_symlink_readpage(struct file *file, struct page *page)
1209 struct inode *inode = page->mapping->host;
1210 int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
1211 long long block = SQUASHFS_I(inode)->start_block;
1212 int offset = SQUASHFS_I(inode)->offset;
1213 void *pageaddr = kmap(page);
1215 TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
1216 "%llx, offset %x\n", page->index,
1217 SQUASHFS_I(inode)->start_block,
1218 SQUASHFS_I(inode)->offset);
/* Skip over the bytes belonging to earlier pages (NULL buffer = skip). */
1220 for (length = 0; length < index; length += bytes) {
1221 if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
1222 block, offset, PAGE_CACHE_SIZE, &block,
1224 ERROR("Unable to read symbolic link [%llx:%x]\n", block,
1230 if (length != index) {
1231 ERROR("(squashfs_symlink_readpage) length != index\n");
/* Copy at most one page of the remaining target string. */
1236 bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
1237 i_size_read(inode) - length;
1239 if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
1240 offset, bytes, &block, &offset)))
1241 ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
/* Zero the rest of the page beyond the link target. */
1244 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1246 SetPageUptodate(page);
/*
 * Search the per-mount meta_index array for an unlocked entry belonging
 * to this inode whose offset best covers the requested [offset, index]
 * range.  Returns the matching entry (presumably locked before the mutex
 * is released — locking line not visible here) or NULL if none exists.
 */
1253 struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
1255 struct meta_index *meta = NULL;
1256 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1259 down(&msblk->meta_index_mutex);
1261 TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
/* Array is allocated lazily by empty_meta_index(); nothing cached yet. */
1263 if(msblk->meta_index == NULL)
1266 for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
/* Candidate must belong to this inode, cover a range at or beyond the
 * caller's offset without passing 'index', and not be in use. */
1267 if (msblk->meta_index[i].inode_number == inode->i_ino &&
1268 msblk->meta_index[i].offset >= offset &&
1269 msblk->meta_index[i].offset <= index &&
1270 msblk->meta_index[i].locked == 0) {
1271 TRACE("locate_meta_index: entry %d, offset %d\n", i,
1272 msblk->meta_index[i].offset);
1273 meta = &msblk->meta_index[i];
/* Keep the closest (largest) covering offset found so far. */
1274 offset = meta->offset;
1281 up(&msblk->meta_index_mutex);
/*
 * Claim a free slot in the meta_index array (allocating the array lazily
 * on first use) for building a new block index for 'inode' starting at
 * 'offset' with stride 'skip'.  Slots are taken round-robin via
 * next_meta_index, skipping locked entries.  Returns the claimed entry
 * or NULL if the array cannot be allocated / all slots are locked.
 */
1287 struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
1289 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1290 struct meta_index *meta = NULL;
1293 down(&msblk->meta_index_mutex);
1295 TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
/* First use: allocate and zero-initialise the whole array. */
1297 if(msblk->meta_index == NULL) {
1298 if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1299 SQUASHFS_META_NUMBER, GFP_KERNEL))) {
1300 ERROR("Failed to allocate meta_index\n");
1303 for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
1304 msblk->meta_index[i].inode_number = 0;
1305 msblk->meta_index[i].locked = 0;
1307 msblk->next_meta_index = 0;
/* Advance past locked slots, giving up after a full sweep. */
1310 for(i = SQUASHFS_META_NUMBER; i &&
1311 msblk->meta_index[msblk->next_meta_index].locked; i --)
1312 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1313 SQUASHFS_META_NUMBER;
1316 TRACE("empty_meta_index: failed!\n");
1320 TRACE("empty_meta_index: returned meta entry %d, %p\n",
1321 msblk->next_meta_index,
1322 &msblk->meta_index[msblk->next_meta_index]);
1324 meta = &msblk->meta_index[msblk->next_meta_index];
1325 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1326 SQUASHFS_META_NUMBER;
1328 meta->inode_number = inode->i_ino;
1329 meta->offset = offset;
1335 up(&msblk->meta_index_mutex);
1340 void release_meta_index(struct inode *inode, struct meta_index *meta)
/*
 * Read @blocks entries (4 bytes each) of a file's block list from metadata
 * at *start_block/*offset into @block_list, advancing the cursor.  Two
 * paths are visible: a byte-swapping path that reads into a temporary and
 * calls SQUASHFS_SWAP_INTS(), and a direct-read path (the selecting
 * conditional -- presumably an endianness/swap check -- is elided).
 * Returns the summed on-disk compressed size of the blocks read
 * (accumulated into 'block'; declaration and return elided).
 * NOTE(review): sblock_list is a stack VLA of blocks << 2 bytes; confirm
 * callers bound 'blocks' (kernel stacks are small).
 */
1346 static int read_block_index(struct super_block *s, int blocks, char *block_list,
1347 long long *start_block, int *offset)
1349 struct squashfs_sb_info *msblk = s->s_fs_info;
1350 unsigned int *block_listp;
1354 char sblock_list[blocks << 2];
1356 if (!squashfs_get_cached_block(s, sblock_list, *start_block,
1357 *offset, blocks << 2, start_block, offset)) {
1358 ERROR("Unable to read block list [%llx:%x]\n",
1359 *start_block, *offset);
1362 SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
1363 ((unsigned int *)sblock_list), blocks);
1365 if (!squashfs_get_cached_block(s, block_list, *start_block,
1366 *offset, blocks << 2, start_block, offset)) {
1367 ERROR("Unable to read block list [%llx:%x]\n",
1368 *start_block, *offset);
/* Sum the compressed sizes of all entries just read. */
1372 for (block_listp = (unsigned int *) block_list; blocks;
1373 block_listp++, blocks --)
1374 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
/*
 * Compute the meta-index "skip" factor for a file of @blocks data blocks:
 * how many index steps each meta entry covers.  Clamped to the range 1..7.
 */
1385 static inline int calculate_skip(int blocks) {
1386 int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
1387 return skip >= 7 ? 7 : skip + 1;
/*
 * Locate (or build) cached meta-index information to fast-forward the
 * block-list walk for data block @index of @inode.  Starting from a cached
 * entry (locate_meta_index) or a freshly claimed one (empty_meta_index),
 * it reads block-list chunks via read_block_index() to fill in meta entries
 * up to @index, recording for each the metadata position of the block list
 * and the accumulated data-block address.
 * On success the cursor is returned through *index_block / *index_offset /
 * *data_block, and the return value is the block index actually reached
 * (offset * SQUASHFS_META_INDEXES * skip) -- the caller walks the rest.
 * NOTE(review): elided view -- 'offset'/'i' declarations, several braces,
 * the direct-walk fallback and error returns are not visible.
 */
1391 static int get_meta_index(struct inode *inode, int index,
1392 long long *index_block, int *index_offset,
1393 long long *data_block, char *block_list)
1395 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1396 struct squashfs_super_block *sblk = &msblk->sblk;
1397 int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
1399 struct meta_index *meta;
1400 struct meta_entry *meta_entry;
1401 long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
1402 int cur_offset = SQUASHFS_I(inode)->offset;
1403 long long cur_data_block = SQUASHFS_I(inode)->start_block;
/* Convert the data-block index into meta-index units. */
1406 index /= SQUASHFS_META_INDEXES * skip;
1408 while ( offset < index ) {
1409 meta = locate_meta_index(inode, index, offset + 1);
1412 if ((meta = empty_meta_index(inode, offset + 1,
/* Resume from the furthest already-computed entry in this meta slot. */
1416 offset = index < meta->offset + meta->entries ? index :
1417 meta->offset + meta->entries - 1;
1418 meta_entry = &meta->meta_entry[offset - meta->offset];
1419 cur_index_block = meta_entry->index_block + sblk->inode_table_start;
1420 cur_offset = meta_entry->offset;
1421 cur_data_block = meta_entry->data_block;
1422 TRACE("get_meta_index: offset %d, meta->offset %d, "
1423 "meta->entries %d\n", offset, meta->offset,
1425 TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
1426 " data_block 0x%llx\n", cur_index_block,
1427 cur_offset, cur_data_block);
/* Extend the meta slot: compute entries from the current end up to
 * 'index' (bounded by the slot capacity). */
1430 for (i = meta->offset + meta->entries; i <= index &&
1431 i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
1432 int blocks = skip * SQUASHFS_META_INDEXES;
/* Read the block list in chunks of at most SIZE >> 2 entries. */
1435 int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
1437 int res = read_block_index(inode->i_sb, block,
1438 block_list, &cur_index_block,
1444 cur_data_block += res;
/* Record the newly computed position in the meta entry; index_block
 * is stored relative to inode_table_start. */
1448 meta_entry = &meta->meta_entry[i - meta->offset];
1449 meta_entry->index_block = cur_index_block - sblk->inode_table_start;
1450 meta_entry->offset = cur_offset;
1451 meta_entry->data_block = cur_data_block;
1456 TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
1457 meta->offset, meta->entries);
1459 release_meta_index(inode, meta);
1463 *index_block = cur_index_block;
1464 *index_offset = cur_offset;
1465 *data_block = cur_data_block;
/* Return the data-block index this cursor corresponds to. */
1467 return offset * SQUASHFS_META_INDEXES * skip;
1470 release_meta_index(inode, meta);
/*
 * Resolve data block @index of @inode to its on-disk address.  Uses
 * get_meta_index() to fast-forward close to @index, walks the remaining
 * block-list entries in SIZE >> 2 chunks, then reads the final entry to
 * obtain the block's compressed size, returned through *bsize.
 * Returns the data block's start address (return statements elided).
 * NOTE(review): elided view -- the adjustment of 'index' by the meta-index
 * result and the error paths are not visible.
 */
1475 static long long read_blocklist(struct inode *inode, int index,
1476 int readahead_blks, char *block_list,
1477 unsigned short **block_p, unsigned int *bsize)
1479 long long block_ptr;
1482 int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
1485 TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
1486 " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
/* Walk the remaining entries towards 'index' in bounded chunks. */
1495 int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
1496 int res = read_block_index(inode->i_sb, blocks, block_list,
1497 &block_ptr, &offset);
/* Read the single entry for the target block to get its size word. */
1504 if (read_block_index(inode->i_sb, 1, block_list,
1505 &block_ptr, &offset) == -1)
1507 *bsize = *((unsigned int *) block_list);
/*
 * ->readpage for block sizes larger than PAGE_CACHE_SIZE.  Decompresses
 * the whole squashfs block (or fragment) containing @page into a shared
 * buffer, then distributes it across every pagecache page it covers
 * (start_index..end_index), filling sibling pages opportunistically with
 * grab_cache_page_nowait().
 * Two data sources: a regular data block (decompressed into
 * msblk->read_page under read_page_mutex) or a tail-end fragment
 * (fetched via get_cached_fragment()).
 * NOTE(review): elided view -- unlock_page()/kfree(block_list), the
 * zero-length-past-EOF path's body and the return statements are not
 * visible here.
 */
1516 static int squashfs_readpage(struct file *file, struct page *page)
1518 struct inode *inode = page->mapping->host;
1519 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1520 struct squashfs_super_block *sblk = &msblk->sblk;
1521 unsigned char *block_list;
1523 unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
1524 int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
1526 struct squashfs_fragment_cache *fragment = NULL;
1527 char *data_ptr = msblk->read_page;
/* Pages-per-block mask: start/end of the pagecache page range covered by
 * the squashfs block containing this page. */
1529 int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
1530 int start_index = page->index & ~mask;
1531 int end_index = start_index | mask;
1533 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
1535 SQUASHFS_I(inode)->start_block);
1537 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1538 ERROR("Failed to allocate block_list\n");
/* Page entirely beyond EOF: nothing to read. */
1542 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
/* Regular data block: either the file has no tail fragment, or this
 * index is before the fragment-covered tail. */
1546 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1547 || index < (i_size_read(inode) >>
1549 if ((block = (msblk->read_blocklist)(inode, index, 1,
1550 block_list, NULL, &bsize)) == 0)
/* msblk->read_page is a single shared buffer -- serialise. */
1553 down(&msblk->read_page_mutex);
1555 if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
1557 msblk->read_size))) {
1558 ERROR("Unable to read page, block %llx, size %x\n", block,
1560 up(&msblk->read_page_mutex);
/* Tail-end fragment path: data comes from the fragment cache. */
1564 if ((fragment = get_cached_fragment(inode->i_sb,
1566 u.s1.fragment_start_block,
1567 SQUASHFS_I(inode)->u.s1.fragment_size))
1569 ERROR("Unable to read page, block %llx, size %x\n",
1571 u.s1.fragment_start_block,
1572 (int) SQUASHFS_I(inode)->
1573 u.s1.fragment_size);
/* Fragment data for this file starts at fragment_offset within the
 * cached fragment; 'bytes' is the end of this file's data in it. */
1576 bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
1577 (i_size_read(inode) & (sblk->block_size
1579 byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
1580 data_ptr = fragment->data;
/* Copy the decompressed block out page by page, zero-padding each
 * page's tail beyond the available bytes. */
1583 for (i = start_index; i <= end_index && byte_offset < bytes;
1584 i++, byte_offset += PAGE_CACHE_SIZE) {
1585 struct page *push_page;
1586 int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
1587 PAGE_CACHE_SIZE : bytes - byte_offset;
1589 TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
1590 bytes, i, byte_offset, available_bytes);
1592 if (i == page->index) {
1593 pageaddr = kmap_atomic(page, KM_USER0);
1594 memcpy(pageaddr, data_ptr + byte_offset,
1596 memset(pageaddr + available_bytes, 0,
1597 PAGE_CACHE_SIZE - available_bytes);
1598 kunmap_atomic(pageaddr, KM_USER0);
1599 flush_dcache_page(page);
1600 SetPageUptodate(page);
/* Opportunistically populate sibling pages covered by this block;
 * grab_cache_page_nowait() may fail, which is fine -- skip. */
1602 } else if ((push_page =
1603 grab_cache_page_nowait(page->mapping, i))) {
1604 pageaddr = kmap_atomic(push_page, KM_USER0);
1606 memcpy(pageaddr, data_ptr + byte_offset,
1608 memset(pageaddr + available_bytes, 0,
1609 PAGE_CACHE_SIZE - available_bytes);
1610 kunmap_atomic(pageaddr, KM_USER0);
1611 flush_dcache_page(push_page);
1612 SetPageUptodate(push_page);
1613 unlock_page(push_page);
1614 page_cache_release(push_page);
/* Release whichever source buffer was used (same condition as above). */
1618 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1619 || index < (i_size_read(inode) >>
1621 up(&msblk->read_page_mutex);
1623 release_cached_fragment(msblk, fragment);
/* Error/EOF path: hand back a zero-filled (from 'bytes' on) page. */
1629 pageaddr = kmap_atomic(page, KM_USER0);
1630 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1631 kunmap_atomic(pageaddr, KM_USER0);
1632 flush_dcache_page(page);
1633 SetPageUptodate(page);
/*
 * ->readpage for the block_size == PAGE_CACHE_SIZE case: one squashfs
 * block maps to exactly one page, so no sibling-page distribution is
 * needed.  Data comes from either a regular data block (decompressed into
 * the shared msblk->read_page buffer under read_page_mutex) or from the
 * file's tail-end fragment via the fragment cache.
 * NOTE(review): elided view -- braces, unlock_page()/kfree(block_list),
 * error-branch structure and returns are not visible.
 */
1641 static int squashfs_readpage4K(struct file *file, struct page *page)
1643 struct inode *inode = page->mapping->host;
1644 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1645 struct squashfs_super_block *sblk = &msblk->sblk;
1646 unsigned char *block_list;
1648 unsigned int bsize, bytes = 0;
1651 TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
1653 SQUASHFS_I(inode)->start_block);
/* Page entirely beyond EOF: nothing to read. */
1655 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1656 PAGE_CACHE_SHIFT)) {
1661 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1662 ERROR("Failed to allocate block_list\n");
/* Regular data block (not the fragment-covered tail). */
1666 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1667 || page->index < (i_size_read(inode) >>
1669 block = (msblk->read_blocklist)(inode, page->index, 1,
1670 block_list, NULL, &bsize);
1672 down(&msblk->read_page_mutex);
1673 bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1674 bsize, NULL, msblk->read_size);
1676 pageaddr = kmap_atomic(page, KM_USER0);
1677 memcpy(pageaddr, msblk->read_page, bytes);
1678 kunmap_atomic(pageaddr, KM_USER0);
1680 ERROR("Unable to read page, block %llx, size %x\n",
1682 up(&msblk->read_page_mutex);
/* Tail-end fragment path. */
1684 struct squashfs_fragment_cache *fragment =
1685 get_cached_fragment(inode->i_sb,
1687 u.s1.fragment_start_block,
1688 SQUASHFS_I(inode)-> u.s1.fragment_size);
/* Bytes of the file stored in the fragment (size modulo block_size). */
1690 bytes = i_size_read(inode) & (sblk->block_size - 1);
1691 pageaddr = kmap_atomic(page, KM_USER0);
1692 memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
1693 u.s1.fragment_offset, bytes);
1694 kunmap_atomic(pageaddr, KM_USER0);
1695 release_cached_fragment(msblk, fragment);
1697 ERROR("Unable to read page, block %llx, size %x\n",
1699 u.s1.fragment_start_block, (int)
1700 SQUASHFS_I(inode)-> u.s1.fragment_size);
/* Zero-pad the page past the copied data and mark it up to date. */
1704 pageaddr = kmap_atomic(page, KM_USER0);
1705 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1706 kunmap_atomic(pageaddr, KM_USER0);
1707 flush_dcache_page(page);
1708 SetPageUptodate(page);
/*
 * Use a directory's index table to skip ahead to the last indexed position
 * at or before f_pos.  Each index entry is read (a byte-swapped and a
 * direct path are visible; the selecting conditional is elided) and the
 * scan stops at the first entry whose index exceeds f_pos.  On exit
 * *next_block/*next_offset point at the corresponding place in the
 * directory table, and the reached directory offset ('length') is
 * returned (return statement elided).
 */
1716 static int get_dir_index_using_offset(struct super_block *s, long long
1717 *next_block, unsigned int *next_offset,
1718 long long index_start,
1719 unsigned int index_offset, int i_count,
1722 struct squashfs_sb_info *msblk = s->s_fs_info;
1723 struct squashfs_super_block *sblk = &msblk->sblk;
1725 struct squashfs_dir_index index;
1727 TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
1728 i_count, (unsigned int) f_pos);
1734 for (i = 0; i < i_count; i++) {
/* Swapped-endian path: read into a temporary and convert. */
1736 struct squashfs_dir_index sindex;
1737 squashfs_get_cached_block(s, (char *) &sindex,
1738 index_start, index_offset,
1739 sizeof(sindex), &index_start,
1741 SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
/* Native-endian path: read the entry directly. */
1743 squashfs_get_cached_block(s, (char *) &index,
1744 index_start, index_offset,
1745 sizeof(index), &index_start,
1748 if (index.index > f_pos)
/* Skip over the entry's name (size + 1 bytes, dest == NULL). */
1751 squashfs_get_cached_block(s, NULL, index_start, index_offset,
1752 index.size + 1, &index_start,
1755 length = index.index;
1756 *next_block = index.start_block + sblk->directory_table_start;
/* Directory offsets wrap within a metadata block. */
1759 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * Use a directory's index table to skip ahead to the last index entry whose
 * name sorts at or before @name (directory entries are stored sorted).
 * A single allocation holds both the comparison copy of @name ('str') and
 * a squashfs_dir_index large enough for its name.  On exit
 * *next_block/*next_offset point into the directory table and the reached
 * offset ('length') is returned (return and kfree(str) elided).
 * NOTE(review): no NUL-termination of 'str' after strncpy() is visible in
 * this excerpt -- confirm the elided str[size] = '\0' exists in the full
 * file, otherwise strcmp() below reads past the copied name.
 */
1766 static int get_dir_index_using_name(struct super_block *s, long long
1767 *next_block, unsigned int *next_offset,
1768 long long index_start,
1769 unsigned int index_offset, int i_count,
1770 const char *name, int size)
1772 struct squashfs_sb_info *msblk = s->s_fs_info;
1773 struct squashfs_super_block *sblk = &msblk->sblk;
1775 struct squashfs_dir_index *index;
1778 TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
1780 if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
1781 (SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
1782 ERROR("Failed to allocate squashfs_dir_index\n");
/* 'index' lives in the second half of the allocation, after 'str'. */
1786 index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
1787 strncpy(str, name, size);
1790 for (i = 0; i < i_count; i++) {
/* Swapped-endian path: read into a temporary and convert. */
1792 struct squashfs_dir_index sindex;
1793 squashfs_get_cached_block(s, (char *) &sindex,
1794 index_start, index_offset,
1795 sizeof(sindex), &index_start,
1797 SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
/* Native-endian path: read the entry header directly. */
1799 squashfs_get_cached_block(s, (char *) index,
1800 index_start, index_offset,
1801 sizeof(struct squashfs_dir_index),
1802 &index_start, &index_offset);
1804 squashfs_get_cached_block(s, index->name, index_start,
1805 index_offset, index->size + 1,
1806 &index_start, &index_offset);
/* On-disk size is length - 1, hence the + 1 terminator position. */
1808 index->name[index->size + 1] = '\0';
/* Stop at the first index name sorting after the target. */
1810 if (strcmp(index->name, str) > 0)
1813 length = index->index;
1814 *next_block = index->start_block + sblk->directory_table_start;
1817 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * ->readdir: emit directory entries through @filldir starting at
 * file->f_pos.  f_pos values 0-2 synthesize "." and ".." (the second
 * iteration's body is largely elided); the directory index is then used
 * to skip to f_pos, and headers/entries are streamed from the directory
 * table.  Swapped-endian and native read paths are both visible; the
 * #ifdef-style selection between them is elided.
 * NOTE(review): elided view -- "." handling details, loop exits, the
 * finish/failure returns and kfree(dire) are not visible.
 */
1824 static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
1826 struct inode *i = file->f_dentry->d_inode;
1827 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1828 struct squashfs_super_block *sblk = &msblk->sblk;
1829 long long next_block = SQUASHFS_I(i)->start_block +
1830 sblk->directory_table_start;
1831 int next_offset = SQUASHFS_I(i)->offset, length = 0,
1833 struct squashfs_dir_header dirh;
1834 struct squashfs_dir_entry *dire;
1836 TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
1838 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1839 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1840 ERROR("Failed to allocate squashfs_dir_entry\n");
/* Synthesize "." (f_pos 0) and ".." entries before real ones. */
1844 while(file->f_pos < 3) {
1848 if(file->f_pos == 0) {
1855 i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
1857 TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
1858 (unsigned int) dirent, name, size, (int)
1860 squashfs_filetype_table[1]);
1862 if (filldir(dirent, name, size,
1864 squashfs_filetype_table[1]) < 0) {
1865 TRACE("Filldir returned less than 0\n");
1868 file->f_pos += size;
/* Fast-forward within the directory table using the index. */
1871 length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
1872 SQUASHFS_I(i)->u.s2.directory_index_start,
1873 SQUASHFS_I(i)->u.s2.directory_index_offset,
1874 SQUASHFS_I(i)->u.s2.directory_index_count,
1877 while (length < i_size_read(i)) {
1878 /* read directory header */
1880 struct squashfs_dir_header sdirh;
1882 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
1883 next_block, next_offset, sizeof(sdirh),
1884 &next_block, &next_offset))
1887 length += sizeof(sdirh);
1888 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
1890 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
1891 next_block, next_offset, sizeof(dirh),
1892 &next_block, &next_offset))
1895 length += sizeof(dirh);
/* On-disk count is entries - 1. */
1898 dir_count = dirh.count + 1;
1899 while (dir_count--) {
1901 struct squashfs_dir_entry sdire;
1902 if (!squashfs_get_cached_block(i->i_sb, (char *)
1903 &sdire, next_block, next_offset,
1904 sizeof(sdire), &next_block,
1908 length += sizeof(sdire);
1909 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
1911 if (!squashfs_get_cached_block(i->i_sb, (char *)
1912 dire, next_block, next_offset,
1913 sizeof(*dire), &next_block,
1917 length += sizeof(*dire);
1920 if (!squashfs_get_cached_block(i->i_sb, dire->name,
1921 next_block, next_offset,
1922 dire->size + 1, &next_block,
1926 length += dire->size + 1;
/* Skip entries already consumed (f_pos beyond this entry). */
1928 if (file->f_pos >= length)
1931 dire->name[dire->size + 1] = '\0';
1933 TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
1934 (unsigned int) dirent, dire->name,
1935 dire->size + 1, (int) file->f_pos,
1936 dirh.start_block, dire->offset,
1937 dirh.inode_number + dire->inode_number,
1938 squashfs_filetype_table[dire->type]);
1940 if (filldir(dirent, dire->name, dire->size + 1,
1942 dirh.inode_number + dire->inode_number,
1943 squashfs_filetype_table[dire->type])
1945 TRACE("Filldir returned less than 0\n");
1948 file->f_pos = length;
1957 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * ->lookup: find @dentry's name in directory @i.  Uses the name-based
 * directory index to skip ahead, then scans directory headers/entries.
 * Because entries are stored sorted, the scan can stop early once the
 * first character of the on-disk name exceeds the target's.  On a match
 * the inode is instantiated via msblk->iget() and bound with d_add().
 * NOTE(review): elided view -- swap-path selection, loop exits,
 * kfree(dire) and the return statements are not visible.
 */
1963 static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
1964 struct nameidata *nd)
1966 const unsigned char *name = dentry->d_name.name;
1967 int len = dentry->d_name.len;
1968 struct inode *inode = NULL;
1969 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1970 struct squashfs_super_block *sblk = &msblk->sblk;
1971 long long next_block = SQUASHFS_I(i)->start_block +
1972 sblk->directory_table_start;
1973 int next_offset = SQUASHFS_I(i)->offset, length = 0,
1975 struct squashfs_dir_header dirh;
1976 struct squashfs_dir_entry *dire;
1978 TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
1980 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1981 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1982 ERROR("Failed to allocate squashfs_dir_entry\n");
/* Names longer than the format allows can never match. */
1986 if (len > SQUASHFS_NAME_LEN)
/* Fast-forward using the name-sorted directory index. */
1989 length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
1990 SQUASHFS_I(i)->u.s2.directory_index_start,
1991 SQUASHFS_I(i)->u.s2.directory_index_offset,
1992 SQUASHFS_I(i)->u.s2.directory_index_count, name,
1995 while (length < i_size_read(i)) {
1996 /* read directory header */
1998 struct squashfs_dir_header sdirh;
1999 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
2000 next_block, next_offset, sizeof(sdirh),
2001 &next_block, &next_offset))
2004 length += sizeof(sdirh);
2005 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
2007 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
2008 next_block, next_offset, sizeof(dirh),
2009 &next_block, &next_offset))
2012 length += sizeof(dirh);
/* On-disk count is entries - 1. */
2015 dir_count = dirh.count + 1;
2016 while (dir_count--) {
2018 struct squashfs_dir_entry sdire;
2019 if (!squashfs_get_cached_block(i->i_sb, (char *)
2020 &sdire, next_block,next_offset,
2021 sizeof(sdire), &next_block,
2025 length += sizeof(sdire);
2026 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
2028 if (!squashfs_get_cached_block(i->i_sb, (char *)
2029 dire, next_block,next_offset,
2030 sizeof(*dire), &next_block,
2034 length += sizeof(*dire);
2037 if (!squashfs_get_cached_block(i->i_sb, dire->name,
2038 next_block, next_offset, dire->size + 1,
2039 &next_block, &next_offset))
2042 length += dire->size + 1;
/* Sorted order: past the possible position, stop searching. */
2044 if (name[0] < dire->name[0])
2047 if ((len == dire->size + 1) && !strncmp(name,
2049 squashfs_inode_t ino =
2050 SQUASHFS_MKINODE(dirh.start_block,
2053 TRACE("calling squashfs_iget for directory "
2054 "entry %s, inode %x:%x, %d\n", name,
2055 dirh.start_block, dire->offset,
2056 dirh.inode_number + dire->inode_number);
2058 inode = (msblk->iget)(i->i_sb, ino);
/* Negative or positive dentry is bound either way. */
2067 d_add(dentry, inode);
2071 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * ->put_super: tear down all per-mount state.  Frees the block cache
 * (only entries holding valid blocks own data), the fragment cache, the
 * read buffers, fragment/meta index tables and the zlib workspace, then
 * the sb_info itself.
 * NOTE(review): the guarding 'if (s->s_fs_info)' and several braces are
 * elided from this view.
 */
2077 static void squashfs_put_super(struct super_block *s)
2082 struct squashfs_sb_info *sbi = s->s_fs_info;
2083 if (sbi->block_cache)
2084 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
/* Only slots holding a real block ever had data allocated. */
2085 if (sbi->block_cache[i].block !=
2086 SQUASHFS_INVALID_BLK)
2087 kfree(sbi->block_cache[i].data);
2089 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
2090 SQUASHFS_FREE(sbi->fragment[i].data);
2091 kfree(sbi->fragment);
2092 kfree(sbi->block_cache);
2093 kfree(sbi->read_data);
2094 kfree(sbi->read_page);
2096 kfree(sbi->fragment_index);
2097 kfree(sbi->fragment_index_2);
2098 kfree(sbi->meta_index);
/* zlib workspace was vmalloc()ed, so vfree(), not kfree(). */
2099 vfree(sbi->stream.workspace);
2100 kfree(s->s_fs_info);
2101 s->s_fs_info = NULL;
/*
 * ->get_sb: mount entry point; delegate to the generic block-device
 * mount helper with squashfs_fill_super as the superblock filler.
 */
2106 static int squashfs_get_sb(struct file_system_type *fs_type,
2107 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2109 return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super, mnt);
/*
 * Module init: create the inode slab cache, then register the filesystem,
 * tearing the cache back down if registration fails.
 * NOTE(review): the early return on init_inodecache() failure and the
 * final return are elided from this view.
 */
2113 static int __init init_squashfs_fs(void)
2115 int err = init_inodecache();
2119 printk(KERN_INFO "squashfs: version 3.1 (2006/08/09) "
2120 "Phillip Lougher\n");
2122 if ((err = register_filesystem(&squashfs_fs_type)))
2123 destroy_inodecache();
/*
 * Module exit: unregister the filesystem and destroy the inode slab cache.
 */
2130 static void __exit exit_squashfs_fs(void)
2132 unregister_filesystem(&squashfs_fs_type);
2133 destroy_inodecache();
2137 static kmem_cache_t * squashfs_inode_cachep;
/*
 * ->alloc_inode: allocate a squashfs_inode_info from the slab cache and
 * hand the embedded VFS inode back to the VFS.
 * NOTE(review): the NULL check on the allocation is elided from this view.
 */
2140 static struct inode *squashfs_alloc_inode(struct super_block *sb)
2142 struct squashfs_inode_info *ei;
2143 ei = kmem_cache_alloc(squashfs_inode_cachep, SLAB_KERNEL);
2146 return &ei->vfs_inode;
/*
 * ->destroy_inode: return the containing squashfs_inode_info to the slab.
 */
2150 static void squashfs_destroy_inode(struct inode *inode)
2152 kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
/*
 * Slab constructor: initialise the embedded VFS inode exactly once per
 * object (2.6-era SLAB_CTOR_CONSTRUCTOR protocol).
 */
2156 static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
2158 struct squashfs_inode_info *ei = foo;
2160 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2161 SLAB_CTOR_CONSTRUCTOR)
2162 inode_init_once(&ei->vfs_inode);
/*
 * Create the squashfs inode slab cache with init_once as constructor
 * (constructor argument line elided).  Returns -ENOMEM-style failure if
 * creation fails (return statements elided from this view).
 */
2166 static int __init init_inodecache(void)
2168 squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
2169 sizeof(struct squashfs_inode_info),
2170 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
2172 if (squashfs_inode_cachep == NULL)
/*
 * Destroy the inode slab cache; in this kernel era kmem_cache_destroy()
 * returned non-zero if objects were still live, hence the warning.
 */
2178 static void destroy_inodecache(void)
2180 if (kmem_cache_destroy(squashfs_inode_cachep))
2181 printk(KERN_INFO "squashfs_inode_cache: not all structures "
/* Module registration and metadata. */
2186 module_init(init_squashfs_fs);
2187 module_exit(exit_squashfs_fs);
2188 MODULE_DESCRIPTION("squashfs, a compressed read-only filesystem");
2189 MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
2190 MODULE_LICENSE("GPL");