2 * Squashfs - a compressed read only filesystem for Linux
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006
5 * Phillip Lougher <phillip@lougher.org.uk>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 #include <linux/types.h>
25 #include <linux/squashfs_fs.h>
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/zlib.h>
31 #include <linux/smp_lock.h>
32 #include <linux/slab.h>
33 #include <linux/squashfs_fs_sb.h>
34 #include <linux/squashfs_fs_i.h>
35 #include <linux/buffer_head.h>
36 #include <linux/vfs.h>
37 #include <linux/init.h>
38 #include <linux/dcache.h>
39 #include <linux/wait.h>
40 #include <linux/blkdev.h>
41 #include <linux/vmalloc.h>
42 #include <asm/uaccess.h>
43 #include <asm/semaphore.h>
/*
 * Forward declarations for the VFS entry points and internal helpers
 * defined later in this file.
 *
 * NOTE(review): this extract is missing source lines (the embedded
 * original line numbers skip), so some declarations are truncated —
 * e.g. squashfs_lookup's third parameter is cut off.
 */
47 static void squashfs_put_super(struct super_block *);
48 static int squashfs_statfs(struct dentry *, struct kstatfs *);
49 static int squashfs_symlink_readpage(struct file *file, struct page *page);
50 static int squashfs_readpage(struct file *file, struct page *page);
51 static int squashfs_readpage4K(struct file *file, struct page *page);
52 static int squashfs_readdir(struct file *, void *, filldir_t);
53 static struct inode *squashfs_alloc_inode(struct super_block *sb);
54 static void squashfs_destroy_inode(struct inode *inode);
55 static int init_inodecache(void);
56 static void destroy_inodecache(void);
57 static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
59 static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode);
60 static long long read_blocklist(struct inode *inode, int index,
61 int readahead_blks, char *block_list,
62 unsigned short **block_p, unsigned int *bsize);
63 static int squashfs_get_sb(struct file_system_type *, int,
64 const char *, void *, struct vfsmount *);
/*
 * Filesystem type registration record.  Requires a backing block device
 * (FS_REQUIRES_DEV); kill_block_super releases the bdev on unmount.
 * NOTE(review): extract is missing lines here (e.g. the .owner/.name
 * initialisers and the closing brace are not visible).
 */
66 static struct file_system_type squashfs_fs_type = {
69 .get_sb = squashfs_get_sb,
70 .kill_sb = kill_block_super,
71 .fs_flags = FS_REQUIRES_DEV
/*
 * Maps the on-disk squashfs directory-entry type codes (by index) to the
 * VFS DT_* values handed to readdir's filldir callback.
 */
74 static unsigned char squashfs_filetype_table[] = {
75 DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
/*
 * Super-block operations.  Squashfs is read-only, so only inode
 * alloc/destroy, statfs and put_super are provided.
 */
78 static struct super_operations squashfs_ops = {
79 .alloc_inode = squashfs_alloc_inode,
80 .destroy_inode = squashfs_destroy_inode,
81 .statfs = squashfs_statfs,
82 .put_super = squashfs_put_super,
/* Address-space ops for symlink inodes: page-cache reads only. */
85 SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
86 .readpage = squashfs_symlink_readpage
/* Address-space ops for regular files with block size > 4K. */
89 SQSH_EXTERN struct address_space_operations squashfs_aops = {
90 .readpage = squashfs_readpage
/* Address-space ops for regular files with 4K (or smaller) block size. */
93 SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
94 .readpage = squashfs_readpage4K
/* File operations for directories: readdir plus the generic dir read stub. */
97 static struct file_operations squashfs_dir_ops = {
98 .read = generic_read_dir,
99 .readdir = squashfs_readdir
/* Inode operations for directories: lookup only (read-only fs). */
102 SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
103 .lookup = squashfs_lookup
/*
 * Read the two-byte compressed-length field of a metadata block, which may
 * straddle a device block boundary, assembling it into *c_byte one byte at
 * a time (the paired [0]/[1] stores are the big/little-endian variants).
 * Advances *cur_index/*offset past the field and, when the filesystem was
 * built with check-data, verifies the SQUASHFS_MARKER_BYTE that follows.
 * Returns the buffer_head holding the current position, or NULL on error.
 *
 * NOTE(review): this extract is missing lines (endianness #ifdef/else
 * branches, offset updates and error paths are truncated) — do not assume
 * the visible statements are contiguous.
 */
107 static struct buffer_head *get_block_length(struct super_block *s,
108 int *cur_index, int *offset, int *c_byte)
110 struct squashfs_sb_info *msblk = s->s_fs_info;
112 struct buffer_head *bh;
114 if (!(bh = sb_bread(s, *cur_index)))
/* Length field split across two device blocks: one byte each side. */
117 if (msblk->devblksize - *offset == 1) {
119 ((unsigned char *) &temp)[1] = *((unsigned char *)
120 (bh->b_data + *offset));
122 ((unsigned char *) &temp)[0] = *((unsigned char *)
123 (bh->b_data + *offset));
/* Second byte lives in the next device block. */
125 if (!(bh = sb_bread(s, ++(*cur_index))))
128 ((unsigned char *) &temp)[0] = *((unsigned char *)
131 ((unsigned char *) &temp)[1] = *((unsigned char *)
/* Both bytes available in the current device block. */
137 ((unsigned char *) &temp)[1] = *((unsigned char *)
138 (bh->b_data + *offset));
139 ((unsigned char *) &temp)[0] = *((unsigned char *)
140 (bh->b_data + *offset + 1));
142 ((unsigned char *) &temp)[0] = *((unsigned char *)
143 (bh->b_data + *offset));
144 ((unsigned char *) &temp)[1] = *((unsigned char *)
145 (bh->b_data + *offset + 1));
/* Optional integrity marker byte written by mksquashfs -check-data. */
151 if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
152 if (*offset == msblk->devblksize) {
154 if (!(bh = sb_bread(s, ++(*cur_index))))
158 if (*((unsigned char *) (bh->b_data + *offset)) !=
159 SQUASHFS_MARKER_BYTE) {
160 ERROR("Metadata block marker corrupt @ %x\n",
/*
 * Read a data or metadata block starting at 'index' into 'buffer',
 * inflating it through the per-mount zlib stream when the on-disk length
 * word has the compressed bit set.  'length' == 0 means a metadata block
 * whose length must first be read via get_block_length().  On success
 * returns the number of bytes produced and, if next_index is non-NULL,
 * stores the byte offset just past this block; returns 0 on error.
 *
 * NOTE(review): the extract is missing lines (branch structure, error
 * labels and several offset updates are truncated).
 */
174 SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
175 long long index, unsigned int length,
176 long long *next_index)
178 struct squashfs_sb_info *msblk = s->s_fs_info;
/* Worst-case number of device blocks a squashfs block can span. */
179 struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
180 msblk->devblksize_log2) + 2];
181 unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
182 unsigned int cur_index = index >> msblk->devblksize_log2;
183 int bytes, avail_bytes, b = 0, k;
185 unsigned int compressed;
186 unsigned int c_byte = length;
/* Data block path: length (and compressed bit) supplied by caller. */
189 bytes = msblk->devblksize - offset;
190 compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
/* Compressed input is staged in msblk->read_data, then inflated. */
191 c_buffer = compressed ? msblk->read_data : buffer;
192 c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
194 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
195 ? "" : "un", (unsigned int) c_byte);
197 if (!(bh[0] = sb_getblk(s, cur_index)))
200 for (b = 1; bytes < c_byte; b++) {
201 if (!(bh[b] = sb_getblk(s, ++cur_index)))
203 bytes += msblk->devblksize;
205 ll_rw_block(READ, b, bh);
/* Metadata block path: length word is read from disk first. */
207 if (!(bh[0] = get_block_length(s, &cur_index, &offset,
211 bytes = msblk->devblksize - offset;
212 compressed = SQUASHFS_COMPRESSED(c_byte);
213 c_buffer = compressed ? msblk->read_data : buffer;
214 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
216 TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
217 ? "" : "un", (unsigned int) c_byte);
219 for (b = 1; bytes < c_byte; b++) {
220 if (!(bh[b] = sb_getblk(s, ++cur_index)))
222 bytes += msblk->devblksize;
224 ll_rw_block(READ, b - 1, bh + 1);
/* One shared zlib stream and staging buffer per mount — serialise. */
228 down(&msblk->read_data_mutex);
/* Gather the device blocks into the (de)compression input buffer. */
230 for (bytes = 0, k = 0; k < b; k++) {
231 avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
232 msblk->devblksize - offset :
234 wait_on_buffer(bh[k]);
235 if (!buffer_uptodate(bh[k]))
237 memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
238 bytes += avail_bytes;
/* Inflate in one shot: init -> inflate(Z_FINISH) -> end. */
249 msblk->stream.next_in = c_buffer;
250 msblk->stream.avail_in = c_byte;
251 msblk->stream.next_out = buffer;
252 msblk->stream.avail_out = msblk->read_size;
254 if (((zlib_err = zlib_inflateInit(&msblk->stream)) != Z_OK) ||
255 ((zlib_err = zlib_inflate(&msblk->stream, Z_FINISH))
256 != Z_STREAM_END) || ((zlib_err =
257 zlib_inflateEnd(&msblk->stream)) != Z_OK)) {
258 ERROR("zlib_fs returned unexpected result 0x%x\n",
262 bytes = msblk->stream.total_out;
264 up(&msblk->read_data_mutex);
/* Metadata blocks also skip the trailing check-data marker byte. */
268 *next_index = index + c_byte + (length ? 0 :
269 (SQUASHFS_CHECK_DATA(msblk->sblk.flags)
278 ERROR("sb_bread failed reading block 0x%x\n", cur_index);
/*
 * Copy 'length' bytes of metadata starting at [block:offset] into
 * 'buffer', reading through the small per-mount metadata block cache
 * (msblk->block_cache, SQUASHFS_CACHED_BLKS entries).  A cache miss picks
 * a victim slot, marks it SQUASHFS_USED_BLK while the read is in flight
 * (other readers sleep on msblk->waitq), then publishes the block.  Reads
 * spanning a metadata block continue at the cached next_index.  Stores the
 * position just past the read via *next_block/*next_offset and returns the
 * number of bytes copied, or presumably 0 on error (error paths are
 * truncated in this extract — TODO confirm).
 *
 * NOTE(review): many lines are missing here (loop bodies, retry gotos,
 * NULL-buffer handling); do not assume the statements are contiguous.
 */
283 SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
284 long long block, unsigned int offset,
285 int length, long long *next_block,
286 unsigned int *next_offset)
288 struct squashfs_sb_info *msblk = s->s_fs_info;
289 int n, i, bytes, return_length = length;
290 long long next_index;
292 TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
/* Fast path: is the block already cached? */
295 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
296 if (msblk->block_cache[i].block == block)
299 down(&msblk->block_cache_mutex);
301 if (i == SQUASHFS_CACHED_BLKS) {
302 /* read inode header block */
/* Scan for a victim slot, starting at the round-robin cursor. */
303 for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
304 n ; n --, i = (i + 1) %
305 SQUASHFS_CACHED_BLKS)
306 if (msblk->block_cache[i].block !=
/* No usable slot: sleep until another reader finishes a fill. */
313 init_waitqueue_entry(&wait, current);
314 add_wait_queue(&msblk->waitq, &wait);
315 set_current_state(TASK_UNINTERRUPTIBLE);
316 up(&msblk->block_cache_mutex);
318 set_current_state(TASK_RUNNING);
319 remove_wait_queue(&msblk->waitq, &wait);
322 msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
/* Lazily allocate the slot's data buffer on first use. */
324 if (msblk->block_cache[i].block ==
325 SQUASHFS_INVALID_BLK) {
326 if (!(msblk->block_cache[i].data =
327 kmalloc(SQUASHFS_METADATA_SIZE,
329 ERROR("Failed to allocate cache"
331 up(&msblk->block_cache_mutex);
/* Mark slot in-flight and drop the lock for the actual disk read. */
336 msblk->block_cache[i].block = SQUASHFS_USED_BLK;
337 up(&msblk->block_cache_mutex);
339 if (!(msblk->block_cache[i].length =
340 squashfs_read_data(s,
341 msblk->block_cache[i].data,
342 block, 0, &next_index))) {
343 ERROR("Unable to read cache block [%llx:%x]\n",
/* Publish the filled slot and wake any waiters. */
348 down(&msblk->block_cache_mutex);
349 wake_up(&msblk->waitq);
350 msblk->block_cache[i].block = block;
351 msblk->block_cache[i].next_index = next_index;
352 TRACE("Read cache block [%llx:%x]\n", block, offset);
/* Slot was recycled under us — retry (retry goto truncated). */
355 if (msblk->block_cache[i].block != block) {
356 up(&msblk->block_cache_mutex);
/* Request satisfied entirely within this metadata block. */
360 if ((bytes = msblk->block_cache[i].length - offset) >= length) {
362 memcpy(buffer, msblk->block_cache[i].data +
364 if (msblk->block_cache[i].length - offset == length) {
365 *next_block = msblk->block_cache[i].next_index;
369 *next_offset = offset + length;
371 up(&msblk->block_cache_mutex);
/* Partial copy; continue from the next metadata block. */
375 memcpy(buffer, msblk->block_cache[i].data +
379 block = msblk->block_cache[i].next_index;
380 up(&msblk->block_cache_mutex);
387 return return_length;
/*
 * Look up fragment number 'fragment' in the fragment index table and
 * return its on-disk start block and (compressed-bit-tagged) size via the
 * out parameters.  The two squashfs_get_cached_block() calls are the
 * swapped-endian and native-endian paths (the #ifdef lines are truncated
 * in this extract).  Return value convention not visible here — TODO
 * confirm (presumably non-zero on success, 0 on failure).
 */
393 static int get_fragment_location(struct super_block *s, unsigned int fragment,
394 long long *fragment_start_block,
395 unsigned int *fragment_size)
397 struct squashfs_sb_info *msblk = s->s_fs_info;
/* Index table maps fragment number -> metadata block holding its entry. */
398 long long start_block =
399 msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
400 int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
401 struct squashfs_fragment_entry fragment_entry;
404 struct squashfs_fragment_entry sfragment_entry;
/* Foreign-endian filesystem: read raw entry then byte-swap. */
406 if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
408 sizeof(sfragment_entry), &start_block,
411 SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
/* Native-endian filesystem: read the entry directly. */
413 if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
415 sizeof(fragment_entry), &start_block,
419 *fragment_start_block = fragment_entry.start_block;
420 *fragment_size = fragment_entry.size;
/*
 * Drop one reference on a cached fragment (the decrement itself is on a
 * line missing from this extract) and wake any thread waiting for a free
 * fragment cache slot.
 */
429 SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
430 squashfs_fragment_cache *fragment)
432 down(&msblk->fragment_mutex);
434 wake_up(&msblk->fragment_wait_queue);
435 up(&msblk->fragment_mutex);
/*
 * Find (or read in) the fragment block starting at 'start_block' in the
 * per-mount fragment cache and return its slot with the lock count
 * incremented; the caller must pair this with release_cached_fragment().
 * On a miss, an unlocked slot is chosen round-robin; if all slots are
 * locked the caller sleeps on fragment_wait_queue until one is released.
 *
 * NOTE(review): lines are missing from this extract (retry gotos, error
 * returns, the 'locked' fast-path decrement); statements shown are not
 * contiguous.
 */
439 SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
440 *s, long long start_block,
444 struct squashfs_sb_info *msblk = s->s_fs_info;
447 down(&msblk->fragment_mutex);
/* Fast path: fragment already cached? */
449 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
450 msblk->fragment[i].block != start_block; i++);
452 if (i == SQUASHFS_CACHED_FRAGMENTS) {
/* Miss: pick the first unlocked slot, round-robin from next_fragment. */
453 for (i = msblk->next_fragment, n =
454 SQUASHFS_CACHED_FRAGMENTS; n &&
455 msblk->fragment[i].locked; n--, i = (i + 1) %
456 SQUASHFS_CACHED_FRAGMENTS);
/* All slots locked: wait for a release, then retry (goto truncated). */
461 init_waitqueue_entry(&wait, current);
462 add_wait_queue(&msblk->fragment_wait_queue,
464 set_current_state(TASK_UNINTERRUPTIBLE);
465 up(&msblk->fragment_mutex);
467 set_current_state(TASK_RUNNING);
468 remove_wait_queue(&msblk->fragment_wait_queue,
472 msblk->next_fragment = (msblk->next_fragment + 1) %
473 SQUASHFS_CACHED_FRAGMENTS;
/* Lazily allocate the slot's data buffer, sized for a full block. */
475 if (msblk->fragment[i].data == NULL)
476 if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
477 (SQUASHFS_FILE_MAX_SIZE))) {
478 ERROR("Failed to allocate fragment "
480 up(&msblk->fragment_mutex);
/* Reserve the slot and drop the lock for the disk read. */
484 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
485 msblk->fragment[i].locked = 1;
486 up(&msblk->fragment_mutex);
488 if (!(msblk->fragment[i].length = squashfs_read_data(s,
489 msblk->fragment[i].data,
490 start_block, length, NULL))) {
491 ERROR("Unable to read fragment cache block "
492 "[%llx]\n", start_block);
493 msblk->fragment[i].locked = 0;
497 msblk->fragment[i].block = start_block;
498 TRACE("New fragment %d, start block %lld, locked %d\n",
499 i, msblk->fragment[i].block,
500 msblk->fragment[i].locked);
/* Hit path: take an extra reference on the already-cached fragment. */
504 msblk->fragment[i].locked++;
505 up(&msblk->fragment_mutex);
506 TRACE("Got fragment %d, start block %lld, locked %d\n", i,
507 msblk->fragment[i].block,
508 msblk->fragment[i].locked);
512 return &msblk->fragment[i];
/*
 * Allocate a new VFS inode for this super block and initialise the fields
 * common to every squashfs inode type from the on-disk base header:
 * inode number, times (squashfs stores a single mtime, reused for atime
 * and ctime), mode, and uid/gid translated through the mount's id tables.
 * The NULL-check on new_inode() and the SQUASHFS_GUIDS "no gid" branch
 * body are on lines missing from this extract.
 */
519 static struct inode *squashfs_new_inode(struct super_block *s,
520 struct squashfs_base_inode_header *inodeb)
522 struct squashfs_sb_info *msblk = s->s_fs_info;
523 struct inode *i = new_inode(s);
526 i->i_ino = inodeb->inode_number;
527 i->i_mtime.tv_sec = inodeb->mtime;
528 i->i_atime.tv_sec = inodeb->mtime;
529 i->i_ctime.tv_sec = inodeb->mtime;
530 i->i_uid = msblk->uid[inodeb->uid];
531 i->i_mode = inodeb->mode;
/* SQUASHFS_GUIDS is the "no gid stored" sentinel (branch truncated). */
533 if (inodeb->guid == SQUASHFS_GUIDS)
536 i->i_gid = msblk->guid[inodeb->guid];
/*
 * Read the on-disk inode identified by the packed squashfs_inode_t
 * (metadata block + offset), build and return the corresponding VFS
 * inode, dispatching on the inode type.  For each type the header is
 * read twice in the extract: the first squashfs_get_cached_block() call
 * plus SQUASHFS_SWAP_* is the foreign-endian path, the second is the
 * native-endian path (the surrounding #ifdef lines are truncated).
 * Returns NULL on failure.
 *
 * NOTE(review): this extract is missing many lines (break/goto
 * statements, some local declarations such as frag_blk, and the failure
 * label bodies); statements shown are not contiguous.
 */
543 static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode)
546 struct squashfs_sb_info *msblk = s->s_fs_info;
547 struct squashfs_super_block *sblk = &msblk->sblk;
548 long long block = SQUASHFS_INODE_BLK(inode) +
549 sblk->inode_table_start;
550 unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
551 long long next_block;
552 unsigned int next_offset;
553 union squashfs_inode_header id, sid;
554 struct squashfs_base_inode_header *inodeb = &id.base,
555 *sinodeb = &sid.base;
557 TRACE("Entered squashfs_iget\n");
/* First read just the base header to learn the inode type. */
560 if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
561 offset, sizeof(*sinodeb), &next_block,
564 SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
567 if (!squashfs_get_cached_block(s, (char *) inodeb, block,
568 offset, sizeof(*inodeb), &next_block,
572 switch(inodeb->inode_type) {
/* Regular file (short form). */
573 case SQUASHFS_FILE_TYPE: {
574 unsigned int frag_size;
576 struct squashfs_reg_inode_header *inodep = &id.reg;
577 struct squashfs_reg_inode_header *sinodep = &sid.reg;
580 if (!squashfs_get_cached_block(s, (char *)
581 sinodep, block, offset,
582 sizeof(*sinodep), &next_block,
585 SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
587 if (!squashfs_get_cached_block(s, (char *)
588 inodep, block, offset,
589 sizeof(*inodep), &next_block,
/* Resolve the tail-end fragment, if the file has one. */
593 frag_blk = SQUASHFS_INVALID_BLK;
594 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
595 !get_fragment_location(s,
596 inodep->fragment, &frag_blk, &frag_size))
599 if((i = squashfs_new_inode(s, inodeb)) == NULL)
603 i->i_size = inodep->file_size;
604 i->i_fop = &generic_ro_fops;
605 i->i_mode |= S_IFREG;
606 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
607 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
608 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
609 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
610 SQUASHFS_I(i)->start_block = inodep->start_block;
/* Block list follows the header: remember where it starts. */
611 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
612 SQUASHFS_I(i)->offset = next_offset;
613 if (sblk->block_size > 4096)
614 i->i_data.a_ops = &squashfs_aops;
616 i->i_data.a_ops = &squashfs_aops_4K;
618 TRACE("File inode %x:%x, start_block %llx, "
619 "block_list_start %llx, offset %x\n",
620 SQUASHFS_INODE_BLK(inode), offset,
621 inodep->start_block, next_block,
/* Regular file (long form: adds nlink; otherwise as FILE_TYPE above). */
625 case SQUASHFS_LREG_TYPE: {
626 unsigned int frag_size;
628 struct squashfs_lreg_inode_header *inodep = &id.lreg;
629 struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
632 if (!squashfs_get_cached_block(s, (char *)
633 sinodep, block, offset,
634 sizeof(*sinodep), &next_block,
637 SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
639 if (!squashfs_get_cached_block(s, (char *)
640 inodep, block, offset,
641 sizeof(*inodep), &next_block,
645 frag_blk = SQUASHFS_INVALID_BLK;
646 if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
647 !get_fragment_location(s,
648 inodep->fragment, &frag_blk, &frag_size))
651 if((i = squashfs_new_inode(s, inodeb)) == NULL)
654 i->i_nlink = inodep->nlink;
655 i->i_size = inodep->file_size;
656 i->i_fop = &generic_ro_fops;
657 i->i_mode |= S_IFREG;
658 i->i_blocks = ((i->i_size - 1) >> 9) + 1;
659 SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
660 SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
661 SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
662 SQUASHFS_I(i)->start_block = inodep->start_block;
663 SQUASHFS_I(i)->u.s1.block_list_start = next_block;
664 SQUASHFS_I(i)->offset = next_offset;
665 if (sblk->block_size > 4096)
666 i->i_data.a_ops = &squashfs_aops;
668 i->i_data.a_ops = &squashfs_aops_4K;
670 TRACE("File inode %x:%x, start_block %llx, "
671 "block_list_start %llx, offset %x\n",
672 SQUASHFS_INODE_BLK(inode), offset,
673 inodep->start_block, next_block,
/* Directory (short form). */
677 case SQUASHFS_DIR_TYPE: {
678 struct squashfs_dir_inode_header *inodep = &id.dir;
679 struct squashfs_dir_inode_header *sinodep = &sid.dir;
682 if (!squashfs_get_cached_block(s, (char *)
683 sinodep, block, offset,
684 sizeof(*sinodep), &next_block,
687 SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
689 if (!squashfs_get_cached_block(s, (char *)
690 inodep, block, offset,
691 sizeof(*inodep), &next_block,
695 if((i = squashfs_new_inode(s, inodeb)) == NULL)
698 i->i_nlink = inodep->nlink;
699 i->i_size = inodep->file_size;
700 i->i_op = &squashfs_dir_inode_ops;
701 i->i_fop = &squashfs_dir_ops;
702 i->i_mode |= S_IFDIR;
703 SQUASHFS_I(i)->start_block = inodep->start_block;
704 SQUASHFS_I(i)->offset = inodep->offset;
705 SQUASHFS_I(i)->u.s2.directory_index_count = 0;
706 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
708 TRACE("Directory inode %x:%x, start_block %x, offset "
709 "%x\n", SQUASHFS_INODE_BLK(inode),
710 offset, inodep->start_block,
/* Directory (long form: carries an index for fast name lookup). */
714 case SQUASHFS_LDIR_TYPE: {
715 struct squashfs_ldir_inode_header *inodep = &id.ldir;
716 struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
719 if (!squashfs_get_cached_block(s, (char *)
720 sinodep, block, offset,
721 sizeof(*sinodep), &next_block,
724 SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
727 if (!squashfs_get_cached_block(s, (char *)
728 inodep, block, offset,
729 sizeof(*inodep), &next_block,
733 if((i = squashfs_new_inode(s, inodeb)) == NULL)
736 i->i_nlink = inodep->nlink;
737 i->i_size = inodep->file_size;
738 i->i_op = &squashfs_dir_inode_ops;
739 i->i_fop = &squashfs_dir_ops;
740 i->i_mode |= S_IFDIR;
741 SQUASHFS_I(i)->start_block = inodep->start_block;
742 SQUASHFS_I(i)->offset = inodep->offset;
743 SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
744 SQUASHFS_I(i)->u.s2.directory_index_offset =
746 SQUASHFS_I(i)->u.s2.directory_index_count =
748 SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
750 TRACE("Long directory inode %x:%x, start_block %x, "
752 SQUASHFS_INODE_BLK(inode), offset,
753 inodep->start_block, inodep->offset);
/* Symbolic link: target string follows the header in metadata. */
756 case SQUASHFS_SYMLINK_TYPE: {
757 struct squashfs_symlink_inode_header *inodep =
759 struct squashfs_symlink_inode_header *sinodep =
763 if (!squashfs_get_cached_block(s, (char *)
764 sinodep, block, offset,
765 sizeof(*sinodep), &next_block,
768 SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
771 if (!squashfs_get_cached_block(s, (char *)
772 inodep, block, offset,
773 sizeof(*inodep), &next_block,
777 if((i = squashfs_new_inode(s, inodeb)) == NULL)
780 i->i_nlink = inodep->nlink;
781 i->i_size = inodep->symlink_size;
782 i->i_op = &page_symlink_inode_operations;
783 i->i_data.a_ops = &squashfs_symlink_aops;
784 i->i_mode |= S_IFLNK;
785 SQUASHFS_I(i)->start_block = next_block;
786 SQUASHFS_I(i)->offset = next_offset;
788 TRACE("Symbolic link inode %x:%x, start_block %llx, "
790 SQUASHFS_INODE_BLK(inode), offset,
791 next_block, next_offset);
/* Block and character device nodes. */
794 case SQUASHFS_BLKDEV_TYPE:
795 case SQUASHFS_CHRDEV_TYPE: {
796 struct squashfs_dev_inode_header *inodep = &id.dev;
797 struct squashfs_dev_inode_header *sinodep = &sid.dev;
800 if (!squashfs_get_cached_block(s, (char *)
801 sinodep, block, offset,
802 sizeof(*sinodep), &next_block,
805 SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
807 if (!squashfs_get_cached_block(s, (char *)
808 inodep, block, offset,
809 sizeof(*inodep), &next_block,
813 if ((i = squashfs_new_inode(s, inodeb)) == NULL)
816 i->i_nlink = inodep->nlink;
817 i->i_mode |= (inodeb->inode_type ==
818 SQUASHFS_CHRDEV_TYPE) ? S_IFCHR :
820 init_special_inode(i, i->i_mode,
821 old_decode_dev(inodep->rdev));
823 TRACE("Device inode %x:%x, rdev %x\n",
824 SQUASHFS_INODE_BLK(inode), offset,
/* FIFOs and sockets: no data, just the special-inode stub. */
828 case SQUASHFS_FIFO_TYPE:
829 case SQUASHFS_SOCKET_TYPE: {
830 struct squashfs_ipc_inode_header *inodep = &id.ipc;
831 struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
834 if (!squashfs_get_cached_block(s, (char *)
835 sinodep, block, offset,
836 sizeof(*sinodep), &next_block,
839 SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
841 if (!squashfs_get_cached_block(s, (char *)
842 inodep, block, offset,
843 sizeof(*inodep), &next_block,
847 if ((i = squashfs_new_inode(s, inodeb)) == NULL)
850 i->i_nlink = inodep->nlink;
851 i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
852 ? S_IFIFO : S_IFSOCK;
853 init_special_inode(i, i->i_mode, 0);
857 ERROR("Unknown inode type %d in squashfs_iget!\n",
/* Make the fully-built inode visible to the inode hash. */
862 insert_inode_hash(i);
866 ERROR("Unable to read inode [%llx:%x]\n", block, offset);
/*
 * Allocate and read the fragment index table (the table of metadata block
 * addresses that get_fragment_location() indexes into), byte-swapping the
 * entries in place on foreign-endian filesystems.  Return convention not
 * visible in this extract — presumably 1 on success, 0 on failure (the
 * caller at squashfs_fill_super checks "== 0"); TODO confirm.
 */
873 static int read_fragment_index_table(struct super_block *s)
875 struct squashfs_sb_info *msblk = s->s_fs_info;
876 struct squashfs_super_block *sblk = &msblk->sblk;
878 /* Allocate fragment index table */
879 if (!(msblk->fragment_index = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES
880 (sblk->fragments), GFP_KERNEL))) {
/* XXX: wrong message — this allocates the fragment index, not uid/gid. */
881 ERROR("Failed to allocate uid/gid table\n");
885 if (SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments) &&
886 !squashfs_read_data(s, (char *)
887 msblk->fragment_index,
888 sblk->fragment_table_start,
889 SQUASHFS_FRAGMENT_INDEX_BYTES
891 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
892 ERROR("unable to read fragment index table\n");
/* Foreign-endian path: swap each index entry in place. */
900 for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments);
902 SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
903 &msblk->fragment_index[i], 1);
904 msblk->fragment_index[i] = fragment;
/*
 * Check the superblock's major/minor version against what this driver
 * supports, and install the version-appropriate operation pointers
 * (iget / read_blocklist / read_fragment_index_table are preset to the
 * current-format handlers; the 1.0/2.0 helpers may override them).
 * Returns non-zero if the filesystem is usable (return statements are on
 * lines missing from this extract — TODO confirm exact values).
 */
912 static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
914 struct squashfs_super_block *sblk = &msblk->sblk;
916 msblk->iget = squashfs_iget;
917 msblk->read_blocklist = read_blocklist;
918 msblk->read_fragment_index_table = read_fragment_index_table;
920 if (sblk->s_major == 1) {
921 if (!squashfs_1_0_supported(msblk)) {
922 SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
923 "are unsupported\n");
924 SERROR("Please recompile with "
925 "Squashfs 1.0 support enabled\n");
928 } else if (sblk->s_major == 2) {
929 if (!squashfs_2_0_supported(msblk)) {
930 SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
931 "are unsupported\n");
932 SERROR("Please recompile with "
933 "Squashfs 2.0 support enabled\n");
/* Newer-than-driver filesystems are rejected outright. */
936 } else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
938 SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
939 "filesystem\n", sblk->s_major, sblk->s_minor);
940 SERROR("Please update your kernel\n");
/*
 * Mount-time entry point: read and validate the squashfs superblock
 * (byte-swapping it if the magic indicates a foreign-endian image),
 * allocate the per-mount caches and tables, read the uid/gid and fragment
 * index tables, and instantiate the root inode/dentry.  The tail is the
 * shared goto-style error-unwind that frees everything in reverse order.
 *
 * NOTE(review): many lines are missing from this extract (the sblk
 * assignment, goto targets/labels, return statements); statements shown
 * are not contiguous.
 */
948 static int squashfs_fill_super(struct super_block *s, void *data, int silent)
950 struct squashfs_sb_info *msblk;
951 struct squashfs_super_block *sblk;
953 char b[BDEVNAME_SIZE];
956 TRACE("Entered squashfs_read_superblock\n");
958 if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
960 ERROR("Failed to allocate superblock\n");
963 memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
964 msblk = s->s_fs_info;
/* One shared zlib workspace per mount, guarded by read_data_mutex. */
965 if (!(msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
966 ERROR("Failed to allocate zlib workspace\n");
971 msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
972 msblk->devblksize_log2 = ffz(~msblk->devblksize);
974 init_MUTEX(&msblk->read_data_mutex);
975 init_MUTEX(&msblk->read_page_mutex);
976 init_MUTEX(&msblk->block_cache_mutex);
977 init_MUTEX(&msblk->fragment_mutex);
978 init_MUTEX(&msblk->meta_index_mutex);
980 init_waitqueue_head(&msblk->waitq);
981 init_waitqueue_head(&msblk->fragment_wait_queue);
/* The superblock is stored uncompressed: pass size with the
   COMPRESSED_BIT set to mean "not compressed" to squashfs_read_data. */
983 if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
984 sizeof(struct squashfs_super_block) |
985 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
986 SERROR("unable to read superblock\n");
990 /* Check it is a SQUASHFS superblock */
992 if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
993 if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
994 struct squashfs_super_block ssblk;
996 WARNING("Mounting a different endian SQUASHFS "
997 "filesystem on %s\n", bdevname(s->s_bdev, b));
999 SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
1000 memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
1003 SERROR("Can't find a SQUASHFS superblock on %s\n",
1004 bdevname(s->s_bdev, b));
1009 /* Check the MAJOR & MINOR versions */
1010 if(!supported_squashfs_filesystem(msblk, silent))
1013 TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
1014 TRACE("Inodes are %scompressed\n",
1015 SQUASHFS_UNCOMPRESSED_INODES
1016 (sblk->flags) ? "un" : "");
1017 TRACE("Data is %scompressed\n",
1018 SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
1020 TRACE("Check data is %s present in the filesystem\n",
1021 SQUASHFS_CHECK_DATA(sblk->flags) ?
1023 TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
1024 TRACE("Block size %d\n", sblk->block_size);
1025 TRACE("Number of inodes %d\n", sblk->inodes);
1026 if (sblk->s_major > 1)
1027 TRACE("Number of fragments %d\n", sblk->fragments);
1028 TRACE("Number of uids %d\n", sblk->no_uids);
1029 TRACE("Number of gids %d\n", sblk->no_guids);
1030 TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
1031 TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
1032 if (sblk->s_major > 1)
1033 TRACE("sblk->fragment_table_start %llx\n",
1034 sblk->fragment_table_start);
1035 TRACE("sblk->uid_start %llx\n", sblk->uid_start);
1037 s->s_flags |= MS_RDONLY;
1038 s->s_op = &squashfs_ops;
1040 /* Init inode_table block pointer array */
1041 if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
1042 SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
1043 ERROR("Failed to allocate block cache\n");
1047 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
1048 msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
1050 msblk->next_cache = 0;
1052 /* Allocate read_data block */
/* Staging buffer must hold the larger of a metadata or data block. */
1053 msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
1054 SQUASHFS_METADATA_SIZE :
1057 if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
1058 ERROR("Failed to allocate read_data block\n");
1062 /* Allocate read_page block */
1063 if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
1064 ERROR("Failed to allocate read_page block\n");
1068 /* Allocate uid and gid tables */
/* Single allocation; guid table aliases the tail of the uid array. */
1069 if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
1070 sizeof(unsigned int), GFP_KERNEL))) {
1071 ERROR("Failed to allocate uid/gid table\n");
1074 msblk->guid = msblk->uid + sblk->no_uids;
/* Foreign-endian path: read raw ids onto the stack, then swap in. */
1077 unsigned int suid[sblk->no_uids + sblk->no_guids];
1079 if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
1080 ((sblk->no_uids + sblk->no_guids) *
1081 sizeof(unsigned int)) |
1082 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
1083 ERROR("unable to read uid/gid table\n");
1087 SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
1088 sblk->no_guids), (sizeof(unsigned int) * 8));
/* Native-endian path: read the id table directly. */
1090 if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
1091 ((sblk->no_uids + sblk->no_guids) *
1092 sizeof(unsigned int)) |
1093 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
1094 ERROR("unable to read uid/gid table\n");
/* 1.0 filesystems have no fragments: skip the fragment cache setup. */
1099 if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
1102 if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
1103 SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
1104 ERROR("Failed to allocate fragment block cache\n");
1108 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
1109 msblk->fragment[i].locked = 0;
1110 msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
1111 msblk->fragment[i].data = NULL;
1114 msblk->next_fragment = 0;
1116 /* Allocate fragment index table */
1117 if (msblk->read_fragment_index_table(s) == 0)
1121 if ((root = (msblk->iget)(s, sblk->root_inode)) == NULL)
1124 if ((s->s_root = d_alloc_root(root)) == NULL) {
1125 ERROR("Root inode create failed\n");
1130 TRACE("Leaving squashfs_read_super\n");
/* Error unwind: free everything allocated above (labels truncated). */
1134 kfree(msblk->fragment_index);
1135 kfree(msblk->fragment);
1137 kfree(msblk->read_page);
1138 kfree(msblk->read_data);
1139 kfree(msblk->block_cache);
1140 kfree(msblk->fragment_index_2);
1141 vfree(msblk->stream.workspace);
1142 kfree(s->s_fs_info);
1143 s->s_fs_info = NULL;
/*
 * Report filesystem statistics.  Read-only, so free/available counts are
 * zero; block count is bytes_used rounded up to whole fs blocks.
 */
1152 static int squashfs_statfs(struct dentry *s, struct kstatfs *buf)
1153 struct squashfs_sb_info *msblk = s->d_sb->s_fs_info;
1154 struct squashfs_super_block *sblk = &msblk->sblk;
1156 TRACE("Entered squashfs_statfs\n");
1158 buf->f_type = SQUASHFS_MAGIC;
1159 buf->f_bsize = sblk->block_size;
1160 buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
1161 buf->f_bfree = buf->f_bavail = 0;
1162 buf->f_files = sblk->inodes;
1164 buf->f_namelen = SQUASHFS_NAME_LEN;
/*
 * readpage for symlinks: walk the metadata chain to this page's byte
 * offset within the link target, copy up to a page of the target string
 * into the page, zero the remainder, and mark the page up to date.
 * Note squashfs_get_cached_block() is called with a NULL buffer in the
 * seek loop — presumably that means "skip, don't copy"; TODO confirm
 * against the helper's truncated error paths.
 */
1170 static int squashfs_symlink_readpage(struct file *file, struct page *page)
1172 struct inode *inode = page->mapping->host;
1173 int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
1174 long long block = SQUASHFS_I(inode)->start_block;
1175 int offset = SQUASHFS_I(inode)->offset;
1176 void *pageaddr = kmap(page);
1178 TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
1179 "%llx, offset %x\n", page->index,
1180 SQUASHFS_I(inode)->start_block,
1181 SQUASHFS_I(inode)->offset);
/* Seek forward to this page's offset within the symlink target. */
1183 for (length = 0; length < index; length += bytes) {
1184 if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
1185 block, offset, PAGE_CACHE_SIZE, &block,
1187 ERROR("Unable to read symbolic link [%llx:%x]\n", block,
1193 if (length != index) {
1194 ERROR("(squashfs_symlink_readpage) length != index\n");
/* Copy the remaining target bytes, capped at one page. */
1199 bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
1200 i_size_read(inode) - length;
1202 if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
1203 offset, bytes, &block, &offset)))
1204 ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
1207 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1209 SetPageUptodate(page);
/*
 * Search the per-mount meta_index array for the best cached index entry
 * for this inode: an unlocked entry whose offset lies in [offset, index].
 * Returns the entry (NULL if the array is unallocated or nothing
 * matches).  The loop keeps scanning after a match, so the entry with the
 * largest qualifying offset wins ('offset' is raised to each match).
 */
1216 struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
1218 struct meta_index *meta = NULL;
1219 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1222 down(&msblk->meta_index_mutex);
1224 TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
/* Lazily allocated by empty_meta_index(); nothing cached yet if NULL. */
1226 if(msblk->meta_index == NULL)
1229 for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
1230 if (msblk->meta_index[i].inode_number == inode->i_ino &&
1231 msblk->meta_index[i].offset >= offset &&
1232 msblk->meta_index[i].offset <= index &&
1233 msblk->meta_index[i].locked == 0) {
1234 TRACE("locate_meta_index: entry %d, offset %d\n", i,
1235 msblk->meta_index[i].offset);
1236 meta = &msblk->meta_index[i];
1237 offset = meta->offset;
1244 up(&msblk->meta_index_mutex);
/*
 * Claim a meta_index slot for building a new index for this inode,
 * allocating the SQUASHFS_META_NUMBER-entry array on first use.  Slots
 * are recycled round-robin via next_meta_index, skipping locked entries;
 * if every slot is locked, NULL is returned (the lock/locked-flag setup
 * for the claimed entry is on lines missing from this extract).
 */
1250 struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
1252 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1253 struct meta_index *meta = NULL;
1256 down(&msblk->meta_index_mutex);
1258 TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
/* First use: allocate and zero-initialise the index array. */
1260 if(msblk->meta_index == NULL) {
1261 if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
1262 SQUASHFS_META_NUMBER, GFP_KERNEL))) {
1263 ERROR("Failed to allocate meta_index\n");
1266 for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
1267 msblk->meta_index[i].inode_number = 0;
1268 msblk->meta_index[i].locked = 0;
1270 msblk->next_meta_index = 0;
/* Advance the round-robin cursor past locked entries. */
1273 for(i = SQUASHFS_META_NUMBER; i &&
1274 msblk->meta_index[msblk->next_meta_index].locked; i --)
1275 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1276 SQUASHFS_META_NUMBER;
1279 TRACE("empty_meta_index: failed!\n");
1283 TRACE("empty_meta_index: returned meta entry %d, %p\n",
1284 msblk->next_meta_index,
1285 &msblk->meta_index[msblk->next_meta_index]);
1287 meta = &msblk->meta_index[msblk->next_meta_index];
1288 msblk->next_meta_index = (msblk->next_meta_index + 1) %
1289 SQUASHFS_META_NUMBER;
1291 meta->inode_number = inode->i_ino;
1292 meta->offset = offset;
1298 up(&msblk->meta_index_mutex);
/* release_meta_index: release the claim on a meta-index slot taken via
 * locate_meta_index()/empty_meta_index() (presumably clears meta->locked
 * under meta_index_mutex — body is elided from this listing; confirm). */
1303 void release_meta_index(struct inode *inode, struct meta_index *meta)
/*
 * read_block_index: read @blocks block-list entries from the metadata at
 * [*start_block:*offset] into @block_list, advancing the cursor in place,
 * and return the summed on-disk (compressed) size of those data blocks so
 * the caller can step its data-block cursor.  Error paths (return -1) are
 * partly elided from this listing.
 *
 * NOTE(review): sblock_list is a variable-length array on the kernel
 * stack, sized blocks << 2.  Callers appear to clamp @blocks to SIZE >> 2
 * before calling — confirm against the elided lines, as an unbounded
 * value would risk kernel stack overflow.
 */
1309 static int read_block_index(struct super_block *s, int blocks, char *block_list,
1310 long long *start_block, int *offset)
1312 struct squashfs_sb_info *msblk = s->s_fs_info;
1313 unsigned int *block_listp;
1317 char sblock_list[blocks << 2];
/* opposite-endian image: read raw entries, then byte-swap into the
   caller's buffer */
1319 if (!squashfs_get_cached_block(s, sblock_list, *start_block,
1320 *offset, blocks << 2, start_block, offset)) {
1321 ERROR("Unable to read block list [%llx:%x]\n",
1322 *start_block, *offset);
1325 SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
1326 ((unsigned int *)sblock_list), blocks);
/* native-endian image: read directly into the caller's buffer */
1328 if (!squashfs_get_cached_block(s, block_list, *start_block,
1329 *offset, blocks << 2, start_block, offset)) {
1330 ERROR("Unable to read block list [%llx:%x]\n",
1331 *start_block, *offset);
/* accumulate the compressed length of every block just read */
1335 for (block_listp = (unsigned int *) block_list; blocks;
1336 block_listp++, blocks --)
1337 block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
/* calculate_skip: stride (in meta-index units) used when indexing a file
 * of @blocks blocks.  Grows with file size so that larger files use
 * coarser index entries; the visible code clamps the result to 1..7. */
1348 static inline int calculate_skip(int blocks) {
1349 int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
1350 return skip >= 7 ? 7 : skip + 1;
/*
 * get_meta_index: walk (and cache) @inode's block list far enough to
 * locate logical block @index.  On success, fills *index_block /
 * *index_offset with the block-list cursor and *data_block with the
 * on-disk data cursor for the first block not yet accounted for, and
 * returns how many logical blocks that cursor already covers
 * (offset * SQUASHFS_META_INDEXES * skip).  @block_list is scratch space
 * for read_block_index().
 * NOTE(review): many lines are elided from this listing; comments
 * describe only the visible code.
 */
1354 static int get_meta_index(struct inode *inode, int index,
1355 long long *index_block, int *index_offset,
1356 long long *data_block, char *block_list)
1358 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1359 struct squashfs_super_block *sblk = &msblk->sblk;
/* index granularity scales with file size (see calculate_skip) */
1360 int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
1362 struct meta_index *meta;
1363 struct meta_entry *meta_entry;
/* start the walk from the inode's own block-list/data anchors */
1364 long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
1365 int cur_offset = SQUASHFS_I(inode)->offset;
1366 long long cur_data_block = SQUASHFS_I(inode)->start_block;
/* convert the logical block number into meta-index units */
1369 index /= SQUASHFS_META_INDEXES * skip;
1371 while ( offset < index ) {
/* reuse a cached entry if one covers (offset, index] ... */
1372 meta = locate_meta_index(inode, index, offset + 1);
/* ... otherwise claim a fresh slot to fill in below */
1375 if ((meta = empty_meta_index(inode, offset + 1,
/* jump the cursors forward to the last cached entry not past index */
1379 offset = index < meta->offset + meta->entries ? index :
1380 meta->offset + meta->entries - 1;
1381 meta_entry = &meta->meta_entry[offset - meta->offset];
/* index_block is stored relative to the inode table start */
1382 cur_index_block = meta_entry->index_block + sblk->inode_table_start;
1383 cur_offset = meta_entry->offset;
1384 cur_data_block = meta_entry->data_block;
1385 TRACE("get_meta_index: offset %d, meta->offset %d, "
1386 "meta->entries %d\n", offset, meta->offset,
1388 TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
1389 " data_block 0x%llx\n", cur_index_block,
1390 cur_offset, cur_data_block);
/* extend the cached entry: read block-list chunks and record a new
   meta_entry per stride until we reach index or fill the slot */
1393 for (i = meta->offset + meta->entries; i <= index &&
1394 i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
1395 int blocks = skip * SQUASHFS_META_INDEXES;
/* clamp each read to the scratch buffer capacity (SIZE bytes,
   4 bytes per entry) */
1398 int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
1400 int res = read_block_index(inode->i_sb, block,
1401 block_list, &cur_index_block,
/* res is the compressed span of the blocks just read */
1407 cur_data_block += res;
/* record the advanced cursors in the cache entry */
1411 meta_entry = &meta->meta_entry[i - meta->offset];
1412 meta_entry->index_block = cur_index_block - sblk->inode_table_start;
1413 meta_entry->offset = cur_offset;
1414 meta_entry->data_block = cur_data_block;
1419 TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
1420 meta->offset, meta->entries);
1422 release_meta_index(inode, meta);
/* hand the final cursors back to the caller */
1426 *index_block = cur_index_block;
1427 *index_offset = cur_offset;
1428 *data_block = cur_data_block;
/* convert meta-index units back into a logical block count */
1430 return offset * SQUASHFS_META_INDEXES * skip;
/* error exit (presumably): drop the slot before failing — confirm
   against elided lines */
1433 release_meta_index(inode, meta);
/*
 * read_blocklist: return the on-disk start address of logical block
 * @index of @inode, with its compressed size stored in *bsize.  Uses
 * get_meta_index() to skip most of the block list, then reads the
 * remaining entries via read_block_index().  @block_list is scratch
 * space; error paths (return 0 / -1 handling) are partly elided from
 * this listing.
 */
1438 static long long read_blocklist(struct inode *inode, int index,
1439 int readahead_blks, char *block_list,
1440 unsigned short **block_p, unsigned int *bsize)
1442 long long block_ptr;
/* res = number of logical blocks already covered by the cached cursor */
1445 int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
1448 TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
1449 " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
/* consume the remaining entries in SIZE-bounded chunks */
1458 int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
1459 int res = read_block_index(inode->i_sb, blocks, block_list,
1460 &block_ptr, &offset);
/* finally read the single entry for the target block itself */
1467 if (read_block_index(inode->i_sb, 1, block_list,
1468 &block_ptr, &offset) == -1)
/* first word of block_list is the target block's compressed size */
1470 *bsize = *((unsigned int *) block_list);
/*
 * squashfs_readpage: address_space readpage for files whose squashfs
 * block size exceeds PAGE_CACHE_SIZE.  Decompresses the whole squashfs
 * block (or the file's tail-end fragment) containing @page into
 * msblk->read_page under read_page_mutex, then copies the data out into
 * every page-cache page the block spans, zero-filling past EOF.
 * NOTE(review): unlock/cleanup paths are partly elided from this listing.
 */
1479 static int squashfs_readpage(struct file *file, struct page *page)
1481 struct inode *inode = page->mapping->host;
1482 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1483 struct squashfs_super_block *sblk = &msblk->sblk;
1484 unsigned char *block_list;
1486 unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
/* squashfs block number containing this page */
1487 int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
1489 struct squashfs_fragment_cache *fragment = NULL;
/* default source: the shared decompression buffer */
1490 char *data_ptr = msblk->read_page;
/* page range [start_index, end_index] covered by this squashfs block */
1492 int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
1493 int start_index = page->index & ~mask;
1494 int end_index = start_index | mask;
1496 TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
1498 SQUASHFS_I(inode)->start_block);
1500 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1501 ERROR("Failed to allocate block_list\n");
/* page beyond EOF: nothing to read */
1505 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
/* regular data block path: the page is not in the tail-end fragment */
1509 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1510 || index < (i_size_read(inode) >>
/* look up the block's disk location and compressed size */
1512 if ((block = (msblk->read_blocklist)(inode, index, 1,
1513 block_list, NULL, &bsize)) == 0)
/* read_page is a single shared buffer — serialise its use */
1516 down(&msblk->read_page_mutex);
1518 if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
1519 block, bsize, NULL))) {
1520 ERROR("Unable to read page, block %llx, size %x\n", block,
1522 up(&msblk->read_page_mutex);
/* fragment path: page lives in the file's tail-end fragment */
1526 if ((fragment = get_cached_fragment(inode->i_sb,
1528 u.s1.fragment_start_block,
1529 SQUASHFS_I(inode)->u.s1.fragment_size))
1531 ERROR("Unable to read page, block %llx, size %x\n",
1533 u.s1.fragment_start_block,
1534 (int) SQUASHFS_I(inode)->
1535 u.s1.fragment_size);
/* point the copy loop at the fragment data, offset by where this
   file's tail begins inside the shared fragment */
1538 bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
1539 (i_size_read(inode) & (sblk->block_size
1541 byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
1542 data_ptr = fragment->data;
/* copy the decompressed block out to every page it covers */
1545 for (i = start_index; i <= end_index && byte_offset < bytes;
1546 i++, byte_offset += PAGE_CACHE_SIZE) {
1547 struct page *push_page;
1548 int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
1549 PAGE_CACHE_SIZE : bytes - byte_offset;
1551 TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
1552 bytes, i, byte_offset, available_bytes);
/* the page readpage was called with: fill it and mark up to date */
1554 if (i == page->index) {
1555 pageaddr = kmap_atomic(page, KM_USER0);
1556 memcpy(pageaddr, data_ptr + byte_offset,
1558 memset(pageaddr + available_bytes, 0,
1559 PAGE_CACHE_SIZE - available_bytes);
1560 kunmap_atomic(pageaddr, KM_USER0);
1561 flush_dcache_page(page);
1562 SetPageUptodate(page);
/* opportunistically populate sibling pages already decompressed;
   grab_cache_page_nowait failure is harmless (skip the page) */
1564 } else if ((push_page =
1565 grab_cache_page_nowait(page->mapping, i))) {
1566 pageaddr = kmap_atomic(push_page, KM_USER0);
1568 memcpy(pageaddr, data_ptr + byte_offset,
1570 memset(pageaddr + available_bytes, 0,
1571 PAGE_CACHE_SIZE - available_bytes);
1572 kunmap_atomic(pageaddr, KM_USER0);
1573 flush_dcache_page(push_page);
1574 SetPageUptodate(push_page);
1575 unlock_page(push_page);
1576 page_cache_release(push_page);
/* release whichever source buffer was used (shared read_page for the
   block path, cached fragment for the fragment path) */
1580 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1581 || index < (i_size_read(inode) >>
1583 up(&msblk->read_page_mutex);
1585 release_cached_fragment(msblk, fragment);
/* error/EOF exit: hand back a zero-filled, up-to-date page */
1591 pageaddr = kmap_atomic(page, KM_USER0);
1592 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1593 kunmap_atomic(pageaddr, KM_USER0);
1594 flush_dcache_page(page);
1595 SetPageUptodate(page);
/*
 * squashfs_readpage4K: address_space readpage used when the squashfs
 * block size equals the page size, so each page maps to exactly one
 * squashfs block (or the tail-end fragment).  Simpler than
 * squashfs_readpage(): no sibling pages to populate.
 * NOTE(review): cleanup/unlock paths are partly elided from this listing.
 */
1603 static int squashfs_readpage4K(struct file *file, struct page *page)
1605 struct inode *inode = page->mapping->host;
1606 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1607 struct squashfs_super_block *sblk = &msblk->sblk;
1608 unsigned char *block_list;
1610 unsigned int bsize, bytes = 0;
1613 TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
1615 SQUASHFS_I(inode)->start_block);
/* page beyond EOF: return a zeroed page */
1617 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
1618 PAGE_CACHE_SHIFT)) {
1619 pageaddr = kmap_atomic(page, KM_USER0);
1624 if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
1625 ERROR("Failed to allocate block_list\n");
1626 pageaddr = kmap_atomic(page, KM_USER0);
/* regular block path: page is not in the tail-end fragment */
1631 if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
1632 || page->index < (i_size_read(inode) >>
1634 block = (msblk->read_blocklist)(inode, page->index, 1,
1635 block_list, NULL, &bsize);
/* shared decompression buffer — serialise */
1637 down(&msblk->read_page_mutex);
1638 bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
1640 pageaddr = kmap_atomic(page, KM_USER0);
1642 memcpy(pageaddr, msblk->read_page, bytes);
1644 ERROR("Unable to read page, block %llx, size %x\n",
1646 up(&msblk->read_page_mutex);
/* fragment path: copy this file's tail out of the cached fragment */
1648 struct squashfs_fragment_cache *fragment =
1649 get_cached_fragment(inode->i_sb,
1651 u.s1.fragment_start_block,
1652 SQUASHFS_I(inode)-> u.s1.fragment_size);
1653 pageaddr = kmap_atomic(page, KM_USER0);
/* tail length = file size modulo block size */
1655 bytes = i_size_read(inode) & (sblk->block_size - 1);
1656 memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
1657 u.s1.fragment_offset, bytes);
1658 release_cached_fragment(msblk, fragment);
1660 ERROR("Unable to read page, block %llx, size %x\n",
1662 u.s1.fragment_start_block, (int)
1663 SQUASHFS_I(inode)-> u.s1.fragment_size);
/* zero-fill past the valid data and publish the page */
1667 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1668 kunmap_atomic(pageaddr, KM_USER0);
1669 flush_dcache_page(page);
1670 SetPageUptodate(page);
/*
 * get_dir_index_using_offset: use a directory's on-disk index table to
 * jump the (*next_block, *next_offset) metadata cursor close to file
 * position f_pos, avoiding a linear scan of the whole directory.
 * Returns the directory offset ("length") the cursor now corresponds to.
 * NOTE(review): listing has elided lines; f_pos handling before the loop
 * is not visible.
 */
1678 static int get_dir_index_using_offset(struct super_block *s, long long
1679 *next_block, unsigned int *next_offset,
1680 long long index_start,
1681 unsigned int index_offset, int i_count,
1684 struct squashfs_sb_info *msblk = s->s_fs_info;
1685 struct squashfs_super_block *sblk = &msblk->sblk;
1687 struct squashfs_dir_index index;
1689 TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
1690 i_count, (unsigned int) f_pos);
1696 for (i = 0; i < i_count; i++) {
/* opposite-endian image: read raw entry then byte-swap */
1698 struct squashfs_dir_index sindex;
1699 squashfs_get_cached_block(s, (char *) &sindex,
1700 index_start, index_offset,
1701 sizeof(sindex), &index_start,
1703 SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
/* native-endian image: read the entry directly */
1705 squashfs_get_cached_block(s, (char *) &index,
1706 index_start, index_offset,
1707 sizeof(index), &index_start,
/* stop at the first index entry past the requested position */
1710 if (index.index > f_pos)
/* skip the entry's name (size is stored as length - 1) */
1713 squashfs_get_cached_block(s, NULL, index_start, index_offset,
1714 index.size + 1, &index_start,
/* remember the best (closest preceding) entry's cursor */
1717 length = index.index;
1718 *next_block = index.start_block + sblk->directory_table_start;
/* offsets wrap at the metadata block size */
1721 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * get_dir_index_using_name: use a directory's on-disk index table to jump
 * the (*next_block, *next_offset) cursor close to where @name would be
 * (entries are sorted), avoiding a linear scan.  Returns the directory
 * offset the cursor now corresponds to.
 * NOTE(review): listing has elided lines.  Also note strncpy() does not
 * guarantee NUL-termination when strlen(name) >= size; presumably an
 * elided line terminates str before the strcmp below — confirm.
 */
1728 static int get_dir_index_using_name(struct super_block *s, long long
1729 *next_block, unsigned int *next_offset,
1730 long long index_start,
1731 unsigned int index_offset, int i_count,
1732 const char *name, int size)
1734 struct squashfs_sb_info *msblk = s->s_fs_info;
1735 struct squashfs_super_block *sblk = &msblk->sblk;
1737 struct squashfs_dir_index *index;
1740 TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
/* one buffer holds the copied search name followed by the index entry
   (with room for its name) */
1742 if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
1743 (SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
1744 ERROR("Failed to allocate squashfs_dir_index\n");
1748 index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
1749 strncpy(str, name, size);
1752 for (i = 0; i < i_count; i++) {
/* opposite-endian image: read raw entry then byte-swap */
1754 struct squashfs_dir_index sindex;
1755 squashfs_get_cached_block(s, (char *) &sindex,
1756 index_start, index_offset,
1757 sizeof(sindex), &index_start,
1759 SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
/* native-endian image: read the entry directly */
1761 squashfs_get_cached_block(s, (char *) index,
1762 index_start, index_offset,
1763 sizeof(struct squashfs_dir_index),
1764 &index_start, &index_offset);
/* read the entry's name (size stores length - 1) and terminate it */
1766 squashfs_get_cached_block(s, index->name, index_start,
1767 index_offset, index->size + 1,
1768 &index_start, &index_offset);
1770 index->name[index->size + 1] = '\0';
/* entries are sorted: stop at the first one greater than the target */
1772 if (strcmp(index->name, str) > 0)
/* remember the closest preceding entry's cursor */
1775 length = index->index;
1776 *next_block = index->start_block + sblk->directory_table_start;
/* offsets wrap at the metadata block size */
1779 *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
/*
 * squashfs_readdir: file_operations readdir.  Synthesises "." and ".."
 * first (squashfs does not store them on disk), then uses the directory
 * index to seek near f_pos and walks directory headers/entries, emitting
 * each via filldir until the callback refuses or the directory ends.
 * NOTE(review): listing has elided lines; several branch bodies and the
 * final return are not visible.
 */
1786 static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
1788 struct inode *i = file->f_dentry->d_inode;
1789 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1790 struct squashfs_super_block *sblk = &msblk->sblk;
/* directory metadata cursor, anchored at this inode's start block */
1791 long long next_block = SQUASHFS_I(i)->start_block +
1792 sblk->directory_table_start;
1793 int next_offset = SQUASHFS_I(i)->offset, length = 0,
1795 struct squashfs_dir_header dirh;
1796 struct squashfs_dir_entry *dire;
1798 TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
1800 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1801 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1802 ERROR("Failed to allocate squashfs_dir_entry\n");
/* f_pos 0..2 cover the synthetic "." and ".." entries */
1806 while(file->f_pos < 3) {
1810 if(file->f_pos == 0) {
/* ".." resolves to the stored parent inode number */
1817 i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
1819 TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
1820 (unsigned int) dirent, name, size, (int)
1822 squashfs_filetype_table[1]);
1824 if (filldir(dirent, name, size,
1826 squashfs_filetype_table[1]) < 0) {
1827 TRACE("Filldir returned less than 0\n");
1830 file->f_pos += size;
/* seek the cursor near f_pos using the directory index */
1833 length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
1834 SQUASHFS_I(i)->u.s2.directory_index_start,
1835 SQUASHFS_I(i)->u.s2.directory_index_offset,
1836 SQUASHFS_I(i)->u.s2.directory_index_count,
1839 while (length < i_size_read(i)) {
1840 /* read directory header */
/* opposite-endian image: raw read then byte-swap */
1842 struct squashfs_dir_header sdirh;
1844 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
1845 next_block, next_offset, sizeof(sdirh),
1846 &next_block, &next_offset))
1849 length += sizeof(sdirh);
1850 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
/* native-endian image: read header directly */
1852 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
1853 next_block, next_offset, sizeof(dirh),
1854 &next_block, &next_offset))
1857 length += sizeof(dirh);
/* header's count field stores entries - 1 */
1860 dir_count = dirh.count + 1;
1861 while (dir_count--) {
1863 struct squashfs_dir_entry sdire;
1864 if (!squashfs_get_cached_block(i->i_sb, (char *)
1865 &sdire, next_block, next_offset,
1866 sizeof(sdire), &next_block,
1870 length += sizeof(sdire);
1871 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
1873 if (!squashfs_get_cached_block(i->i_sb, (char *)
1874 dire, next_block, next_offset,
1875 sizeof(*dire), &next_block,
1879 length += sizeof(*dire);
/* entry name follows (size stores length - 1) */
1882 if (!squashfs_get_cached_block(i->i_sb, dire->name,
1883 next_block, next_offset,
1884 dire->size + 1, &next_block,
1888 length += dire->size + 1;
/* skip entries already consumed on a previous readdir call */
1890 if (file->f_pos >= length)
1893 dire->name[dire->size + 1] = '\0';
1895 TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
1896 (unsigned int) dirent, dire->name,
1897 dire->size + 1, (int) file->f_pos,
1898 dirh.start_block, dire->offset,
1899 dirh.inode_number + dire->inode_number,
1900 squashfs_filetype_table[dire->type]);
/* entry inode numbers are deltas from the header's base */
1902 if (filldir(dirent, dire->name, dire->size + 1,
1904 dirh.inode_number + dire->inode_number,
1905 squashfs_filetype_table[dire->type])
1907 TRACE("Filldir returned less than 0\n");
1910 file->f_pos = length;
/* metadata read failure exit */
1919 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * squashfs_lookup: inode_operations lookup.  Walks directory entries
 * (using the sorted directory index to seek near @name) and, on a match,
 * instantiates the inode and binds it to @dentry via d_add().  A miss
 * results in a negative dentry (inode stays NULL).
 * NOTE(review): listing has elided lines; error handling and the return
 * expression are only partly visible.
 */
1925 static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
1926 struct nameidata *nd)
1928 const unsigned char *name = dentry->d_name.name;
1929 int len = dentry->d_name.len;
1930 struct inode *inode = NULL;
1931 struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
1932 struct squashfs_super_block *sblk = &msblk->sblk;
/* directory metadata cursor, anchored at this inode's start block */
1933 long long next_block = SQUASHFS_I(i)->start_block +
1934 sblk->directory_table_start;
1935 int next_offset = SQUASHFS_I(i)->offset, length = 0,
1937 struct squashfs_dir_header dirh;
1938 struct squashfs_dir_entry *dire;
1940 TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
1942 if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
1943 SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
1944 ERROR("Failed to allocate squashfs_dir_entry\n");
/* names longer than the format allows cannot exist on disk */
1948 if (len > SQUASHFS_NAME_LEN)
/* seek the cursor near @name using the sorted directory index */
1951 length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
1952 SQUASHFS_I(i)->u.s2.directory_index_start,
1953 SQUASHFS_I(i)->u.s2.directory_index_offset,
1954 SQUASHFS_I(i)->u.s2.directory_index_count, name,
1957 while (length < i_size_read(i)) {
1958 /* read directory header */
/* opposite-endian image: raw read then byte-swap */
1960 struct squashfs_dir_header sdirh;
1961 if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
1962 next_block, next_offset, sizeof(sdirh),
1963 &next_block, &next_offset))
1966 length += sizeof(sdirh);
1967 SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
/* native-endian image: read header directly */
1969 if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
1970 next_block, next_offset, sizeof(dirh),
1971 &next_block, &next_offset))
1974 length += sizeof(dirh);
/* header's count field stores entries - 1 */
1977 dir_count = dirh.count + 1;
1978 while (dir_count--) {
1980 struct squashfs_dir_entry sdire;
1981 if (!squashfs_get_cached_block(i->i_sb, (char *)
1982 &sdire, next_block,next_offset,
1983 sizeof(sdire), &next_block,
1987 length += sizeof(sdire);
1988 SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
1990 if (!squashfs_get_cached_block(i->i_sb, (char *)
1991 dire, next_block,next_offset,
1992 sizeof(*dire), &next_block,
1996 length += sizeof(*dire);
/* entry name follows (size stores length - 1) */
1999 if (!squashfs_get_cached_block(i->i_sb, dire->name,
2000 next_block, next_offset, dire->size + 1,
2001 &next_block, &next_offset))
2004 length += dire->size + 1;
/* entries are sorted by first character: stop early on overshoot */
2006 if (name[0] < dire->name[0])
/* exact match: same length and same bytes */
2009 if ((len == dire->size + 1) && !strncmp(name,
/* compose the 64-bit squashfs inode id from block + offset */
2011 squashfs_inode_t ino =
2012 SQUASHFS_MKINODE(dirh.start_block,
2015 TRACE("calling squashfs_iget for directory "
2016 "entry %s, inode %x:%x, %d\n", name,
2017 dirh.start_block, dire->offset,
2018 dirh.inode_number + dire->inode_number);
2020 inode = (msblk->iget)(i->i_sb, ino);
/* negative dentry when inode is still NULL */
2029 d_add(dentry, inode);
/* metadata read failure exit */
2033 ERROR("Unable to read directory block [%llx:%x]\n", next_block,
/*
 * squashfs_put_super: release all per-superblock state at unmount time:
 * the metadata block cache, the fragment cache, scratch buffers, the
 * fragment/meta index tables and the zlib workspace.
 * NOTE(review): kfree()/kfree(NULL) is safe, so unconditional frees of
 * possibly-NULL members below are fine.
 */
2039 static void squashfs_put_super(struct super_block *s)
2044 struct squashfs_sb_info *sbi = s->s_fs_info;
/* free each cached metadata block's data buffer (only slots that were
   ever populated hold a valid block) */
2045 if (sbi->block_cache)
2046 for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
2047 if (sbi->block_cache[i].block !=
2048 SQUASHFS_INVALID_BLK)
2049 kfree(sbi->block_cache[i].data);
/* free each cached fragment's data buffer */
2051 for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
2052 SQUASHFS_FREE(sbi->fragment[i].data);
2053 kfree(sbi->fragment);
2054 kfree(sbi->block_cache);
2055 kfree(sbi->read_data);
2056 kfree(sbi->read_page);
2058 kfree(sbi->fragment_index);
2059 kfree(sbi->fragment_index_2);
2060 kfree(sbi->meta_index);
/* zlib workspace was vmalloc'd, not kmalloc'd */
2061 vfree(sbi->stream.workspace);
2062 kfree(s->s_fs_info);
2063 s->s_fs_info = NULL;
/* squashfs_get_sb: file_system_type get_sb hook — mount via the generic
 * block-device helper, delegating superblock setup to
 * squashfs_fill_super(). */
2068 static int squashfs_get_sb(struct file_system_type *fs_type,
2069 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2071 return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super, mnt);
/* init_squashfs_fs: module init — create the inode slab cache, then
 * register the filesystem; tears the cache down again if registration
 * fails.  (Early-return on cache failure is in elided lines.) */
2075 static int __init init_squashfs_fs(void)
2077 int err = init_inodecache();
2081 printk(KERN_INFO "squashfs: version 3.1 (2006/08/09) "
2082 "Phillip Lougher\n");
/* undo the cache if the filesystem cannot be registered */
2084 if ((err = register_filesystem(&squashfs_fs_type)))
2085 destroy_inodecache();
/* exit_squashfs_fs: module exit — unregister the filesystem and destroy
 * the inode slab cache (reverse order of init). */
2092 static void __exit exit_squashfs_fs(void)
2094 unregister_filesystem(&squashfs_fs_type);
2095 destroy_inodecache();
/* slab cache for struct squashfs_inode_info (set up by init_inodecache) */
2099 static kmem_cache_t * squashfs_inode_cachep;
/* squashfs_alloc_inode: super_operations alloc_inode — carve a
 * squashfs_inode_info out of the slab cache and hand the embedded VFS
 * inode back to the core.  (NULL check on ei is in elided lines.) */
2102 static struct inode *squashfs_alloc_inode(struct super_block *sb)
2104 struct squashfs_inode_info *ei;
2105 ei = kmem_cache_alloc(squashfs_inode_cachep, SLAB_KERNEL);
2108 return &ei->vfs_inode;
/* squashfs_destroy_inode: super_operations destroy_inode — return the
 * containing squashfs_inode_info to the slab cache. */
2112 static void squashfs_destroy_inode(struct inode *inode)
2114 kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
/* init_once: slab constructor — initialise the embedded VFS inode exactly
 * once per slab object (the SLAB_CTOR_CONSTRUCTOR check is the pre-2.6.20
 * idiom distinguishing construction from verification passes). */
2118 static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
2120 struct squashfs_inode_info *ei = foo;
2122 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2123 SLAB_CTOR_CONSTRUCTOR)
2124 inode_init_once(&ei->vfs_inode);
/* init_inodecache: create the squashfs_inode_info slab cache with
 * init_once as its constructor; returns an error (elided lines) when
 * creation fails. */
2128 static int __init init_inodecache(void)
2130 squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
2131 sizeof(struct squashfs_inode_info),
2132 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
2134 if (squashfs_inode_cachep == NULL)
/* destroy_inodecache: tear down the inode slab cache; on this kernel
 * vintage kmem_cache_destroy() returns non-zero if objects are still
 * live, which is only logged. */
2140 static void destroy_inodecache(void)
2142 if (kmem_cache_destroy(squashfs_inode_cachep))
2143 printk(KERN_INFO "squashfs_inode_cache: not all structures "
/* module entry/exit points and metadata */
2148 module_init(init_squashfs_fs);
2149 module_exit(exit_squashfs_fs);
2150 MODULE_DESCRIPTION("squashfs, a compressed read-only filesystem");
2151 MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
2152 MODULE_LICENSE("GPL");