/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@redhat.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: build.c,v 1.52 2003/10/09 00:38:38 dwmw2 Exp $
 *
 */
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
/* Forward declarations: per-inode helpers for mount-time passes 1 and 2,
   defined later in this file. */
19 int jffs2_build_inode_pass1(struct jffs2_sb_info *, struct jffs2_inode_cache *);
20 int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *);
22 static inline struct jffs2_inode_cache *
23 first_inode_chain(int *i, struct jffs2_sb_info *c)
25 for (; *i < INOCACHE_HASHSIZE; (*i)++) {
26 if (c->inocache_list[*i])
27 return c->inocache_list[*i];
32 static inline struct jffs2_inode_cache *
33 next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
35 /* More in this chain? */
39 return first_inode_chain(i, c);
/* Iterate 'ic' over every jffs2_inode_cache in c's inocache hash table.
   'i' is the scratch bucket-index variable required by the chain-walking
   helpers.  (Listing had dropped the loop-condition line 'ic;', leaving
   a malformed for(); restored here.) */
#define for_each_inode(i, c, ic)			\
	for (i = 0, ic = first_inode_chain(&i, (c));	\
	     ic;					\
	     ic = next_inode(&i, ic, (c)))
/* We "fix" the filesystem in three stages:
   - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
   - Scan directory tree from top down, setting nlink in inocaches
   - Scan inocaches for inodes with nlink==0
*/
52 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
/*
 * Mount-time filesystem build: scan the medium, then make three passes
 * over the inocache hash (via for_each_inode) -- pass 1 raises nlink for
 * every dirent found, pass 2 removes inodes whose nlink stayed zero
 * (restarting on -EAGAIN), pass 3 frees the temporary scan-time dirents.
 *
 * NOTE(review): this listing has dropped lines (opening brace, the
 * declarations of 'ret'/'i', several if/return lines, the 'do {' and
 * closing braces); the annotations below describe only visible code.
 */
56 	struct jffs2_inode_cache *ic;
58 	/* First, scan the medium and build all the inode caches with
59 	   lists of physical nodes */
	/* Mark the superblock as mid-mount for the duration of the scan. */
61 	c->flags |= JFFS2_SB_FLAG_MOUNTING;
62 	ret = jffs2_scan_medium(c);
63 	c->flags &= ~JFFS2_SB_FLAG_MOUNTING;
68 	D1(printk(KERN_DEBUG "Scanned flash completely\n"));
69 	D1(jffs2_dump_block_lists(c));
71 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
72 	for_each_inode(i, c, ic) {
73 		D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
74 		ret = jffs2_build_inode_pass1(c, ic);
76 			D1(printk(KERN_WARNING "Eep. jffs2_build_inode_pass1 for ino %d returned %d\n", ic->ino, ret));
81 	D1(printk(KERN_DEBUG "Pass 1 complete\n"));
82 	D1(jffs2_dump_block_lists(c));
84 	/* Next, scan for inodes with nlink == 0 and remove them. If
85 	   they were directories, then decrement the nlink of their
86 	   children too, and repeat the scan. As that's going to be
87 	   a fairly uncommon occurrence, it's not so evil to do it this
88 	   way. Recursion bad. */
90 		D1(printk(KERN_DEBUG "Pass 2 (re)starting\n"));
92 		for_each_inode(i, c, ic) {
93 			D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
97 			/* XXX: Can get high latency here. Move the cond_resched() from the end of the loop? */
99 			ret = jffs2_build_remove_unlinked_inode(c, ic);
102 		/* -EAGAIN means the inode's nlink was zero, so we deleted it,
103 		   and furthermore that it had children and their nlink has now
104 		   gone to zero too. So we have to restart the scan. */
106 		D1(jffs2_dump_block_lists(c));
	/* Keep rescanning until a full pass completes without newly
	   orphaning any children (i.e. without -EAGAIN). */
110 	} while(ret == -EAGAIN);
112 	D1(printk(KERN_DEBUG "Pass 2 complete\n"));
114 	/* Finally, we can scan again and free the dirent nodes and scan_info structs */
115 	for_each_inode(i, c, ic) {
116 		struct jffs2_full_dirent *fd;
117 		D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
	/* Consume and free the whole scan-time dirent list for this inode. */
119 		while(ic->scan_dents) {
121 			ic->scan_dents = fd->next;
122 			jffs2_free_full_dirent(fd);
124 		ic->scan_dents = NULL;
127 	D1(printk(KERN_DEBUG "Pass 3 complete\n"));
128 	D1(jffs2_dump_block_lists(c));
130 	/* Rotate the lists by some number to ensure wear levelling */
131 	jffs2_rotate_lists(c);
136 int jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
/*
 * Pass 1 helper: for each directory entry recorded against 'ic' during
 * the medium scan, look up the child inocache and bump its nlink; also
 * track the highest inode number seen so far in c->highest_ino.
 *
 * NOTE(review): this listing has dropped lines (braces, the failed-lookup
 * check guarding the "doesn't exist" printk, continue/return statements);
 * the annotations below describe only visible code.
 */
138 	struct jffs2_full_dirent *fd;
140 	D1(printk(KERN_DEBUG "jffs2_build_inode building inode #%u\n", ic->ino));
	/* Remember the highest inode number encountered on the medium. */
142 	if (ic->ino > c->highest_ino)
143 		c->highest_ino = ic->ino;
145 	/* For each child, increase nlink */
146 	for(fd=ic->scan_dents; fd; fd = fd->next) {
147 		struct jffs2_inode_cache *child_ic;
151 		/* XXX: Can get high latency here with huge directories */
153 		child_ic = jffs2_get_ino_cache(c, fd->ino);
	/* (Reached when the lookup fails: dirent names a nonexistent ino.) */
155 			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
156 			       fd->name, fd->ino, ic->ino);
	/* A child that already had a link and is DT_DIR looks like a
	   hard-linked directory; ino 1 linked to ino 1 is the known
	   mkfs.jffs2 root-dir quirk handled by the printks below. */
160 		if (child_ic->nlink++ && fd->type == DT_DIR) {
161 			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
162 			if (fd->ino == 1 && ic->ino == 1) {
163 				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
164 				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
166 			/* What do we do about it? */
168 		D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
169 		/* Can't free them. We might need them in pass 2 */
174 int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
/*
 * Pass 2 helper: mark every physical node of an inode with nlink == 0
 * obsolete, and if the inode was a directory, consume its scan-time
 * dirent list too.  Per the caller's comment, -EAGAIN is returned when
 * children were newly orphaned, forcing a pass-2 restart.
 *
 * NOTE(review): this listing has dropped lines (braces,
 * 'fd = ic->scan_dents;', the deletion-dirent and missing-child
 * condition lines, the child nlink decrement, return statements);
 * the annotations below describe only visible code.
 */
176 	struct jffs2_raw_node_ref *raw;
177 	struct jffs2_full_dirent *fd;
180 	D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
	/* The per-inode node list is circular and terminated by the
	   inocache itself -- hence the 'raw != (void *)ic' end test. */
182 	for (raw = ic->nodes; raw != (void *)ic; raw = raw->next_in_ino) {
183 		D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
184 		jffs2_mark_node_obsolete(c, raw);
187 	if (ic->scan_dents) {
189 		D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));
	/* Pop dirents off the scan list one at a time, freeing each. */
191 		while(ic->scan_dents) {
192 			struct jffs2_inode_cache *child_ic;
195 			ic->scan_dents = fd->next;
198 				/* It's a deletion dirent. Ignore it */
199 				D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
200 				jffs2_free_full_dirent(fd);
205 				printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
208 			D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
211 			child_ic = jffs2_get_ino_cache(c, fd->ino);
213 				printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
214 				jffs2_free_full_dirent(fd);
217 			jffs2_free_full_dirent(fd);
224 	   We don't delete the inocache from the hash list and free it yet.
225 	   The erase code will do that, when all the nodes are completely gone.
231 static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
235 /* Deletion should almost _always_ be allowed. We're fairly
236 buggered once we stop allowing people to delete stuff
237 because there's not enough free space... */
238 c->resv_blocks_deletion = 2;
240 /* Be conservative about how much space we need before we allow writes.
241 On top of that which is required for deletia, require an extra 2%
242 of the medium to be available, for overhead caused by nodes being
243 split across blocks, etc. */
245 size = c->flash_size / 50; /* 2% of flash size */
246 size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
247 size += c->sector_size - 1; /* ... and round up */
249 c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
251 /* When do we let the GC thread run in the background */
253 c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
255 /* When do we allow garbage collection to merge nodes to make
256 long-term progress at the expense of short-term space exhaustion? */
257 c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
259 /* When do we allow garbage collection to eat from bad blocks rather
260 than actually making progress? */
261 c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
263 /* If there's less than this amount of dirty space, don't bother
264 trying to GC to make more space. It'll be a fruitless task */
265 c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
267 D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
268 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
269 D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n",
270 c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
271 D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n",
272 c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
273 D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
274 c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
275 D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n",
276 c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
277 D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n",
278 c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
279 D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
280 c->nospc_dirty_size));
283 int jffs2_do_mount_fs(struct jffs2_sb_info *c)
/*
 * Top-level mount helper: size and initialise the per-eraseblock array,
 * the semaphores/waitqueues/spinlocks and the eraseblock state lists,
 * then run jffs2_build_filesystem() and compute GC trigger levels.
 *
 * NOTE(review): this definition continues past the end of this listing,
 * and intermediate lines (opening brace, declarations of 'i'/'ret', the
 * kmalloc NULL check, closing braces, final return) were dropped; the
 * annotations below describe only visible code.
 */
287 	c->free_size = c->flash_size;
288 	c->nr_blocks = c->flash_size / c->sector_size;
	/* NOTE(review): the sizeof * nr_blocks multiplication is unchecked
	   for overflow -- presumably bounded by sane flash sizes; confirm. */
289 	c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
	/* Every eraseblock starts out fully free, at its device offset. */
292 	for (i=0; i<c->nr_blocks; i++) {
293 		INIT_LIST_HEAD(&c->blocks[i].list);
294 		c->blocks[i].offset = i * c->sector_size;
295 		c->blocks[i].free_size = c->sector_size;
296 		c->blocks[i].dirty_size = 0;
297 		c->blocks[i].wasted_size = 0;
298 		c->blocks[i].unchecked_size = 0;
299 		c->blocks[i].used_size = 0;
300 		c->blocks[i].first_node = NULL;
301 		c->blocks[i].last_node = NULL;
	/* Locking and waitqueue initialisation. */
304 	init_MUTEX(&c->alloc_sem);
305 	init_MUTEX(&c->erase_free_sem);
306 	init_waitqueue_head(&c->erase_wait);
307 	init_waitqueue_head(&c->inocache_wq);
308 	spin_lock_init(&c->erase_completion_lock);
309 	spin_lock_init(&c->inocache_lock);
	/* Eraseblock state lists, all initially empty. */
311 	INIT_LIST_HEAD(&c->clean_list);
312 	INIT_LIST_HEAD(&c->very_dirty_list);
313 	INIT_LIST_HEAD(&c->dirty_list);
314 	INIT_LIST_HEAD(&c->erasable_list);
315 	INIT_LIST_HEAD(&c->erasing_list);
316 	INIT_LIST_HEAD(&c->erase_pending_list);
317 	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
318 	INIT_LIST_HEAD(&c->erase_complete_list);
319 	INIT_LIST_HEAD(&c->free_list);
320 	INIT_LIST_HEAD(&c->bad_list);
321 	INIT_LIST_HEAD(&c->bad_used_list);
	/* On build failure, tear down what the medium scan constructed. */
324 	if (jffs2_build_filesystem(c)) {
325 		D1(printk(KERN_DEBUG "build_fs failed\n"));
326 		jffs2_free_ino_caches(c);
327 		jffs2_free_raw_node_refs(c);
332 	jffs2_calc_trigger_levels(c);