2 * JFFS2 -- Journalling Flash File System, Version 2.
4 * Copyright (C) 2001-2003 Red Hat, Inc.
6 * Created by David Woodhouse <dwmw2@redhat.com>
8 * For licensing information, see the file 'LICENCE' in this directory.
10 * $Id: nodemgmt.c,v 1.107 2003/11/26 15:30:58 dwmw2 Exp $
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
22 * jffs2_reserve_space - request physical space to write nodes to flash
24 * @minsize: Minimum acceptable size of allocation
25 * @ofs: Returned value of node offset
26 * @len: Returned value of allocation length
27 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 * Requests a block of physical space on the flash. Returns zero for success
30 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31 * or other error if appropriate.
33 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34 * allocation semaphore, to prevent more than one allocation from being
35 * active at any time. The semaphore is later released by jffs2_complete_reservation()
37 * jffs2_reserve_space() may trigger garbage collection in order to make room
38 * for the requested allocation.
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
/*
 * Reserve PAD(minsize) bytes of flash for a new node write on behalf of
 * a normal (ALLOC_NORMAL) or deletion (ALLOC_DELETION) writer.  While
 * the number of free + erasing blocks is below c->resv_blocks_write it
 * triggers garbage-collect passes, then hands off to
 * jffs2_do_reserve_space() to carve the space out of c->nextblock.
 * Returns 0 with *ofs/*len filled in, or a negative errno (-ENOSPC when
 * GC cannot help).
 * NOTE(review): the embedded numbering in this extract jumps (e.g.
 * 119->125), so some statements -- braces, error-path returns, and the
 * down() of the alloc semaphore implied by the "alloc sem got" message
 * below -- are not visible here.
 */
43 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
46 int blocksneeded = c->resv_blocks_write;
/* Round the request up to the flash allocation granularity */
48 minsize = PAD(minsize);
50 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
53 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
55 spin_lock(&c->erase_completion_lock);
57 /* this needs a little more thought (true <tglx> :)) */
/* Retry the whole reservation while the inner attempt reports -EAGAIN */
58 while(ret == -EAGAIN) {
/* Not enough blocks in reserve: run GC passes until there are */
59 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
61 uint32_t dirty, avail;
63 /* calculate real dirty size
64 * dirty_size contains blocks on erase_pending_list
65 * those blocks are counted in c->nr_erasing_blocks.
66 * If one block is actually erased, it is no longer counted as dirty_space
67 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
68 * with c->nr_erasing_blocks * c->sector_size again.
69 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
70 * This helps us to force gc and pick eventually a clean block to spread the load.
71 * We add unchecked_size here, as we hopefully will find some space to use.
72 * This will affect the sum only once, as gc first finishes checking
75 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
76 if (dirty < c->nospc_dirty_size) {
/* Deletions are let through on a lower reserve (resv_blocks_deletion)
 * so the filesystem can still free space when it is nearly full */
77 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
78 printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
/* NOTE(review): the format text says "nospc_dirty_size" but the third
 * argument passed is c->sector_size -- looks like it should be
 * c->nospc_dirty_size.  Debug-only output; confirm before changing. */
81 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
82 dirty, c->unchecked_size, c->sector_size));
84 spin_unlock(&c->erase_completion_lock);
89 /* Calc possibly available space. Possibly available means that we
90 * don't know, if unchecked size contains obsoleted nodes, which could give us some
91 * more usable space. This will affect the sum only once, as gc first finishes checking
93 * Return -ENOSPC, if the maximum possibly available space is less or equal than
94 * blocksneeded * sector_size.
95 * This blocks endless gc looping on a filesystem, which is nearly full, even if
96 * the check above passes.
98 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
99 if ( (avail / c->sector_size) <= blocksneeded) {
100 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
101 printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
105 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
106 avail, blocksneeded * c->sector_size));
107 spin_unlock(&c->erase_completion_lock);
/* Drop the lock across the GC pass -- it takes its own locks and may sleep */
114 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
115 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
116 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
117 spin_unlock(&c->erase_completion_lock);
119 ret = jffs2_garbage_collect_pass(c);
/* Abort if the caller caught a signal while we were garbage collecting
 * (the branch body falls on lines elided from this extract) */
125 if (signal_pending(current))
129 spin_lock(&c->erase_completion_lock);
/* Reserve target met (or loop not entered): attempt the actual carve-out */
132 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
134 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
137 spin_unlock(&c->erase_completion_lock);
/*
 * GC-internal variant of jffs2_reserve_space(): no reserved-block
 * accounting and no GC triggering (the caller IS the garbage
 * collector), just loop on jffs2_do_reserve_space() under
 * erase_completion_lock until it stops returning -EAGAIN.
 */
143 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
/* Round up to the flash allocation granularity, as in jffs2_reserve_space() */
146 minsize = PAD(minsize);
148 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
150 spin_lock(&c->erase_completion_lock);
151 while(ret == -EAGAIN) {
152 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
154 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
157 spin_unlock(&c->erase_completion_lock);
161 /* Called with alloc sem _and_ erase_completion_lock */
/*
 * Core allocator.  Tries to satisfy the request from c->nextblock:
 * if the current nextblock cannot hold 'minsize' bytes its tail is
 * written off as wasted (possibly converted to dirty) and the block is
 * refiled to the clean/dirty/very_dirty list; a replacement block is
 * then taken from the free_list, kicking off erases when that list is
 * empty.  On success *ofs/*len describe the usable span at the block's
 * current fill point.  May temporarily drop erase_completion_lock
 * around wbuf flushes, erases and node obsoletion.
 * NOTE(review): several lines (braces, else keywords, error returns)
 * are elided from this extract -- numbering gaps mark them.
 */
162 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
164 struct jffs2_eraseblock *jeb = c->nextblock;
/* Current nextblock too small for this node: retire it */
167 if (jeb && minsize > jeb->free_size) {
168 /* Skip the end of this block and file it as having some dirty space */
169 /* If there's a pending write to it, flush now */
170 if (jffs2_wbuf_dirty(c)) {
171 spin_unlock(&c->erase_completion_lock);
172 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
173 jffs2_flush_wbuf_pad(c);
174 spin_lock(&c->erase_completion_lock);
/* Account the unused tail of the block as wasted space */
178 c->wasted_size += jeb->free_size;
179 c->free_size -= jeb->free_size;
180 jeb->wasted_size += jeb->free_size;
183 /* Check, if we have a dirty block now, or if it was dirty already */
184 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
/* Enough waste to count as dirty: reclassify it all as dirty space */
185 c->dirty_size += jeb->wasted_size;
186 c->wasted_size -= jeb->wasted_size;
187 jeb->dirty_size += jeb->wasted_size;
188 jeb->wasted_size = 0;
189 if (VERYDIRTY(c, jeb->dirty_size)) {
190 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
191 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
192 list_add_tail(&jeb->list, &c->very_dirty_list);
194 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
195 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
196 list_add_tail(&jeb->list, &c->dirty_list);
199 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
200 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
201 list_add_tail(&jeb->list, &c->clean_list);
/* Old nextblock refiled; we need a fresh one below */
203 c->nextblock = jeb = NULL;
207 struct list_head *next;
208 /* Take the next block off the 'free' list */
210 if (list_empty(&c->free_list)) {
/* Nothing free: if nothing is erasing either, queue an erasable
 * block for erase so one becomes free */
212 if (!c->nr_erasing_blocks &&
213 !list_empty(&c->erasable_list)) {
214 struct jffs2_eraseblock *ejeb;
216 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
217 list_del(&ejeb->list);
218 list_add_tail(&ejeb->list, &c->erase_pending_list);
219 c->nr_erasing_blocks++;
220 jffs2_erase_pending_trigger(c);
221 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
/* Still nothing erasing: flushing the wbuf may move blocks from
 * erasable_pending_wbuf_list onto erasable_list */
225 if (!c->nr_erasing_blocks &&
226 !list_empty(&c->erasable_pending_wbuf_list)) {
227 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
228 /* c->nextblock is NULL, no update to c->nextblock allowed */
229 spin_unlock(&c->erase_completion_lock);
230 jffs2_flush_wbuf_pad(c);
231 spin_lock(&c->erase_completion_lock);
232 /* Have another go. It'll be on the erasable_list now */
236 if (!c->nr_erasing_blocks) {
237 /* Ouch. We're in GC, or we wouldn't have got here.
238 And there's no space left. At all. */
239 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
240 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
241 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
245 spin_unlock(&c->erase_completion_lock);
246 /* Don't wait for it; just erase one right now */
247 jffs2_erase_pending_blocks(c, 1);
248 spin_lock(&c->erase_completion_lock);
250 /* An erase may have failed, decreasing the
251 amount of free space available. So we must
252 restart from the beginning */
/* Free list is non-empty: adopt its first block as the new nextblock */
256 next = c->free_list.next;
258 c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
/* A block fresh off the free_list should contain only its cleanmarker */
261 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
262 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
266 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
/* Hand back the span from the block's current fill point to its end */
268 *ofs = jeb->offset + (c->sector_size - jeb->free_size);
269 *len = jeb->free_size;
271 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
272 !jeb->first_node->next_in_ino) {
273 /* Only node in it beforehand was a CLEANMARKER node (we think).
274 So mark it obsolete now that there's going to be another node
275 in the block. This will reduce used_size to zero but We've
276 already set c->nextblock so that jffs2_mark_node_obsolete()
277 won't try to refile it to the dirty_list.
/* Drop the lock: jffs2_mark_node_obsolete() takes it itself */
279 spin_unlock(&c->erase_completion_lock);
280 jffs2_mark_node_obsolete(c, jeb->first_node);
281 spin_lock(&c->erase_completion_lock);
284 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
289 * jffs2_add_physical_node_ref - add a physical node reference to the list
290 * @c: superblock info
291 * @new: new node reference to add
292 * @len: length of this physical node (no longer a parameter; derived from @new via ref_totlen())
293 * @dirty: dirty flag for new node (no longer a parameter; derived from @new via ref_obsolete())
295 * Should only be used to report nodes for which space has been allocated
296 * by jffs2_reserve_space.
298 * Must be called with the alloc_sem held.
/*
 * Attach a freshly written raw node ref to its eraseblock's physical
 * node chain and account its length (ref_totlen) against the block's
 * free and used/dirty sizes.  The ref must be the node most recently
 * reserved: its offset has to match the current fill point of
 * c->nextblock, otherwise the ref is freed and the call rejected.
 * Called with the alloc_sem held (per the kernel-doc above).
 */
301 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
303 struct jffs2_eraseblock *jeb;
/* Map the flash offset to its containing eraseblock */
306 jeb = &c->blocks[new->flash_offset / c->sector_size];
307 len = ref_totlen(c, jeb, new);
309 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
/* Sanity check: the node must land exactly at nextblock's fill point */
311 if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
312 printk(KERN_WARNING "argh. node added in wrong place\n");
313 jffs2_free_raw_node_ref(new);
317 spin_lock(&c->erase_completion_lock);
/* Append to the per-block chain of physical nodes */
319 if (!jeb->first_node)
320 jeb->first_node = new;
322 jeb->last_node->next_phys = new;
323 jeb->last_node = new;
325 jeb->free_size -= len;
/* The node may already be obsolete (marked before being added) */
327 if (ref_obsolete(new)) {
328 jeb->dirty_size += len;
329 c->dirty_size += len;
331 jeb->used_size += len;
/* Block is now completely full with nothing dirty: file it as clean */
335 if (!jeb->free_size && !jeb->dirty_size) {
336 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
337 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
338 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
339 if (jffs2_wbuf_dirty(c)) {
340 /* Flush the last write in the block if it's outstanding */
341 spin_unlock(&c->erase_completion_lock);
342 jffs2_flush_wbuf_pad(c);
343 spin_lock(&c->erase_completion_lock);
346 list_add_tail(&jeb->list, &c->clean_list);
/* Debug-build accounting consistency checks */
349 ACCT_SANITY_CHECK(c,jeb);
350 D1(ACCT_PARANOIA_CHECK(jeb));
352 spin_unlock(&c->erase_completion_lock);
/*
 * Complete a reservation made by jffs2_reserve_space() and wake the GC
 * thread.  NOTE(review): presumably this also releases the alloc
 * semaphore taken by jffs2_reserve_space() -- the statement falls on a
 * line elided from this extract; confirm against the full source.
 */
358 void jffs2_complete_reservation(struct jffs2_sb_info *c)
360 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
361 jffs2_garbage_collect_trigger(c);
/*
 * Debug helper: report whether 'obj' is currently linked on the list
 * headed at 'head'.  The comparison and return statements fall on
 * lines elided from this extract.
 */
365 static inline int on_list(struct list_head *obj, struct list_head *head)
367 struct list_head *this;
/* Walk the list looking for the object itself */
369 list_for_each(this, head) {
371 D1(printk("%p is on list at %p\n", obj, head));
/*
 * Mark a raw node obsolete: move its length from used (or unchecked)
 * space into dirty or wasted space, refile the containing eraseblock
 * on the appropriate list (possibly queueing it for erase), and --
 * when the flash supports it and we're not mounting or read-only --
 * rewrite the node header on flash with the ACCURATE bit cleared.
 * NOTE(review): numbering gaps show elided lines (early returns for
 * the NULL/already-obsolete cases, else keywords, closing braces).
 */
379 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
381 struct jffs2_eraseblock *jeb;
383 struct jffs2_unknown_node n;
388 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
/* Obsoleting twice would corrupt the accounting below */
391 if (ref_obsolete(ref)) {
392 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
395 blocknr = ref->flash_offset / c->sector_size;
396 if (blocknr >= c->nr_blocks) {
397 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
400 jeb = &c->blocks[blocknr];
402 spin_lock(&c->erase_completion_lock);
/* A node that was never checked comes out of unchecked_size instead
 * of used_size */
404 if (ref_flags(ref) == REF_UNCHECKED) {
/* NOTE(review): this message reports "unchecked_size was already ..."
 * but passes jeb->used_size -- presumably should be
 * jeb->unchecked_size; debug-only, confirm before changing. */
405 D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
406 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
407 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
410 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
411 jeb->unchecked_size -= ref_totlen(c, jeb, ref);
412 c->unchecked_size -= ref_totlen(c, jeb, ref);
414 D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
415 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
416 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
419 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
420 jeb->used_size -= ref_totlen(c, jeb, ref);
421 c->used_size -= ref_totlen(c, jeb, ref);
424 // Take care that wasted size is taken into account
/* Space becomes 'dirty' (reclaimable by GC) if the block already has
 * dirty space or the amount crosses the ISDIRTY threshold; space in
 * the current nextblock stays 'wasted' */
425 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
426 D1(printk("Dirtying\n"));
/* addedsize tracks how much dirty space this call added, for the
 * ISDIRTY/VERYDIRTY transition checks in the refiling code below */
427 addedsize = ref_totlen(c, jeb, ref);
428 jeb->dirty_size += ref_totlen(c, jeb, ref);
429 c->dirty_size += ref_totlen(c, jeb, ref);
431 /* Convert wasted space to dirty, if not a bad block */
432 if (jeb->wasted_size) {
433 if (on_list(&jeb->list, &c->bad_used_list)) {
434 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
436 addedsize = 0; /* To fool the refiling code later */
438 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
439 jeb->wasted_size, jeb->offset));
440 addedsize += jeb->wasted_size;
441 jeb->dirty_size += jeb->wasted_size;
442 c->dirty_size += jeb->wasted_size;
443 c->wasted_size -= jeb->wasted_size;
444 jeb->wasted_size = 0;
448 D1(printk("Wasting\n"));
450 jeb->wasted_size += ref_totlen(c, jeb, ref);
451 c->wasted_size += ref_totlen(c, jeb, ref);
/* Flag the in-core ref obsolete (flash_offset低 bits carry ref flags) */
453 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
455 ACCT_SANITY_CHECK(c, jeb);
457 D1(ACCT_PARANOIA_CHECK(jeb));
459 if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
460 /* Mount in progress. Don't muck about with the block
461 lists because they're not ready yet, and don't actually
462 obliterate nodes that look obsolete. If they weren't
463 marked obsolete on the flash at the time they _became_
464 obsolete, there was probably a reason for that. */
465 spin_unlock(&c->erase_completion_lock);
/* Refile the block according to its new dirtiness */
469 if (jeb == c->nextblock) {
470 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
471 } else if (!jeb->used_size && !jeb->unchecked_size) {
/* Nothing live left in the block: it can be erased */
472 if (jeb == c->gcblock) {
473 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
476 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
477 list_del(&jeb->list);
478 if (jffs2_wbuf_dirty(c)) {
480 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
481 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
484 /* Most of the time, we just erase it immediately. Otherwise we
485 spend ages scanning it on mount, etc. */
486 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
487 list_add_tail(&jeb->list, &c->erase_pending_list);
488 c->nr_erasing_blocks++;
489 jffs2_erase_pending_trigger(c);
491 /* Sometimes, however, we leave it elsewhere so it doesn't get
492 immediately reused, and we spread the load a bit. */
493 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
494 list_add_tail(&jeb->list, &c->erasable_list);
497 D1(printk(KERN_DEBUG "Done OK\n"));
498 } else if (jeb == c->gcblock) {
499 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
/* Block just crossed the ISDIRTY threshold: clean -> dirty list */
500 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
501 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
502 list_del(&jeb->list);
503 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
504 list_add_tail(&jeb->list, &c->dirty_list);
/* Block just crossed the VERYDIRTY threshold: dirty -> very_dirty list */
505 } else if (VERYDIRTY(c, jeb->dirty_size) &&
506 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
507 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
508 list_del(&jeb->list);
509 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
510 list_add_tail(&jeb->list, &c->very_dirty_list);
512 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
513 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
516 spin_unlock(&c->erase_completion_lock);
/* Accounting done.  Now, if the medium allows rewriting in place and
 * we're writable, clear the ACCURATE bit in the on-flash node header
 * so a later mount won't treat the node as valid */
518 if (!jffs2_can_mark_obsolete(c))
520 if (jffs2_is_readonly(c))
523 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
524 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
526 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
529 if (retlen != sizeof(n)) {
530 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
/* Cross-check the on-flash header against our in-core ref */
533 if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
534 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
537 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
538 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
541 /* XXX FIXME: This is ugly now */
542 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
543 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
545 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
548 if (retlen != sizeof(n)) {
549 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
554 #if CONFIG_JFFS2_FS_DEBUG > 0
/*
 * Debug-only (CONFIG_JFFS2_FS_DEBUG) dump of all size counters and
 * every eraseblock list, one printk per block, with per-list totals
 * for the clean/very_dirty/dirty lists.  Purely informational; no
 * state is modified.
 */
555 void jffs2_dump_block_lists(struct jffs2_sb_info *c)
/* Global size accounting */
559 printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
560 printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
561 printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
562 printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
563 printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
564 printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
565 printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
566 printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
567 printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
568 printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
569 printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
/* The two special blocks (may be NULL) */
572 printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
573 c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
575 printk(KERN_DEBUG "nextblock: NULL\n");
578 printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
579 c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
581 printk(KERN_DEBUG "gcblock: NULL\n");
/* clean_list: also totals the wasted space on clean blocks */
583 if (list_empty(&c->clean_list)) {
584 printk(KERN_DEBUG "clean_list: empty\n");
586 struct list_head *this;
590 list_for_each(this, &c->clean_list) {
591 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
593 dirty += jeb->wasted_size;
594 printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
596 printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
/* very_dirty_list: totals dirty space */
598 if (list_empty(&c->very_dirty_list)) {
599 printk(KERN_DEBUG "very_dirty_list: empty\n");
601 struct list_head *this;
605 list_for_each(this, &c->very_dirty_list) {
606 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
608 dirty += jeb->dirty_size;
609 printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
610 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
612 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
613 numblocks, dirty, dirty / numblocks);
/* dirty_list: totals dirty space */
615 if (list_empty(&c->dirty_list)) {
616 printk(KERN_DEBUG "dirty_list: empty\n");
618 struct list_head *this;
622 list_for_each(this, &c->dirty_list) {
623 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
625 dirty += jeb->dirty_size;
626 printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
627 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
629 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
630 numblocks, dirty, dirty / numblocks);
/* Remaining lists: one line per block, no totals */
632 if (list_empty(&c->erasable_list)) {
633 printk(KERN_DEBUG "erasable_list: empty\n");
635 struct list_head *this;
637 list_for_each(this, &c->erasable_list) {
638 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
639 printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
640 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
643 if (list_empty(&c->erasing_list)) {
644 printk(KERN_DEBUG "erasing_list: empty\n");
646 struct list_head *this;
648 list_for_each(this, &c->erasing_list) {
649 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
650 printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
651 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
654 if (list_empty(&c->erase_pending_list)) {
655 printk(KERN_DEBUG "erase_pending_list: empty\n");
657 struct list_head *this;
659 list_for_each(this, &c->erase_pending_list) {
660 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
661 printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
662 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
665 if (list_empty(&c->erasable_pending_wbuf_list)) {
666 printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
668 struct list_head *this;
670 list_for_each(this, &c->erasable_pending_wbuf_list) {
671 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
672 printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
673 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
676 if (list_empty(&c->free_list)) {
677 printk(KERN_DEBUG "free_list: empty\n");
679 struct list_head *this;
681 list_for_each(this, &c->free_list) {
682 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
683 printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
684 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
687 if (list_empty(&c->bad_list)) {
688 printk(KERN_DEBUG "bad_list: empty\n");
690 struct list_head *this;
692 list_for_each(this, &c->bad_list) {
693 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
694 printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
695 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
698 if (list_empty(&c->bad_used_list)) {
699 printk(KERN_DEBUG "bad_used_list: empty\n");
701 struct list_head *this;
703 list_for_each(this, &c->bad_used_list) {
704 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
705 printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
706 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
710 #endif /* CONFIG_JFFS2_FS_DEBUG */
/*
 * Decide whether the background GC thread should wake up: yes while
 * there are unchecked nodes still to be verified, or when free +
 * erasing blocks drop below the GC trigger threshold AND there is
 * enough dirty space for GC to reclaim.  Returns the decision as an
 * int (the return statements fall on lines elided from this extract).
 */
712 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
/* Unchecked nodes left over from mount: GC thread must check them */
717 if (c->unchecked_size) {
718 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
719 c->unchecked_size, c->checked_ino));
723 /* dirty_size contains blocks on erase_pending_list
724 * those blocks are counted in c->nr_erasing_blocks.
725 * If one block is actually erased, it is no longer counted as dirty_space
726 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
727 * with c->nr_erasing_blocks * c->sector_size again.
728 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
729 * This helps us to force gc and pick eventually a clean block to spread the load.
731 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
733 if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
734 (dirty > c->nospc_dirty_size))
737 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
738 c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));