2 * JFFS2 -- Journalling Flash File System, Version 2.
4 * Copyright (C) 2001-2003 Red Hat, Inc.
6 * Created by David Woodhouse <dwmw2@redhat.com>
8 * For licensing information, see the file 'LICENCE' in this directory.
10 * $Id: nodemgmt.c,v 1.102 2003/10/08 17:21:19 dwmw2 Exp $
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
22 * jffs2_reserve_space - request physical space to write nodes to flash
24 * @minsize: Minimum acceptable size of allocation
25 * @ofs: Returned value of node offset
26 * @len: Returned value of allocation length
27 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 * Requests a block of physical space on the flash. Returns zero for success
30 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31 * or other error if appropriate.
33 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34 * allocation semaphore, to prevent more than one allocation from being
35 * active at any time. The semaphore is later released by jffs2_complete_reservation()
37 * jffs2_reserve_space() may trigger garbage collection in order to make room
38 * for the requested allocation.
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
/* NOTE(review): this extraction is incomplete -- each line carries a stray
 * original line number, and several lines are missing (the opening brace,
 * the declaration of 'ret', 'else' branches, 'return' statements, the
 * down(&c->alloc_sem) call).  Code is left byte-identical; comments only. */
43 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
46 int blocksneeded = c->resv_blocks_write;
/* Round the request up to the flash's padding granularity before checking. */
48 minsize = PAD(minsize);
50 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
/* NOTE(review): the down(&c->alloc_sem) implied by the message below is on
 * a line missing from this extraction -- confirm against the original. */
53 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
55 spin_lock(&c->erase_completion_lock);
57 /* this needs a little more thought (true <tglx> :)) */
/* Keep retrying while jffs2_do_reserve_space() reports -EAGAIN; each
 * iteration first ensures enough free/erasing blocks exist, running GC
 * passes as needed. */
58 while(ret == -EAGAIN) {
59 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
61 uint32_t dirty, avail;
63 /* calculate real dirty size
64 * dirty_size contains blocks on erase_pending_list
65 * those blocks are counted in c->nr_erasing_blocks.
66 * If one block is actually erased, it is no longer counted as dirty_space
67 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
68 * with c->nr_erasing_blocks * c->sector_size again.
69 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
70 * This helps us to force gc and pick eventually a clean block to spread the load.
71 * We add unchecked_size here, as we hopefully will find some space to use.
72 * This will affect the sum only once, as gc first finishes checking
75 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
/* Not enough reclaimable dirty space to make GC worthwhile: fail with
 * -ENOSPC unless this is a deletion, which is allowed to dip into the
 * smaller deletion reserve so the fs can always free space. */
76 if (dirty < c->nospc_dirty_size) {
77 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
78 printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
/* NOTE(review): the format string labels the third value "nospc_dirty_size"
 * but the argument passed is c->sector_size -- looks like a debug-message
 * slip; verify against the original source. */
81 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
82 dirty, c->unchecked_size, c->sector_size));
84 spin_unlock(&c->erase_completion_lock);
89 /* Calc possibly available space. Possibly available means that we
90 * don't know, if unchecked size contains obsoleted nodes, which could give us some
91 * more usable space. This will affect the sum only once, as gc first finishes checking
93 * Return -ENOSPC, if the maximum possibly available space is less or equal than
94 * blocksneeded * sector_size.
95 * This blocks endless gc looping on a filesystem, which is nearly full, even if
96 * the check above passes.
98 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
99 if ( (avail / c->sector_size) <= blocksneeded) {
100 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
101 printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
105 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
106 avail, blocksneeded * c->sector_size));
107 spin_unlock(&c->erase_completion_lock);
114 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
115 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
116 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
/* GC must run without the erase_completion_lock held; it is retaken below
 * before rechecking the free-block count at the top of the loop. */
117 spin_unlock(&c->erase_completion_lock);
119 ret = jffs2_garbage_collect_pass(c);
/* Let the user interrupt a reservation stuck waiting on GC progress. */
125 if (signal_pending(current))
129 spin_lock(&c->erase_completion_lock);
132 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
134 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
137 spin_unlock(&c->erase_completion_lock);
/* Reserve space on behalf of the garbage collector itself.  Unlike
 * jffs2_reserve_space() this variant never triggers a further GC pass (the
 * caller IS the GC); it just loops on jffs2_do_reserve_space() while it
 * reports -EAGAIN.
 * NOTE(review): the declaration of 'ret', the function braces and the
 * return statement are on lines missing from this extraction. */
143 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
146 minsize = PAD(minsize);
148 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
150 spin_lock(&c->erase_completion_lock);
151 while(ret == -EAGAIN) {
152 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
154 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
157 spin_unlock(&c->erase_completion_lock);
161 /* Called with alloc sem _and_ erase_completion_lock */
/* Core reservation: either carve the request out of c->nextblock, or retire
 * nextblock (refiling it onto the clean/dirty/very_dirty list) and take a
 * fresh block from the free_list, waiting for erases when none is free.
 * On success *ofs/*len describe the usable region in the new nextblock.
 * NOTE(review): this extraction is missing lines (braces, 'else' keywords,
 * 'return' statements, the ACCT_SANITY_CHECK calls) -- code is left
 * byte-identical; comments only. */
162 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
164 struct jffs2_eraseblock *jeb = c->nextblock;
/* The current nextblock cannot fit the request: waste its tail and refile. */
167 if (jeb && minsize > jeb->free_size) {
168 /* Skip the end of this block and file it as having some dirty space */
169 /* If there's a pending write to it, flush now */
170 if (jffs2_wbuf_dirty(c)) {
/* Flushing the write buffer may sleep/do I/O, so drop the spinlock. */
171 spin_unlock(&c->erase_completion_lock);
172 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
173 jffs2_flush_wbuf_pad(c);
174 spin_lock(&c->erase_completion_lock);
/* Account the unusable tail of the block as wasted space. */
178 c->wasted_size += jeb->free_size;
179 c->free_size -= jeb->free_size;
180 jeb->wasted_size += jeb->free_size;
183 /* Check, if we have a dirty block now, or if it was dirty already */
184 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
/* Enough waste to count as dirty: reclassify it all as dirty space. */
185 c->dirty_size += jeb->wasted_size;
186 c->wasted_size -= jeb->wasted_size;
187 jeb->dirty_size += jeb->wasted_size;
188 jeb->wasted_size = 0;
189 if (VERYDIRTY(c, jeb->dirty_size)) {
190 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
191 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
192 list_add_tail(&jeb->list, &c->very_dirty_list);
/* (else branch: merely dirty) */
194 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
195 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
196 list_add_tail(&jeb->list, &c->dirty_list);
/* (else branch: not dirty enough -- file as clean) */
199 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
200 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
201 list_add_tail(&jeb->list, &c->clean_list);
/* Force selection of a fresh block below. */
203 c->nextblock = jeb = NULL;
207 struct list_head *next;
208 /* Take the next block off the 'free' list */
210 if (list_empty(&c->free_list)) {
212 DECLARE_WAITQUEUE(wait, current);
/* No free block and nothing erasing: promote an erasable block to the
 * erase_pending_list so the erase machinery produces a free one. */
214 if (!c->nr_erasing_blocks &&
215 !list_empty(&c->erasable_list)) {
216 struct jffs2_eraseblock *ejeb;
218 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
219 list_del(&ejeb->list);
220 list_add_tail(&ejeb->list, &c->erase_pending_list);
221 c->nr_erasing_blocks++;
222 jffs2_erase_pending_trigger(c);
223 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
/* Still nothing erasing, but blocks are waiting on a wbuf flush before they
 * become erasable: flush now and retry. */
227 if (!c->nr_erasing_blocks &&
228 !list_empty(&c->erasable_pending_wbuf_list)) {
229 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
230 /* c->nextblock is NULL, no update to c->nextblock allowed */
231 spin_unlock(&c->erase_completion_lock);
232 jffs2_flush_wbuf_pad(c);
233 spin_lock(&c->erase_completion_lock);
234 /* Have another go. It'll be on the erasable_list now */
238 if (!c->nr_erasing_blocks) {
239 /* Ouch. We're in GC, or we wouldn't have got here.
240 And there's no space left. At all. */
241 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
242 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
243 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
246 /* Make sure this can't deadlock. Someone has to start the erases
247 of erase_pending blocks */
249 /* In eCos, we don't have a handy kernel thread doing the erases for
250 us. We do them ourselves right now. */
251 jffs2_erase_pending_blocks(c);
/* Sleep (interruptibly) until an erase completes and wakes erase_wait. */
253 set_current_state(TASK_INTERRUPTIBLE);
254 add_wait_queue(&c->erase_wait, &wait);
255 D1(printk(KERN_DEBUG "Waiting for erases to complete. erasing_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
256 c->nr_erasing_blocks, list_empty(&c->erasable_list)?"yes":"no",
257 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"));
258 if (!list_empty(&c->erase_pending_list)) {
259 D1(printk(KERN_DEBUG "Triggering pending erases\n"));
260 jffs2_erase_pending_trigger(c);
262 spin_unlock(&c->erase_completion_lock);
264 remove_wait_queue(&c->erase_wait, &wait);
265 spin_lock(&c->erase_completion_lock);
266 if (signal_pending(current)) {
270 /* An erase may have failed, decreasing the
271 amount of free space available. So we must
272 restart from the beginning */
/* A free block is available: make it the new nextblock. */
276 next = c->free_list.next;
278 c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
/* Sanity: a block fresh off the free_list should be empty apart from its
 * cleanmarker. */
281 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
282 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
286 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
288 *ofs = jeb->offset + (c->sector_size - jeb->free_size);
289 *len = jeb->free_size;
291 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
292 !jeb->first_node->next_in_ino) {
293 /* Only node in it beforehand was a CLEANMARKER node (we think).
294 So mark it obsolete now that there's going to be another node
295 in the block. This will reduce used_size to zero but We've
296 already set c->nextblock so that jffs2_mark_node_obsolete()
297 won't try to refile it to the dirty_list.
/* Obsoleting may do flash I/O, so drop the spinlock around it. */
299 spin_unlock(&c->erase_completion_lock);
300 jffs2_mark_node_obsolete(c, jeb->first_node);
301 spin_lock(&c->erase_completion_lock);
304 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
309 * jffs2_add_physical_node_ref - add a physical node reference to the list
310 * @c: superblock info
311 * @new: new node reference to add
312 * @len: length of this physical node
313 * @dirty: dirty flag for new node
315 * Should only be used to report nodes for which space has been allocated
316 * by jffs2_reserve_space.
318 * Must be called with the alloc_sem held.
/* NOTE(review): the kernel-doc above lists @len and @dirty, but the visible
 * signature takes only (c, new); the length comes from new->totlen and the
 * dirty state from ref_obsolete(new).  The doc block looks stale. */
321 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
323 struct jffs2_eraseblock *jeb;
324 uint32_t len = new->totlen;
/* Locate the eraseblock containing the new node's flash offset. */
326 jeb = &c->blocks[new->flash_offset / c->sector_size];
327 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
/* The node must land exactly at the write point of the current nextblock;
 * anything else means the reservation protocol was violated. */
329 if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
330 printk(KERN_WARNING "argh. node added in wrong place\n");
331 jffs2_free_raw_node_ref(new);
335 spin_lock(&c->erase_completion_lock);
/* Append to the block's physical node chain. */
337 if (!jeb->first_node)
338 jeb->first_node = new;
340 jeb->last_node->next_phys = new;
341 jeb->last_node = new;
343 jeb->free_size -= len;
/* Account the node as dirty if it is already obsolete, used otherwise. */
345 if (ref_obsolete(new)) {
346 jeb->dirty_size += len;
347 c->dirty_size += len;
349 jeb->used_size += len;
/* Block completely filled with live data: file it on the clean_list. */
353 if (!jeb->free_size && !jeb->dirty_size) {
354 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
355 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
356 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
357 if (jffs2_wbuf_dirty(c)) {
358 /* Flush the last write in the block if it's outstanding */
359 spin_unlock(&c->erase_completion_lock);
360 jffs2_flush_wbuf_pad(c);
361 spin_lock(&c->erase_completion_lock);
364 list_add_tail(&jeb->list, &c->clean_list);
367 ACCT_SANITY_CHECK(c,jeb);
368 D1(ACCT_PARANOIA_CHECK(jeb));
370 spin_unlock(&c->erase_completion_lock);
/* Finish an allocation started by jffs2_reserve_space(): kick the garbage
 * collector and release the per-filesystem alloc_sem.
 * NOTE(review): the up(&c->alloc_sem) call is on a line missing from this
 * extraction -- confirm against the original file. */
376 void jffs2_complete_reservation(struct jffs2_sb_info *c)
378 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
379 jffs2_garbage_collect_trigger(c);
/* Test whether @obj is an element of the list headed by @head.
 * NOTE(review): the comparison and return statements are on lines missing
 * from this extraction; only the walk and the debug print of a match are
 * visible. */
383 static inline int on_list(struct list_head *obj, struct list_head *head)
385 struct list_head *this;
387 list_for_each(this, head) {
389 D1(printk("%p is on list at %p\n", obj, head));
/* Mark @ref obsolete: move its length from used (or unchecked) space into
 * dirty/wasted accounting, refile the containing eraseblock onto the
 * appropriate list, and -- when the flash supports it and we're not
 * mounting or read-only -- clear the ACCURATE bit of the node on flash.
 * NOTE(review): this extraction is missing lines (braces, 'else' keywords,
 * 'return' statements, declarations such as blocknr/ret/retlen/addedsize)
 * -- code is left byte-identical; comments only. */
397 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
399 struct jffs2_eraseblock *jeb;
401 struct jffs2_unknown_node n;
406 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
/* Obsoleting twice would corrupt the space accounting below. */
409 if (ref_obsolete(ref)) {
410 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
413 blocknr = ref->flash_offset / c->sector_size;
414 if (blocknr >= c->nr_blocks) {
415 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
418 jeb = &c->blocks[blocknr];
420 spin_lock(&c->erase_completion_lock);
/* Unchecked nodes were accounted in unchecked_size, not used_size. */
422 if (ref_flags(ref) == REF_UNCHECKED) {
423 D1(if (unlikely(jeb->unchecked_size < ref->totlen)) {
/* NOTE(review): this message reports "unchecked_size was already ..." but
 * passes jeb->used_size as the argument -- looks like a copy/paste slip. */
424 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
425 ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
428 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
429 jeb->unchecked_size -= ref->totlen;
430 c->unchecked_size -= ref->totlen;
432 D1(if (unlikely(jeb->used_size < ref->totlen)) {
433 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
434 ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
437 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
438 jeb->used_size -= ref->totlen;
439 c->used_size -= ref->totlen;
442 // Take care, that wasted size is taken into concern
/* Count the freed space as dirty when the block is already dirty (or would
 * become so), except on the current nextblock. */
443 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref->totlen)) && jeb != c->nextblock) {
444 D1(printk("Dirtying\n"));
445 addedsize = ref->totlen;
446 jeb->dirty_size += ref->totlen;
447 c->dirty_size += ref->totlen;
449 /* Convert wasted space to dirty, if not a bad block */
450 if (jeb->wasted_size) {
451 if (on_list(&jeb->list, &c->bad_used_list)) {
452 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
454 addedsize = 0; /* To fool the refiling code later */
456 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
457 jeb->wasted_size, jeb->offset));
458 addedsize += jeb->wasted_size;
459 jeb->dirty_size += jeb->wasted_size;
460 c->dirty_size += jeb->wasted_size;
461 c->wasted_size -= jeb->wasted_size;
462 jeb->wasted_size = 0;
/* (else branch: too small to be worth GC'ing -- count as wasted) */
466 D1(printk("Wasting\n"));
468 jeb->wasted_size += ref->totlen;
469 c->wasted_size += ref->totlen;
/* Flag the in-core reference obsolete; the flags live in the low bits of
 * flash_offset. */
471 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
473 ACCT_SANITY_CHECK(c, jeb);
475 D1(ACCT_PARANOIA_CHECK(jeb));
477 if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
478 /* Mount in progress. Don't muck about with the block
479 lists because they're not ready yet, and don't actually
480 obliterate nodes that look obsolete. If they weren't
481 marked obsolete on the flash at the time they _became_
482 obsolete, there was probably a reason for that. */
483 spin_unlock(&c->erase_completion_lock);
/* Refile the eraseblock onto the list matching its new dirt level. */
487 if (jeb == c->nextblock) {
488 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
489 } else if (!jeb->used_size && !jeb->unchecked_size) {
490 if (jeb == c->gcblock) {
491 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
494 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
495 list_del(&jeb->list);
497 if (jffs2_wbuf_dirty(c)) {
498 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
499 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
500 #if 0 /* This check was added to allow us to find places where we added nodes to the lists
501 after dropping the alloc_sem, and it did that just fine. But it also caused us to
502 lock the alloc_sem in other places, like clear_inode(), when we wouldn't otherwise
503 have needed to. So I suspect it's outlived its usefulness. Thomas? */
505 /* We've changed the rules slightly. After
506 writing a node you now mustn't drop the
507 alloc_sem before you've finished all the
508 list management - this is so that when we
509 get here, we know that no other nodes have
510 been written, and the above check on wbuf
511 is valid - wbuf_len is nonzero IFF the node
512 which obsoletes this node is still in the
515 So we BUG() if that new rule is broken, to
516 make sure we catch it and fix it.
518 if (!down_trylock(&c->alloc_sem)) {
520 printk(KERN_CRIT "jffs2_mark_node_obsolete() called with wbuf active but alloc_sem not locked!\n");
526 /* Most of the time, we just erase it immediately. Otherwise we
527 spend ages scanning it on mount, etc. */
528 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
529 list_add_tail(&jeb->list, &c->erase_pending_list);
530 c->nr_erasing_blocks++;
531 jffs2_erase_pending_trigger(c);
533 /* Sometimes, however, we leave it elsewhere so it doesn't get
534 immediately reused, and we spread the load a bit. */
535 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
536 list_add_tail(&jeb->list, &c->erasable_list);
539 D1(printk(KERN_DEBUG "Done OK\n"));
540 } else if (jeb == c->gcblock) {
541 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
542 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
543 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
544 list_del(&jeb->list);
545 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
546 list_add_tail(&jeb->list, &c->dirty_list);
547 } else if (VERYDIRTY(c, jeb->dirty_size) &&
548 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
549 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
550 list_del(&jeb->list);
551 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
552 list_add_tail(&jeb->list, &c->very_dirty_list);
554 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
555 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
558 spin_unlock(&c->erase_completion_lock);
/* If the flash can't have individual nodes marked obsolete in place, or the
 * fs is read-only, we're done -- the in-core accounting above suffices. */
560 if (!jffs2_can_mark_obsolete(c))
562 if (jffs2_is_readonly(c))
/* Read back the node header, clear its ACCURATE bit, and rewrite it so a
 * later mount scan sees the node as obsolete. */
565 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
566 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
568 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
571 if (retlen != sizeof(n)) {
572 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
575 if (PAD(je32_to_cpu(n.totlen)) != PAD(ref->totlen)) {
576 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen in node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref->totlen);
579 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
580 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
583 /* XXX FIXME: This is ugly now */
584 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
585 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
587 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
590 if (retlen != sizeof(n)) {
591 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
596 #if CONFIG_JFFS2_FS_DEBUG > 0
/* Debug-only dump of the superblock's space accounting and every per-state
 * eraseblock list (clean, dirty, very_dirty, erasable, erasing, pending,
 * free, bad, bad_used) with per-block counters.
 * NOTE(review): this extraction is missing lines ('else' keywords, braces,
 * and the declarations/resets of numblocks and dirty used by the averages)
 * -- code is left byte-identical; comments only. */
597 void jffs2_dump_block_lists(struct jffs2_sb_info *c)
601 printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
602 printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
603 printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
604 printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
605 printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
606 printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
607 printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
608 printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
609 printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
610 printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
611 printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
614 printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
615 c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
617 printk(KERN_DEBUG "nextblock: NULL\n");
620 printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
621 c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
623 printk(KERN_DEBUG "gcblock: NULL\n");
/* For the clean list, "dirty" accumulates wasted_size (waste on otherwise
 * clean blocks); the other dirty-ish lists accumulate dirty_size. */
625 if (list_empty(&c->clean_list)) {
626 printk(KERN_DEBUG "clean_list: empty\n");
628 struct list_head *this;
632 list_for_each(this, &c->clean_list) {
633 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
635 dirty += jeb->wasted_size;
636 printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
638 printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
640 if (list_empty(&c->very_dirty_list)) {
641 printk(KERN_DEBUG "very_dirty_list: empty\n");
643 struct list_head *this;
647 list_for_each(this, &c->very_dirty_list) {
648 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
650 dirty += jeb->dirty_size;
651 printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
652 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
654 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
655 numblocks, dirty, dirty / numblocks);
657 if (list_empty(&c->dirty_list)) {
658 printk(KERN_DEBUG "dirty_list: empty\n");
660 struct list_head *this;
664 list_for_each(this, &c->dirty_list) {
665 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
667 dirty += jeb->dirty_size;
668 printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
669 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
671 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
672 numblocks, dirty, dirty / numblocks);
674 if (list_empty(&c->erasable_list)) {
675 printk(KERN_DEBUG "erasable_list: empty\n");
677 struct list_head *this;
679 list_for_each(this, &c->erasable_list) {
680 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
681 printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
682 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
685 if (list_empty(&c->erasing_list)) {
686 printk(KERN_DEBUG "erasing_list: empty\n");
688 struct list_head *this;
690 list_for_each(this, &c->erasing_list) {
691 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
692 printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
693 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
696 if (list_empty(&c->erase_pending_list)) {
697 printk(KERN_DEBUG "erase_pending_list: empty\n");
699 struct list_head *this;
701 list_for_each(this, &c->erase_pending_list) {
702 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
703 printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
704 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
707 if (list_empty(&c->erasable_pending_wbuf_list)) {
708 printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
710 struct list_head *this;
712 list_for_each(this, &c->erasable_pending_wbuf_list) {
713 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
714 printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
715 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
718 if (list_empty(&c->free_list)) {
719 printk(KERN_DEBUG "free_list: empty\n");
721 struct list_head *this;
723 list_for_each(this, &c->free_list) {
724 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
725 printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
726 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
729 if (list_empty(&c->bad_list)) {
730 printk(KERN_DEBUG "bad_list: empty\n");
732 struct list_head *this;
734 list_for_each(this, &c->bad_list) {
735 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
736 printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
737 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
740 if (list_empty(&c->bad_used_list)) {
741 printk(KERN_DEBUG "bad_used_list: empty\n");
743 struct list_head *this;
745 list_for_each(this, &c->bad_used_list) {
746 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
747 printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
748 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
752 #endif /* CONFIG_JFFS2_FS_DEBUG */