VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / fs / jffs2 / nodemgmt.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@redhat.com>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  * $Id: nodemgmt.c,v 1.107 2003/11/26 15:30:58 dwmw2 Exp $
11  *
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
19 #include "nodelist.h"
20
21 /**
22  *      jffs2_reserve_space - request physical space to write nodes to flash
23  *      @c: superblock info
24  *      @minsize: Minimum acceptable size of allocation
25  *      @ofs: Returned value of node offset
26  *      @len: Returned value of allocation length
27  *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
28  *
29  *      Requests a block of physical space on the flash. Returns zero for success
30  *      and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31  *      or other error if appropriate.
32  *
33  *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34  *      allocation semaphore, to prevent more than one allocation from being
35  *      active at any time. The semaphore is later released by jffs2_complete_reservation()
36  *
37  *      jffs2_reserve_space() may trigger garbage collection in order to make room
38  *      for the requested allocation.
39  */
40
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len);
42
43 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
44 {
45         int ret = -EAGAIN;
46         int blocksneeded = c->resv_blocks_write;
47         /* align it */
48         minsize = PAD(minsize);
49
50         D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
51         down(&c->alloc_sem);
52
53         D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
54
55         spin_lock(&c->erase_completion_lock);
56
57         /* this needs a little more thought (true <tglx> :)) */
58         while(ret == -EAGAIN) {
59                 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
60                         int ret;
61                         uint32_t dirty, avail;
62
63                         /* calculate real dirty size
64                          * dirty_size contains blocks on erase_pending_list
65                          * those blocks are counted in c->nr_erasing_blocks.
66                          * If one block is actually erased, it is not longer counted as dirty_space
67                          * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
68                          * with c->nr_erasing_blocks * c->sector_size again.
69                          * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
70                          * This helps us to force gc and pick eventually a clean block to spread the load.
71                          * We add unchecked_size here, as we hopefully will find some space to use.
72                          * This will affect the sum only once, as gc first finishes checking
73                          * of nodes.
74                          */
75                         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
76                         if (dirty < c->nospc_dirty_size) {
77                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
78                                         printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
79                                         break;
80                                 }
81                                 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
82                                           dirty, c->unchecked_size, c->sector_size));
83
84                                 spin_unlock(&c->erase_completion_lock);
85                                 up(&c->alloc_sem);
86                                 return -ENOSPC;
87                         }
88                         
89                         /* Calc possibly available space. Possibly available means that we
90                          * don't know, if unchecked size contains obsoleted nodes, which could give us some
91                          * more usable space. This will affect the sum only once, as gc first finishes checking
92                          * of nodes.
93                          + Return -ENOSPC, if the maximum possibly available space is less or equal than 
94                          * blocksneeded * sector_size.
95                          * This blocks endless gc looping on a filesystem, which is nearly full, even if
96                          * the check above passes.
97                          */
98                         avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
99                         if ( (avail / c->sector_size) <= blocksneeded) {
100                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
101                                         printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
102                                         break;
103                                 }
104
105                                 D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
106                                           avail, blocksneeded * c->sector_size));
107                                 spin_unlock(&c->erase_completion_lock);
108                                 up(&c->alloc_sem);
109                                 return -ENOSPC;
110                         }
111
112                         up(&c->alloc_sem);
113
114                         D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
115                                   c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
116                                   c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
117                         spin_unlock(&c->erase_completion_lock);
118                         
119                         ret = jffs2_garbage_collect_pass(c);
120                         if (ret)
121                                 return ret;
122
123                         cond_resched();
124
125                         if (signal_pending(current))
126                                 return -EINTR;
127
128                         down(&c->alloc_sem);
129                         spin_lock(&c->erase_completion_lock);
130                 }
131
132                 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
133                 if (ret) {
134                         D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
135                 }
136         }
137         spin_unlock(&c->erase_completion_lock);
138         if (ret)
139                 up(&c->alloc_sem);
140         return ret;
141 }
142
143 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
144 {
145         int ret = -EAGAIN;
146         minsize = PAD(minsize);
147
148         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
149
150         spin_lock(&c->erase_completion_lock);
151         while(ret == -EAGAIN) {
152                 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
153                 if (ret) {
154                         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
155                 }
156         }
157         spin_unlock(&c->erase_completion_lock);
158         return ret;
159 }
160
161 /* Called with alloc sem _and_ erase_completion_lock */
/*
 * Find at least 'minsize' bytes of contiguous free space in c->nextblock,
 * refiling the current nextblock (and pulling a new one off the free list)
 * when it cannot satisfy the request.
 *
 * Returns 0 with *ofs/*len filled in, -EAGAIN when the caller should retry
 * (the erase_completion_lock may have been dropped and retaken meanwhile),
 * or -ENOSPC when nothing is left at all.
 */
162 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
163 {
164         struct jffs2_eraseblock *jeb = c->nextblock;
165         
166  restart:
167         if (jeb && minsize > jeb->free_size) {
168                 /* Skip the end of this block and file it as having some dirty space */
169                 /* If there's a pending write to it, flush now */
170                 if (jffs2_wbuf_dirty(c)) {
171                         spin_unlock(&c->erase_completion_lock);
172                         D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));                           
173                         jffs2_flush_wbuf_pad(c);
174                         spin_lock(&c->erase_completion_lock);
                        /* Lock was dropped above: nextblock may have changed under us */
175                         jeb = c->nextblock;
176                         goto restart;
177                 }
                /* Write off the unusable tail of the block as wasted space */
178                 c->wasted_size += jeb->free_size;
179                 c->free_size -= jeb->free_size;
180                 jeb->wasted_size += jeb->free_size;
181                 jeb->free_size = 0;
182                 
183                 /* Check, if we have a dirty block now, or if it was dirty already */
184                 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
                        /* Reclassify the wasted space as dirty so GC will consider it */
185                         c->dirty_size += jeb->wasted_size;
186                         c->wasted_size -= jeb->wasted_size;
187                         jeb->dirty_size += jeb->wasted_size;
188                         jeb->wasted_size = 0;
189                         if (VERYDIRTY(c, jeb->dirty_size)) {
190                                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
191                                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
192                                 list_add_tail(&jeb->list, &c->very_dirty_list);
193                         } else {
194                                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
195                                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
196                                 list_add_tail(&jeb->list, &c->dirty_list);
197                         }
198                 } else { 
199                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
200                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
201                         list_add_tail(&jeb->list, &c->clean_list);
202                 }
                /* Fall through to select a fresh nextblock below */
203                 c->nextblock = jeb = NULL;
204         }
205         
206         if (!jeb) {
207                 struct list_head *next;
208                 /* Take the next block off the 'free' list */
209
210                 if (list_empty(&c->free_list)) {
                        /* Nothing on the free list: try to get an erase going,
                           or fail with -ENOSPC if there is nothing to erase */
211
212                         if (!c->nr_erasing_blocks && 
213                             !list_empty(&c->erasable_list)) {
214                                 struct jffs2_eraseblock *ejeb;
215
216                                 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
217                                 list_del(&ejeb->list);
218                                 list_add_tail(&ejeb->list, &c->erase_pending_list);
219                                 c->nr_erasing_blocks++;
220                                 jffs2_erase_pending_trigger(c);
221                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
222                                           ejeb->offset));
223                         }
224
225                         if (!c->nr_erasing_blocks && 
226                             !list_empty(&c->erasable_pending_wbuf_list)) {
227                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
228                                 /* c->nextblock is NULL, no update to c->nextblock allowed */                       
229                                 spin_unlock(&c->erase_completion_lock);
230                                 jffs2_flush_wbuf_pad(c);
231                                 spin_lock(&c->erase_completion_lock);
232                                 /* Have another go. It'll be on the erasable_list now */
233                                 return -EAGAIN;
234                         }
235
236                         if (!c->nr_erasing_blocks) {
237                                 /* Ouch. We're in GC, or we wouldn't have got here.
238                                    And there's no space left. At all. */
239                                 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
240                                        c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
241                                        list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
242                                 return -ENOSPC;
243                         }
244
245                         spin_unlock(&c->erase_completion_lock);
246                         /* Don't wait for it; just erase one right now */
247                         jffs2_erase_pending_blocks(c, 1);
248                         spin_lock(&c->erase_completion_lock);
249
250                         /* An erase may have failed, decreasing the
251                            amount of free space available. So we must
252                            restart from the beginning */
253                         return -EAGAIN;
254                 }
255
256                 next = c->free_list.next;
257                 list_del(next);
258                 c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
259                 c->nr_free_blocks--;
260
                /* A block fresh off the free list should be empty except for
                   its cleanmarker; anything else indicates corrupt accounting */
261                 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
262                         printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
263                         goto restart;
264                 }
265         }
266         /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
267            enough space */
268         *ofs = jeb->offset + (c->sector_size - jeb->free_size);
269         *len = jeb->free_size;
270
271         if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
272             !jeb->first_node->next_in_ino) {
273                 /* Only node in it beforehand was a CLEANMARKER node (we think). 
274                    So mark it obsolete now that there's going to be another node
275                    in the block. This will reduce used_size to zero but We've 
276                    already set c->nextblock so that jffs2_mark_node_obsolete()
277                    won't try to refile it to the dirty_list.
278                 */
279                 spin_unlock(&c->erase_completion_lock);
280                 jffs2_mark_node_obsolete(c, jeb->first_node);
281                 spin_lock(&c->erase_completion_lock);
282         }
283
284         D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
285         return 0;
286 }
287
288 /**
289  *      jffs2_add_physical_node_ref - add a physical node reference to the list
290  *      @c: superblock info
291  *      @new: new node reference to add
 *
 *      The node's length is derived from @new itself via ref_totlen();
 *      whether it is accounted as used or dirty follows ref_obsolete(@new).
294  *
295  *      Should only be used to report nodes for which space has been allocated 
296  *      by jffs2_reserve_space.
297  *
298  *      Must be called with the alloc_sem held.
299  */
300  
301 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
302 {
303         struct jffs2_eraseblock *jeb;
304         uint32_t len;
305
306         jeb = &c->blocks[new->flash_offset / c->sector_size];
307         len = ref_totlen(c, jeb, new);
308
309         D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
310 #if 1
        /* Sanity check: the node must land in the current nextblock,
           immediately after the space handed out by the last reservation */
311         if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
312                 printk(KERN_WARNING "argh. node added in wrong place\n");
313                 jffs2_free_raw_node_ref(new);
314                 return -EINVAL;
315         }
316 #endif
317         spin_lock(&c->erase_completion_lock);
318
        /* Append the ref to the block's physical node chain */
319         if (!jeb->first_node)
320                 jeb->first_node = new;
321         if (jeb->last_node)
322                 jeb->last_node->next_phys = new;
323         jeb->last_node = new;
324
        /* Account the space: used normally, dirty if already obsolete */
325         jeb->free_size -= len;
326         c->free_size -= len;
327         if (ref_obsolete(new)) {
328                 jeb->dirty_size += len;
329                 c->dirty_size += len;
330         } else {
331                 jeb->used_size += len;
332                 c->used_size += len;
333         }
334
335         if (!jeb->free_size && !jeb->dirty_size) {
336                 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
337                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
338                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
339                 if (jffs2_wbuf_dirty(c)) {
340                         /* Flush the last write in the block if it's outstanding */
341                         spin_unlock(&c->erase_completion_lock);
342                         jffs2_flush_wbuf_pad(c);
343                         spin_lock(&c->erase_completion_lock);
344                 }
345
346                 list_add_tail(&jeb->list, &c->clean_list);
347                 c->nextblock = NULL;
348         }
349         ACCT_SANITY_CHECK(c,jeb);
350         D1(ACCT_PARANOIA_CHECK(jeb));
351
352         spin_unlock(&c->erase_completion_lock);
353
354         return 0;
355 }
356
357
/*
 * jffs2_complete_reservation - finish a reservation made by
 * jffs2_reserve_space(): hint that garbage collection may be worthwhile,
 * then release the per-filesystem allocation semaphore.  The GC trigger
 * is deliberately issued before dropping alloc_sem.
 */
358 void jffs2_complete_reservation(struct jffs2_sb_info *c)
359 {
360         D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
361         jffs2_garbage_collect_trigger(c);
362         up(&c->alloc_sem);
363 }
364
365 static inline int on_list(struct list_head *obj, struct list_head *head)
366 {
367         struct list_head *this;
368
369         list_for_each(this, head) {
370                 if (this == obj) {
371                         D1(printk("%p is on list at %p\n", obj, head));
372                         return 1;
373
374                 }
375         }
376         return 0;
377 }
378
/*
 * jffs2_mark_node_obsolete - mark @ref obsolete: update the in-core size
 * accounting for its eraseblock, refile the block onto the appropriate
 * list when its dirtiness category changes, and (where the flash type
 * permits, per jffs2_can_mark_obsolete()) clear the JFFS2_NODE_ACCURATE
 * bit in the node's on-flash header.  Takes erase_completion_lock
 * internally; must NOT be called with it held.
 */
379 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
380 {
381         struct jffs2_eraseblock *jeb;
382         int blocknr;
383         struct jffs2_unknown_node n;
384         int ret, addedsize;
385         size_t retlen;
386
387         if(!ref) {
388                 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
389                 return;
390         }
391         if (ref_obsolete(ref)) {
392                 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
393                 return;
394         }
395         blocknr = ref->flash_offset / c->sector_size;
396         if (blocknr >= c->nr_blocks) {
397                 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
398                 BUG();
399         }
400         jeb = &c->blocks[blocknr];
401
402         spin_lock(&c->erase_completion_lock);
403
        /* Remove the node's length from whichever bucket currently holds it:
           unchecked_size for REF_UNCHECKED nodes, used_size otherwise */
404         if (ref_flags(ref) == REF_UNCHECKED) {
405                 D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
                        /* NOTE(review): the message reads "unchecked_size was
                           already" but prints jeb->used_size — likely should
                           be jeb->unchecked_size; verify before relying on it */
406                         printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
407                                ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
408                         BUG();
409                 })
410                 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
411                 jeb->unchecked_size -= ref_totlen(c, jeb, ref);
412                 c->unchecked_size -= ref_totlen(c, jeb, ref);
413         } else {
414                 D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
415                         printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
416                                ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
417                         BUG();
418                 })
419                 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
420                 jeb->used_size -= ref_totlen(c, jeb, ref);
421                 c->used_size -= ref_totlen(c, jeb, ref);
422         }
423
424         // Take care, that wasted size is taken into concern
        /* Decide whether the freed space counts as dirty (GC-reclaimable)
           or merely wasted; 'addedsize' records how much dirty space this
           call added, for the refiling threshold tests below */
425         if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
426                 D1(printk("Dirtying\n"));
427                 addedsize = ref_totlen(c, jeb, ref);
428                 jeb->dirty_size += ref_totlen(c, jeb, ref);
429                 c->dirty_size += ref_totlen(c, jeb, ref);
430
431                 /* Convert wasted space to dirty, if not a bad block */
432                 if (jeb->wasted_size) {
433                         if (on_list(&jeb->list, &c->bad_used_list)) {
434                                 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
435                                           jeb->offset));
436                                 addedsize = 0; /* To fool the refiling code later */
437                         } else {
438                                 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
439                                           jeb->wasted_size, jeb->offset));
440                                 addedsize += jeb->wasted_size;
441                                 jeb->dirty_size += jeb->wasted_size;
442                                 c->dirty_size += jeb->wasted_size;
443                                 c->wasted_size -= jeb->wasted_size;
444                                 jeb->wasted_size = 0;
445                         }
446                 }
447         } else {
448                 D1(printk("Wasting\n"));
449                 addedsize = 0;
450                 jeb->wasted_size += ref_totlen(c, jeb, ref);
451                 c->wasted_size += ref_totlen(c, jeb, ref);      
452         }
        /* Flag the ref itself as obsolete in its packed flash_offset field */
453         ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
454         
455         ACCT_SANITY_CHECK(c, jeb);
456
457         D1(ACCT_PARANOIA_CHECK(jeb));
458
459         if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
460                 /* Mount in progress. Don't muck about with the block
461                    lists because they're not ready yet, and don't actually
462                    obliterate nodes that look obsolete. If they weren't 
463                    marked obsolete on the flash at the time they _became_
464                    obsolete, there was probably a reason for that. */
465                 spin_unlock(&c->erase_completion_lock);
466                 return;
467         }
468
        /* Refile the eraseblock if this obsoletion changed its category */
469         if (jeb == c->nextblock) {
470                 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
471         } else if (!jeb->used_size && !jeb->unchecked_size) {
                /* Block is now entirely obsolete: queue it for erasure */
472                 if (jeb == c->gcblock) {
473                         D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
474                         c->gcblock = NULL;
475                 } else {
476                         D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
477                         list_del(&jeb->list);
478                 }
479                 if (jffs2_wbuf_dirty(c)) {
480                         D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
481                         list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
482                 } else {
483                         if (jiffies & 127) {
484                                 /* Most of the time, we just erase it immediately. Otherwise we
485                                    spend ages scanning it on mount, etc. */
486                                 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
487                                 list_add_tail(&jeb->list, &c->erase_pending_list);
488                                 c->nr_erasing_blocks++;
489                                 jffs2_erase_pending_trigger(c);
490                         } else {
491                                 /* Sometimes, however, we leave it elsewhere so it doesn't get
492                                    immediately reused, and we spread the load a bit. */
493                                 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
494                                 list_add_tail(&jeb->list, &c->erasable_list);
495                         }                               
496                 }
497                 D1(printk(KERN_DEBUG "Done OK\n"));
498         } else if (jeb == c->gcblock) {
499                 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
500         } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
                /* Block just crossed the clean->dirty threshold */
501                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
502                 list_del(&jeb->list);
503                 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
504                 list_add_tail(&jeb->list, &c->dirty_list);
505         } else if (VERYDIRTY(c, jeb->dirty_size) &&
506                    !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
                /* Block just crossed the dirty->very-dirty threshold */
507                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
508                 list_del(&jeb->list);
509                 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
510                 list_add_tail(&jeb->list, &c->very_dirty_list);
511         } else {
512                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
513                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 
514         }                               
515
516         spin_unlock(&c->erase_completion_lock);
517
        /* The rest obliterates the node on the medium; skip it if the flash
           type can't do in-place overwrites or the fs is read-only */
518         if (!jffs2_can_mark_obsolete(c))
519                 return;
520         if (jffs2_is_readonly(c))
521                 return;
522
523         D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
524         ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
525         if (ret) {
526                 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
527                 return;
528         }
529         if (retlen != sizeof(n)) {
530                 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
531                 return;
532         }
        /* Cross-check the on-flash header against our ref before writing */
533         if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
534                 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
535                 return;
536         }
537         if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
538                 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
539                 return;
540         }
541         /* XXX FIXME: This is ugly now */
        /* Clear the ACCURATE bit in place so a later scan sees it obsolete */
542         n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
543         ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
544         if (ret) {
545                 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
546                 return;
547         }
548         if (retlen != sizeof(n)) {
549                 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
550                 return;
551         }
552 }
553
554 #if CONFIG_JFFS2_FS_DEBUG > 0
555 void jffs2_dump_block_lists(struct jffs2_sb_info *c)
556 {
557
558
559         printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
560         printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
561         printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
562         printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
563         printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
564         printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
565         printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
566         printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
567         printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
568         printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
569         printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
570
571         if (c->nextblock) {
572                 printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
573                        c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
574         } else {
575                 printk(KERN_DEBUG "nextblock: NULL\n");
576         }
577         if (c->gcblock) {
578                 printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
579                        c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
580         } else {
581                 printk(KERN_DEBUG "gcblock: NULL\n");
582         }
583         if (list_empty(&c->clean_list)) {
584                 printk(KERN_DEBUG "clean_list: empty\n");
585         } else {
586                 struct list_head *this;
587                 int     numblocks = 0;
588                 uint32_t dirty = 0;
589
590                 list_for_each(this, &c->clean_list) {
591                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
592                         numblocks ++;
593                         dirty += jeb->wasted_size;
594                         printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
595                 }
596                 printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
597         }
598         if (list_empty(&c->very_dirty_list)) {
599                 printk(KERN_DEBUG "very_dirty_list: empty\n");
600         } else {
601                 struct list_head *this;
602                 int     numblocks = 0;
603                 uint32_t dirty = 0;
604
605                 list_for_each(this, &c->very_dirty_list) {
606                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
607                         numblocks ++;
608                         dirty += jeb->dirty_size;
609                         printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
610                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
611                 }
612                 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
613                         numblocks, dirty, dirty / numblocks);
614         }
615         if (list_empty(&c->dirty_list)) {
616                 printk(KERN_DEBUG "dirty_list: empty\n");
617         } else {
618                 struct list_head *this;
619                 int     numblocks = 0;
620                 uint32_t dirty = 0;
621
622                 list_for_each(this, &c->dirty_list) {
623                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
624                         numblocks ++;
625                         dirty += jeb->dirty_size;
626                         printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
627                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
628                 }
629                 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
630                         numblocks, dirty, dirty / numblocks);
631         }
632         if (list_empty(&c->erasable_list)) {
633                 printk(KERN_DEBUG "erasable_list: empty\n");
634         } else {
635                 struct list_head *this;
636
637                 list_for_each(this, &c->erasable_list) {
638                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
639                         printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
640                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
641                 }
642         }
643         if (list_empty(&c->erasing_list)) {
644                 printk(KERN_DEBUG "erasing_list: empty\n");
645         } else {
646                 struct list_head *this;
647
648                 list_for_each(this, &c->erasing_list) {
649                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
650                         printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
651                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
652                 }
653         }
654         if (list_empty(&c->erase_pending_list)) {
655                 printk(KERN_DEBUG "erase_pending_list: empty\n");
656         } else {
657                 struct list_head *this;
658
659                 list_for_each(this, &c->erase_pending_list) {
660                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
661                         printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
662                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
663                 }
664         }
665         if (list_empty(&c->erasable_pending_wbuf_list)) {
666                 printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
667         } else {
668                 struct list_head *this;
669
670                 list_for_each(this, &c->erasable_pending_wbuf_list) {
671                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
672                         printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
673                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
674                 }
675         }
676         if (list_empty(&c->free_list)) {
677                 printk(KERN_DEBUG "free_list: empty\n");
678         } else {
679                 struct list_head *this;
680
681                 list_for_each(this, &c->free_list) {
682                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
683                         printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
684                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
685                 }
686         }
687         if (list_empty(&c->bad_list)) {
688                 printk(KERN_DEBUG "bad_list: empty\n");
689         } else {
690                 struct list_head *this;
691
692                 list_for_each(this, &c->bad_list) {
693                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
694                         printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
695                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
696                 }
697         }
698         if (list_empty(&c->bad_used_list)) {
699                 printk(KERN_DEBUG "bad_used_list: empty\n");
700         } else {
701                 struct list_head *this;
702
703                 list_for_each(this, &c->bad_used_list) {
704                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
705                         printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
706                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
707                 }
708         }
709 }
710 #endif /* CONFIG_JFFS2_FS_DEBUG */
711
712 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
713 {
714         int ret = 0;
715         uint32_t dirty;
716
717         if (c->unchecked_size) {
718                 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
719                           c->unchecked_size, c->checked_ino));
720                 return 1;
721         }
722
723         /* dirty_size contains blocks on erase_pending_list
724          * those blocks are counted in c->nr_erasing_blocks.
725          * If one block is actually erased, it is not longer counted as dirty_space
726          * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
727          * with c->nr_erasing_blocks * c->sector_size again.
728          * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
729          * This helps us to force gc and pick eventually a clean block to spread the load.
730          */
731         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
732
733         if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && 
734                         (dirty > c->nospc_dirty_size)) 
735                 ret = 1;
736
737         D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", 
738                   c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
739
740         return ret;
741 }