fs/jffs2/nodemgmt.c (linux-2.6.6)
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@redhat.com>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  * $Id: nodemgmt.c,v 1.102 2003/10/08 17:21:19 dwmw2 Exp $
11  *
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
19 #include "nodelist.h"
20
21 /**
22  *      jffs2_reserve_space - request physical space to write nodes to flash
23  *      @c: superblock info
24  *      @minsize: Minimum acceptable size of allocation
25  *      @ofs: Returned value of node offset
26  *      @len: Returned value of allocation length
27  *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
28  *
29  *      Requests a block of physical space on the flash. Returns zero for success
30  *      and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31  *      or other error if appropriate.
32  *
33  *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34  *      allocation semaphore, to prevent more than one allocation from being
35  *      active at any time. The semaphore is later released by jffs2_complete_reservation().
36  *
37  *      jffs2_reserve_space() may trigger garbage collection in order to make room
38  *      for the requested allocation.
39  */
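/*
 *      Typical caller sequence (illustrative sketch; 'datalen' is a placeholder,
 *      'raw' stands for the jffs2_raw_node_ref describing the node just written,
 *      and error handling is omitted):
 *
 *              uint32_t ofs, alloclen;
 *              int ret = jffs2_reserve_space(c, datalen, &ofs, &alloclen, ALLOC_NORMAL);
 *              if (!ret) {
 *                      ... write the node to flash at 'ofs', at most 'alloclen' bytes ...
 *                      jffs2_add_physical_node_ref(c, raw);
 *                      jffs2_complete_reservation(c);
 *              }
 */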
40
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len);
42
43 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
44 {
45         int ret = -EAGAIN;
46         int blocksneeded = c->resv_blocks_write;
47         /* align it */
48         minsize = PAD(minsize);
49
50         D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
51         down(&c->alloc_sem);
52
53         D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
54
55         spin_lock(&c->erase_completion_lock);
56
57         /* this needs a little more thought (true <tglx> :)) */
58         while(ret == -EAGAIN) {
59                 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
60                         int ret;
61                         uint32_t dirty, avail;
62
63                         /* Calculate the real dirty size.
64                          * dirty_size includes blocks on the erase_pending_list;
65                          * those blocks are also counted in c->nr_erasing_blocks.
66                          * Once a block is actually erased it is no longer counted as dirty_space,
67                          * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
68                          * and subtract c->nr_erasing_blocks * c->sector_size again.
69                          * Blocks on the erasable_list count towards dirty_size, but not towards c->nr_erasing_blocks.
70                          * This helps us force GC and eventually pick a clean block, spreading the load.
71                          * We add unchecked_size here, as we hope to find some usable space there.
72                          * This affects the sum only once, as GC finishes checking
73                          * nodes first.
74                          */
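                        /* Worked example with hypothetical numbers: with sector_size = 0x10000,
                         * dirty_size = 0x18000, erasing_size = 0x20000 (nr_erasing_blocks = 2)
                         * and unchecked_size = 0x4000, this gives
                         * dirty = 0x18000 + 0x20000 - 2 * 0x10000 + 0x4000 = 0x1c000.
                         */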
75                         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
76                         if (dirty < c->nospc_dirty_size) {
77                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
78                                         printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
79                                         break;
80                                 }
81                                 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
82                                           dirty, c->unchecked_size, c->nospc_dirty_size));
83
84                                 spin_unlock(&c->erase_completion_lock);
85                                 up(&c->alloc_sem);
86                                 return -ENOSPC;
87                         }
88                         
89                         /* Calculate the possibly available space. "Possibly available" means we
90                          * don't know whether unchecked_size contains obsoleted nodes, which could give us
91                          * some more usable space. This affects the sum only once, as GC finishes checking
92                          * nodes first.
93                          * Return -ENOSPC if the maximum possibly available space is less than or equal to
94                          * blocksneeded * sector_size.
95                          * This prevents endless GC looping on a nearly-full filesystem, even if
96                          * the check above passes.
97                          */
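                        /* Example with hypothetical numbers: free_size = 0x8000, dirty_size = 0x18000,
                         * erasing_size = 0x20000 and unchecked_size = 0x4000 give avail = 0x44000;
                         * with sector_size = 0x10000 that is four whole sectors, so a blocksneeded
                         * of 5 would make this check return -ENOSPC.
                         */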
98                         avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
99                         if ( (avail / c->sector_size) <= blocksneeded) {
100                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
101                                         printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
102                                         break;
103                                 }
104
105                                 D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
106                                           avail, blocksneeded * c->sector_size));
107                                 spin_unlock(&c->erase_completion_lock);
108                                 up(&c->alloc_sem);
109                                 return -ENOSPC;
110                         }
111
112                         up(&c->alloc_sem);
113
114                         D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
115                                   c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
116                                   c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
117                         spin_unlock(&c->erase_completion_lock);
118                         
119                         ret = jffs2_garbage_collect_pass(c);
120                         if (ret)
121                                 return ret;
122
123                         cond_resched();
124
125                         if (signal_pending(current))
126                                 return -EINTR;
127
128                         down(&c->alloc_sem);
129                         spin_lock(&c->erase_completion_lock);
130                 }
131
132                 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
133                 if (ret) {
134                         D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
135                 }
136         }
137         spin_unlock(&c->erase_completion_lock);
138         if (ret)
139                 up(&c->alloc_sem);
140         return ret;
141 }
142
143 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
144 {
145         int ret = -EAGAIN;
146         minsize = PAD(minsize);
147
148         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
149
150         spin_lock(&c->erase_completion_lock);
151         while(ret == -EAGAIN) {
152                 ret = jffs2_do_reserve_space(c, minsize, ofs, len);
153                 if (ret) {
154                         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
155                 }
156         }
157         spin_unlock(&c->erase_completion_lock);
158         return ret;
159 }
160
161 /* Called with alloc sem _and_ erase_completion_lock */
162 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
163 {
164         struct jffs2_eraseblock *jeb = c->nextblock;
165         
166  restart:
167         if (jeb && minsize > jeb->free_size) {
168                 /* Skip the end of this block and file it as having some dirty space */
169                 /* If there's a pending write to it, flush now */
170                 if (jffs2_wbuf_dirty(c)) {
171                         spin_unlock(&c->erase_completion_lock);
172                         D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));                           
173                         jffs2_flush_wbuf_pad(c);
174                         spin_lock(&c->erase_completion_lock);
175                         jeb = c->nextblock;
176                         goto restart;
177                 }
178                 c->wasted_size += jeb->free_size;
179                 c->free_size -= jeb->free_size;
180                 jeb->wasted_size += jeb->free_size;
181                 jeb->free_size = 0;
182                 
183                 /* Check whether we have a dirty block now, or whether it was dirty already */
184                 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
185                         c->dirty_size += jeb->wasted_size;
186                         c->wasted_size -= jeb->wasted_size;
187                         jeb->dirty_size += jeb->wasted_size;
188                         jeb->wasted_size = 0;
189                         if (VERYDIRTY(c, jeb->dirty_size)) {
190                                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
191                                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
192                                 list_add_tail(&jeb->list, &c->very_dirty_list);
193                         } else {
194                                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
195                                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
196                                 list_add_tail(&jeb->list, &c->dirty_list);
197                         }
198                 } else { 
199                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
200                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
201                         list_add_tail(&jeb->list, &c->clean_list);
202                 }
203                 c->nextblock = jeb = NULL;
204         }
205         
206         if (!jeb) {
207                 struct list_head *next;
208                 /* Take the next block off the 'free' list */
209
210                 if (list_empty(&c->free_list)) {
211
212                         DECLARE_WAITQUEUE(wait, current);
213                         
214                         if (!c->nr_erasing_blocks && 
215                             !list_empty(&c->erasable_list)) {
216                                 struct jffs2_eraseblock *ejeb;
217
218                                 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
219                                 list_del(&ejeb->list);
220                                 list_add_tail(&ejeb->list, &c->erase_pending_list);
221                                 c->nr_erasing_blocks++;
222                                 jffs2_erase_pending_trigger(c);
223                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
224                                           ejeb->offset));
225                         }
226
227                         if (!c->nr_erasing_blocks && 
228                             !list_empty(&c->erasable_pending_wbuf_list)) {
229                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
230                                 /* c->nextblock is NULL, no update to c->nextblock allowed */                       
231                                 spin_unlock(&c->erase_completion_lock);
232                                 jffs2_flush_wbuf_pad(c);
233                                 spin_lock(&c->erase_completion_lock);
234                                 /* Have another go. It'll be on the erasable_list now */
235                                 return -EAGAIN;
236                         }
237
238                         if (!c->nr_erasing_blocks) {
239                                 /* Ouch. We're in GC, or we wouldn't have got here.
240                                    And there's no space left. At all. */
241                                 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
242                                        c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
243                                        list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
244                                 return -ENOSPC;
245                         }
246                         /* Make sure this can't deadlock. Someone has to start the erases
247                            of erase_pending blocks */
248 #ifdef __ECOS
249                         /* In eCos, we don't have a handy kernel thread doing the erases for
250                            us. We do them ourselves right now. */
251                         jffs2_erase_pending_blocks(c);
252 #else
253                         set_current_state(TASK_INTERRUPTIBLE);
254                         add_wait_queue(&c->erase_wait, &wait);
255                         D1(printk(KERN_DEBUG "Waiting for erases to complete. erasing_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
256                                   c->nr_erasing_blocks, list_empty(&c->erasable_list)?"yes":"no",
257                                   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"));
258                         if (!list_empty(&c->erase_pending_list)) {
259                                 D1(printk(KERN_DEBUG "Triggering pending erases\n"));
260                                 jffs2_erase_pending_trigger(c);
261                         }
262                         spin_unlock(&c->erase_completion_lock);
263                         schedule();
264                         remove_wait_queue(&c->erase_wait, &wait);
265                         spin_lock(&c->erase_completion_lock);
266                         if (signal_pending(current)) {
267                                 return -EINTR;
268                         }
269 #endif
270                         /* An erase may have failed, decreasing the
271                            amount of free space available. So we must
272                            restart from the beginning */
273                         return -EAGAIN;
274                 }
275
276                 next = c->free_list.next;
277                 list_del(next);
278                 c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
279                 c->nr_free_blocks--;
280
281                 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
282                         printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
283                         goto restart;
284                 }
285         }
286         /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
287            enough space */
288         *ofs = jeb->offset + (c->sector_size - jeb->free_size);
289         *len = jeb->free_size;
290
291         if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
292             !jeb->first_node->next_in_ino) {
293                 /* Only node in it beforehand was a CLEANMARKER node (we think). 
294                    So mark it obsolete now that there's going to be another node
295                    in the block. This will reduce used_size to zero, but we've
296                    already set c->nextblock so that jffs2_mark_node_obsolete()
297                    won't try to refile it to the dirty_list.
298                 */
299                 spin_unlock(&c->erase_completion_lock);
300                 jffs2_mark_node_obsolete(c, jeb->first_node);
301                 spin_lock(&c->erase_completion_lock);
302         }
303
304         D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
305         return 0;
306 }
307
308 /**
309  *      jffs2_add_physical_node_ref - add a physical node reference to the list
310  *      @c: superblock info
311  *      @new: new node reference to add (its length is taken from @new->totlen)
314  *
315  *      Should only be used to report nodes for which space has been allocated 
316  *      by jffs2_reserve_space.
317  *
318  *      Must be called with the alloc_sem held.
319  */
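/*
 *      Accounting sketch: for a node of length @new->totlen, both the per-block
 *      and the filesystem-wide free_size shrink by that length, and either
 *      used_size (live node) or dirty_size (already-obsolete node) grows by it.
 */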
320  
321 int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
322 {
323         struct jffs2_eraseblock *jeb;
324         uint32_t len = new->totlen;
325
326         jeb = &c->blocks[new->flash_offset / c->sector_size];
327         D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
328 #if 1
329         if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
330                 printk(KERN_WARNING "argh. node added in wrong place\n");
331                 jffs2_free_raw_node_ref(new);
332                 return -EINVAL;
333         }
334 #endif
335         spin_lock(&c->erase_completion_lock);
336
337         if (!jeb->first_node)
338                 jeb->first_node = new;
339         if (jeb->last_node)
340                 jeb->last_node->next_phys = new;
341         jeb->last_node = new;
342
343         jeb->free_size -= len;
344         c->free_size -= len;
345         if (ref_obsolete(new)) {
346                 jeb->dirty_size += len;
347                 c->dirty_size += len;
348         } else {
349                 jeb->used_size += len;
350                 c->used_size += len;
351         }
352
353         if (!jeb->free_size && !jeb->dirty_size) {
354                 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
355                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
356                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
357                 if (jffs2_wbuf_dirty(c)) {
358                         /* Flush the last write in the block if it's outstanding */
359                         spin_unlock(&c->erase_completion_lock);
360                         jffs2_flush_wbuf_pad(c);
361                         spin_lock(&c->erase_completion_lock);
362                 }
363
364                 list_add_tail(&jeb->list, &c->clean_list);
365                 c->nextblock = NULL;
366         }
367         ACCT_SANITY_CHECK(c,jeb);
368         D1(ACCT_PARANOIA_CHECK(jeb));
369
370         spin_unlock(&c->erase_completion_lock);
371
372         return 0;
373 }
374
375
376 void jffs2_complete_reservation(struct jffs2_sb_info *c)
377 {
378         D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
379         jffs2_garbage_collect_trigger(c);
380         up(&c->alloc_sem);
381 }
382
383 static inline int on_list(struct list_head *obj, struct list_head *head)
384 {
385         struct list_head *this;
386
387         list_for_each(this, head) {
388                 if (this == obj) {
389                         D1(printk("%p is on list at %p\n", obj, head));
390                         return 1;
391
392                 }
393         }
394         return 0;
395 }
396
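/*
 * Mark @ref obsolete: move its length out of used_size (or unchecked_size for
 * a node that was never checked) into dirty_size or wasted_size, refile the
 * eraseblock onto the appropriate list if necessary and, where the flash type
 * allows it, clear the JFFS2_NODE_ACCURATE bit in the node header on the
 * medium so the node is ignored on the next scan.
 */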
397 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
398 {
399         struct jffs2_eraseblock *jeb;
400         int blocknr;
401         struct jffs2_unknown_node n;
402         int ret, addedsize;
403         size_t retlen;
404
405         if(!ref) {
406                 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
407                 return;
408         }
409         if (ref_obsolete(ref)) {
410                 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
411                 return;
412         }
413         blocknr = ref->flash_offset / c->sector_size;
414         if (blocknr >= c->nr_blocks) {
415                 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
416                 BUG();
417         }
418         jeb = &c->blocks[blocknr];
419
420         spin_lock(&c->erase_completion_lock);
421
422         if (ref_flags(ref) == REF_UNCHECKED) {
423                 D1(if (unlikely(jeb->unchecked_size < ref->totlen)) {
424                         printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
425                                ref->totlen, blocknr, ref->flash_offset, jeb->unchecked_size);
426                         BUG();
427                 })
428                 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
429                 jeb->unchecked_size -= ref->totlen;
430                 c->unchecked_size -= ref->totlen;
431         } else {
432                 D1(if (unlikely(jeb->used_size < ref->totlen)) {
433                         printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
434                                ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
435                         BUG();
436                 })
437                 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
438                 jeb->used_size -= ref->totlen;
439                 c->used_size -= ref->totlen;
440         }
441
442         /* Take care that the wasted size is taken into account */
443         if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref->totlen)) && jeb != c->nextblock) {
444                 D1(printk("Dirtying\n"));
445                 addedsize = ref->totlen;
446                 jeb->dirty_size += ref->totlen;
447                 c->dirty_size += ref->totlen;
448
449                 /* Convert wasted space to dirty, if not a bad block */
450                 if (jeb->wasted_size) {
451                         if (on_list(&jeb->list, &c->bad_used_list)) {
452                                 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
453                                           jeb->offset));
454                                 addedsize = 0; /* To fool the refiling code later */
455                         } else {
456                                 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
457                                           jeb->wasted_size, jeb->offset));
458                                 addedsize += jeb->wasted_size;
459                                 jeb->dirty_size += jeb->wasted_size;
460                                 c->dirty_size += jeb->wasted_size;
461                                 c->wasted_size -= jeb->wasted_size;
462                                 jeb->wasted_size = 0;
463                         }
464                 }
465         } else {
466                 D1(printk("Wasting\n"));
467                 addedsize = 0;
468                 jeb->wasted_size += ref->totlen;
469                 c->wasted_size += ref->totlen;  
470         }
471         ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
472         
473         ACCT_SANITY_CHECK(c, jeb);
474
475         D1(ACCT_PARANOIA_CHECK(jeb));
476
477         if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
478                 /* Mount in progress. Don't muck about with the block
479                    lists because they're not ready yet, and don't actually
480                    obliterate nodes that look obsolete. If they weren't 
481                    marked obsolete on the flash at the time they _became_
482                    obsolete, there was probably a reason for that. */
483                 spin_unlock(&c->erase_completion_lock);
484                 return;
485         }
486
487         if (jeb == c->nextblock) {
488                 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
489         } else if (!jeb->used_size && !jeb->unchecked_size) {
490                 if (jeb == c->gcblock) {
491                         D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
492                         c->gcblock = NULL;
493                 } else {
494                         D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
495                         list_del(&jeb->list);
496                 }
497                 if (jffs2_wbuf_dirty(c)) {
498                         D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
499                         list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
500 #if 0 /* This check was added to allow us to find places where we added nodes to the lists
501          after dropping the alloc_sem, and it did that just fine. But it also caused us to
502          lock the alloc_sem in other places, like clear_inode(), when we wouldn't otherwise
503          have needed to. So I suspect it's outlived its usefulness. Thomas? */
504
505                         /* We've changed the rules slightly. After
506                            writing a node you now mustn't drop the
507                            alloc_sem before you've finished all the
508                            list management - this is so that when we
509                            get here, we know that no other nodes have
510                            been written, and the above check on wbuf
511                            is valid - wbuf_len is nonzero IFF the node
512                            which obsoletes this node is still in the
513                            wbuf.
514
515                            So we BUG() if that new rule is broken, to
516                            make sure we catch it and fix it.
517                         */
518                         if (!down_trylock(&c->alloc_sem)) {
519                                 up(&c->alloc_sem);
520                                 printk(KERN_CRIT "jffs2_mark_node_obsolete() called with wbuf active but alloc_sem not locked!\n");
521                                 BUG();
522                         }
523 #endif
524                 } else {
525                         if (jiffies & 127) {
526                                 /* Most of the time, we just erase it immediately. Otherwise we
527                                    spend ages scanning it on mount, etc. */
528                                 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
529                                 list_add_tail(&jeb->list, &c->erase_pending_list);
530                                 c->nr_erasing_blocks++;
531                                 jffs2_erase_pending_trigger(c);
532                         } else {
533                                 /* Sometimes, however, we leave it elsewhere so it doesn't get
534                                    immediately reused, and we spread the load a bit. */
535                                 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
536                                 list_add_tail(&jeb->list, &c->erasable_list);
537                         }                               
538                 }
539                 D1(printk(KERN_DEBUG "Done OK\n"));
540         } else if (jeb == c->gcblock) {
541                 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
542         } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
543                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
544                 list_del(&jeb->list);
545                 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
546                 list_add_tail(&jeb->list, &c->dirty_list);
547         } else if (VERYDIRTY(c, jeb->dirty_size) &&
548                    !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
549                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
550                 list_del(&jeb->list);
551                 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
552                 list_add_tail(&jeb->list, &c->very_dirty_list);
553         } else {
554                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
555                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 
556         }                               
557
558         spin_unlock(&c->erase_completion_lock);
559
560         if (!jffs2_can_mark_obsolete(c))
561                 return;
562         if (jffs2_is_readonly(c))
563                 return;
564
565         D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
566         ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
567         if (ret) {
568                 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
569                 return;
570         }
571         if (retlen != sizeof(n)) {
572                 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
573                 return;
574         }
575         if (PAD(je32_to_cpu(n.totlen)) != PAD(ref->totlen)) {
576                 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen in node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref->totlen);
577                 return;
578         }
579         if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
580                 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
581                 return;
582         }
583         /* XXX FIXME: This is ugly now */
584         n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
585         ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
586         if (ret) {
587                 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
588                 return;
589         }
590         if (retlen != sizeof(n)) {
591                 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
592                 return;
593         }
594 }
595
596 #if CONFIG_JFFS2_FS_DEBUG > 0
597 void jffs2_dump_block_lists(struct jffs2_sb_info *c)
598 {
599
600
601         printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
602         printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
603         printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
604         printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
605         printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
606         printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
607         printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
608         printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
609         printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
610         printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
611         printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
612
613         if (c->nextblock) {
614                 printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
615                        c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
616         } else {
617                 printk(KERN_DEBUG "nextblock: NULL\n");
618         }
619         if (c->gcblock) {
620                 printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
621                        c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
622         } else {
623                 printk(KERN_DEBUG "gcblock: NULL\n");
624         }
625         if (list_empty(&c->clean_list)) {
626                 printk(KERN_DEBUG "clean_list: empty\n");
627         } else {
628                 struct list_head *this;
629                 int     numblocks = 0;
630                 uint32_t dirty = 0;
631
632                 list_for_each(this, &c->clean_list) {
633                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
634                         numblocks ++;
635                         dirty += jeb->wasted_size;
636                         printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
637                 }
638                 printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
639         }
640         if (list_empty(&c->very_dirty_list)) {
641                 printk(KERN_DEBUG "very_dirty_list: empty\n");
642         } else {
643                 struct list_head *this;
644                 int     numblocks = 0;
645                 uint32_t dirty = 0;
646
647                 list_for_each(this, &c->very_dirty_list) {
648                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
649                         numblocks ++;
650                         dirty += jeb->dirty_size;
651                         printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
652                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
653                 }
654                 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
655                         numblocks, dirty, dirty / numblocks);
656         }
657         if (list_empty(&c->dirty_list)) {
658                 printk(KERN_DEBUG "dirty_list: empty\n");
659         } else {
660                 struct list_head *this;
661                 int     numblocks = 0;
662                 uint32_t dirty = 0;
663
664                 list_for_each(this, &c->dirty_list) {
665                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
666                         numblocks ++;
667                         dirty += jeb->dirty_size;
668                         printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
669                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
670                 }
671                 printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
672                         numblocks, dirty, dirty / numblocks);
673         }
674         if (list_empty(&c->erasable_list)) {
675                 printk(KERN_DEBUG "erasable_list: empty\n");
676         } else {
677                 struct list_head *this;
678
679                 list_for_each(this, &c->erasable_list) {
680                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
681                         printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
682                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
683                 }
684         }
685         if (list_empty(&c->erasing_list)) {
686                 printk(KERN_DEBUG "erasing_list: empty\n");
687         } else {
688                 struct list_head *this;
689
690                 list_for_each(this, &c->erasing_list) {
691                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
692                         printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
693                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
694                 }
695         }
696         if (list_empty(&c->erase_pending_list)) {
697                 printk(KERN_DEBUG "erase_pending_list: empty\n");
698         } else {
699                 struct list_head *this;
700
701                 list_for_each(this, &c->erase_pending_list) {
702                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
703                         printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
704                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
705                 }
706         }
707         if (list_empty(&c->erasable_pending_wbuf_list)) {
708                 printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
709         } else {
710                 struct list_head *this;
711
712                 list_for_each(this, &c->erasable_pending_wbuf_list) {
713                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
714                         printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
715                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
716                 }
717         }
718         if (list_empty(&c->free_list)) {
719                 printk(KERN_DEBUG "free_list: empty\n");
720         } else {
721                 struct list_head *this;
722
723                 list_for_each(this, &c->free_list) {
724                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
725                         printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
726                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
727                 }
728         }
729         if (list_empty(&c->bad_list)) {
730                 printk(KERN_DEBUG "bad_list: empty\n");
731         } else {
732                 struct list_head *this;
733
734                 list_for_each(this, &c->bad_list) {
735                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
736                         printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
737                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
738                 }
739         }
740         if (list_empty(&c->bad_used_list)) {
741                 printk(KERN_DEBUG "bad_used_list: empty\n");
742         } else {
743                 struct list_head *this;
744
745                 list_for_each(this, &c->bad_used_list) {
746                         struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
747                         printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
748                                jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
749                 }
750         }
751 }
752 #endif /* CONFIG_JFFS2_FS_DEBUG */