X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fjffs2%2Fnodemgmt.c;fp=fs%2Fjffs2%2Fnodemgmt.c;h=49127a1f045861324bab8e79570b895b7b1ab4c5;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=d88376992ed9b094cc23f7b75f7b8c07870468ba;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index d88376992..49127a1f0 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -23,12 +23,13 @@
  *	jffs2_reserve_space - request physical space to write nodes to flash
  *	@c: superblock info
  *	@minsize: Minimum acceptable size of allocation
+ *	@ofs: Returned value of node offset
  *	@len: Returned value of allocation length
  *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
  *
  *	Requests a block of physical space on the flash. Returns zero for success
- *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
- *	error if appropriate. Doesn't return len since that's
+ *	and puts 'ofs' and 'len' into the appriopriate place, or returns -ENOSPC
+ *	or other error if appropriate.
  *
  *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
  *	allocation semaphore, to prevent more than one allocation from being
@@ -39,9 +40,9 @@
  */
 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize,
-				  uint32_t *len, uint32_t sumsize);
+				  uint32_t *ofs, uint32_t *len, uint32_t sumsize);
 
-int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
 			uint32_t *len, int prio, uint32_t sumsize)
 {
 	int ret = -EAGAIN;
@@ -131,21 +132,19 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 			spin_lock(&c->erase_completion_lock);
 		}
 
-		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
 		if (ret) {
 			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
-	if (!ret)
-		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
 	if (ret)
 		up(&c->alloc_sem);
 	return ret;
 }
 
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
-			   uint32_t *len, uint32_t sumsize)
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+			   uint32_t *len, uint32_t sumsize)
 {
 	int ret = -EAGAIN;
 	minsize = PAD(minsize);
@@ -154,15 +153,12 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
 
 	spin_lock(&c->erase_completion_lock);
 	while(ret == -EAGAIN) {
-		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
 		if (ret) {
 			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
-	if (!ret)
-		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
-
 	return ret;
 }
 
@@ -211,7 +207,8 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
 			struct jffs2_eraseblock *ejeb;
 
 			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
-			list_move_tail(&ejeb->list, &c->erase_pending_list);
+			list_del(&ejeb->list);
+			list_add_tail(&ejeb->list, &c->erase_pending_list);
 			c->nr_erasing_blocks++;
 			jffs2_erase_pending_trigger(c);
 			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
@@ -262,11 +259,10 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
 }
 
 /* Called with alloc sem _and_ erase_completion_lock */
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
-				  uint32_t *len, uint32_t sumsize)
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
 {
 	struct jffs2_eraseblock *jeb = c->nextblock;
-	uint32_t reserved_size;				/* for summary information at the end of the jeb */
+	uint32_t reserved_size; 			/* for summary information at the end of the jeb */
 	int ret;
 
 restart:
@@ -316,8 +312,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 		}
 	} else {
 		if (jeb && minsize > jeb->free_size) {
-			uint32_t waste;
-
 			/* Skip the end of this block and file it as having some dirty space */
 			/* If there's a pending write to it, flush now */
 
@@ -330,26 +324,10 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 				goto restart;
 			}
 
-			spin_unlock(&c->erase_completion_lock);
-
-			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
-			if (ret)
-				return ret;
-			/* Just lock it again and continue. Nothing much can change because
-			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
-			   we hold c->erase_completion_lock in the majority of this function...
-			   but that's a question for another (more caffeine-rich) day. */
-			spin_lock(&c->erase_completion_lock);
-
-			waste = jeb->free_size;
-			jffs2_link_node_ref(c, jeb,
-					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
-					    waste, NULL);
-			/* FIXME: that made it count as dirty. Convert to wasted */
-			jeb->dirty_size -= waste;
-			c->dirty_size -= waste;
-			jeb->wasted_size += waste;
-			c->wasted_size += waste;
+			c->wasted_size += jeb->free_size;
+			c->free_size -= jeb->free_size;
+			jeb->wasted_size += jeb->free_size;
+			jeb->free_size = 0;
 
 			jffs2_close_nextblock(c, jeb);
 			jeb = NULL;
@@ -371,6 +349,7 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 	}
 
 	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has enough space */
+	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
 	*len = jeb->free_size - reserved_size;
 
 	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
@@ -386,8 +365,7 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 		spin_lock(&c->erase_completion_lock);
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
-		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
+	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
 	return 0;
 }
 
@@ -396,6 +374,7 @@
  *	@c: superblock info
  *	@new: new node reference to add
  *	@len: length of this physical node
+ *	@dirty: dirty flag for new node
  *
  *	Should only be used to report nodes for which space has been allocated
  *	by jffs2_reserve_space.
 *
@@ -403,30 +382,42 @@
  *	Must be called with the alloc_sem held.
  */
-struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
-						       uint32_t ofs, uint32_t len,
-						       struct jffs2_inode_cache *ic)
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
 {
 	struct jffs2_eraseblock *jeb;
-	struct jffs2_raw_node_ref *new;
+	uint32_t len;
 
-	jeb = &c->blocks[ofs / c->sector_size];
+	jeb = &c->blocks[new->flash_offset / c->sector_size];
+	len = ref_totlen(c, jeb, new);
 
-	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
-		  ofs & ~3, ofs & 3, len));
+	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
 #if 1
-	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
-	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
-	   even after refiling c->nextblock */
-	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
-	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
+	/* we could get some obsolete nodes after nextblock was refiled
+	   in wbuf.c */
+	if ((c->nextblock || !ref_obsolete(new))
+	    &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
 		printk(KERN_WARNING "argh. node added in wrong place\n");
-		return ERR_PTR(-EINVAL);
+		jffs2_free_raw_node_ref(new);
+		return -EINVAL;
 	}
 #endif
 	spin_lock(&c->erase_completion_lock);
 
-	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
+	if (!jeb->first_node)
+		jeb->first_node = new;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = new;
+	jeb->last_node = new;
+
+	jeb->free_size -= len;
+	c->free_size -= len;
+	if (ref_obsolete(new)) {
+		jeb->dirty_size += len;
+		c->dirty_size += len;
+	} else {
+		jeb->used_size += len;
+		c->used_size += len;
+	}
 
 	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
 		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
@@ -447,7 +438,7 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
 
 	spin_unlock(&c->erase_completion_lock);
 
-	return new;
+	return 0;
 }
 
 
@@ -479,9 +470,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	struct jffs2_unknown_node n;
 	int ret, addedsize;
 	size_t retlen;
-	uint32_t freed_len;
 
-	if(unlikely(!ref)) {
+	if(!ref) {
 		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
 		return;
 	}
@@ -509,34 +499,32 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	spin_lock(&c->erase_completion_lock);
 
-	freed_len = ref_totlen(c, jeb, ref);
-
 	if (ref_flags(ref) == REF_UNCHECKED) {
-		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
+		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
 			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
-			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
-		jeb->unchecked_size -= freed_len;
-		c->unchecked_size -= freed_len;
+		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
+		c->unchecked_size -= ref_totlen(c, jeb, ref);
 	} else {
-		D1(if (unlikely(jeb->used_size < freed_len)) {
+		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
 			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
-			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
-		jeb->used_size -= freed_len;
-		c->used_size -= freed_len;
+		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->used_size -= ref_totlen(c, jeb, ref);
+		c->used_size -= ref_totlen(c, jeb, ref);
 	}
 
 	// Take care, that wasted size is taken into concern
-	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
-		D1(printk("Dirtying\n"));
-		addedsize = freed_len;
-		jeb->dirty_size += freed_len;
-		c->dirty_size += freed_len;
+	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+		D1(printk(KERN_DEBUG "Dirtying\n"));
+		addedsize = ref_totlen(c, jeb, ref);
+		jeb->dirty_size += ref_totlen(c, jeb, ref);
+		c->dirty_size += ref_totlen(c, jeb, ref);
 
 		/* Convert wasted space to dirty, if not a bad block */
 		if (jeb->wasted_size) {
@@ -555,10 +543,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 			}
 		}
 	} else {
-		D1(printk("Wasting\n"));
+		D1(printk(KERN_DEBUG "Wasting\n"));
 		addedsize = 0;
-		jeb->wasted_size += freed_len;
-		c->wasted_size += freed_len;
+		jeb->wasted_size += ref_totlen(c, jeb, ref);
+		c->wasted_size += ref_totlen(c, jeb, ref);
 	}
 
 	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
@@ -634,7 +622,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
 	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
 	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
-	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
+	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */
 
 	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
 	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
@@ -646,8 +634,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
 		goto out_erase_sem;
 	}
-	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
-		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
+	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
+		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
 		goto out_erase_sem;
 	}
 	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
@@ -689,23 +677,57 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		*p = ref->next_in_ino;
 		ref->next_in_ino = NULL;
 
-		switch (ic->class) {
-#ifdef CONFIG_JFFS2_FS_XATTR
-			case RAWNODE_CLASS_XATTR_DATUM:
-				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
-				break;
-			case RAWNODE_CLASS_XATTR_REF:
-				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
-				break;
-#endif
-			default:
-				if (ic->nodes == (void *)ic && ic->nlink == 0)
-					jffs2_del_ino_cache(c, ic);
-				break;
+		if (ic->nodes == (void *)ic && ic->nlink == 0)
+			jffs2_del_ino_cache(c, ic);
+
+		spin_unlock(&c->erase_completion_lock);
+	}
+
+
+	/* Merge with the next node in the physical list, if there is one
+	   and if it's also obsolete and if it doesn't belong to any inode */
+	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
+	    !ref->next_phys->next_in_ino) {
+		struct jffs2_raw_node_ref *n = ref->next_phys;
+
+		spin_lock(&c->erase_completion_lock);
+
+		ref->__totlen += n->__totlen;
+		ref->next_phys = n->next_phys;
+		if (jeb->last_node == n) jeb->last_node = ref;
+		if (jeb->gc_node == n) {
+			/* gc will be happy continuing gc on this node */
+			jeb->gc_node=ref;
 		}
 		spin_unlock(&c->erase_completion_lock);
+
+		jffs2_free_raw_node_ref(n);
 	}
+	/* Also merge with the previous node in the list, if there is one
+	   and that one is obsolete */
+	if (ref != jeb->first_node ) {
+		struct jffs2_raw_node_ref *p = jeb->first_node;
+
+		spin_lock(&c->erase_completion_lock);
+
+		while (p->next_phys != ref)
+			p = p->next_phys;
+
+		if (ref_obsolete(p) && !ref->next_in_ino) {
+			p->__totlen += ref->__totlen;
+			if (jeb->last_node == ref) {
+				jeb->last_node = p;
+			}
+			if (jeb->gc_node == ref) {
+				/* gc will be happy continuing gc on this node */
+				jeb->gc_node=p;
+			}
+			p->next_phys = ref->next_phys;
+			jffs2_free_raw_node_ref(ref);
+		}
+		spin_unlock(&c->erase_completion_lock);
+	}
 
  out_erase_sem:
 	up(&c->erase_free_sem);
 }
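The "+" side of this patch restores the older allocation interface: jffs2_reserve_space() hands back both the flash offset ('ofs') and the usable length, and the caller later reports the written node with jffs2_add_physical_node_ref() passing a jffs2_raw_node_ref it allocated itself. Below is a minimal caller-side sketch of that flow, not part of the patch and not the actual write.c code: the helper name write_one_node() is hypothetical, error handling is trimmed, and the REF_NORMAL flagging is a simplification.

/* Illustrative sketch only -- assumes the post-patch ("+") API of nodemgmt.c.
 * write_one_node() is an invented helper; only the JFFS2 calls it uses
 * (jffs2_reserve_space, jffs2_flash_write, jffs2_add_physical_node_ref,
 * jffs2_complete_reservation, jffs2_alloc/free_raw_node_ref) are real. */
#include "nodelist.h"

static int write_one_node(struct jffs2_sb_info *c, const void *buf, uint32_t len)
{
	struct jffs2_raw_node_ref *raw;
	uint32_t ofs, alloclen;
	size_t retlen;
	int ret;

	/* On success this downs c->alloc_sem and fills in ofs/alloclen */
	ret = jffs2_reserve_space(c, len, &ofs, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret)
		return ret;

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_complete_reservation(c);
		return -ENOMEM;
	}

	raw->flash_offset = ofs;	/* low two bits carry the REF_* flags */
	raw->__totlen = PAD(len);
	raw->next_phys = NULL;
	raw->next_in_ino = NULL;

	ret = jffs2_flash_write(c, ofs, len, &retlen, buf);
	if (ret || retlen != len) {
		jffs2_free_raw_node_ref(raw);
		jffs2_complete_reservation(c);
		return ret ? ret : -EIO;
	}

	raw->flash_offset |= REF_NORMAL;
	/* Links the ref into the eraseblock's physical list and moves 'len'
	   bytes from jeb->free_size into used_size (or dirty_size if obsolete) */
	ret = jffs2_add_physical_node_ref(c, raw);

	jffs2_complete_reservation(c);	/* releases c->alloc_sem */
	return ret;
}

The design point the patch reverses is visible here: on the "-" side the node ref was allocated and linked inside nodemgmt.c (jffs2_link_node_ref / jffs2_prealloc_raw_node_refs), whereas on the "+" side each caller builds its own jffs2_raw_node_ref before reporting it.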
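The accounting that jffs2_add_physical_node_ref() and jffs2_do_reserve_space() perform on the "+" side keeps four per-eraseblock counters (free, used, dirty, wasted) summing to the sector size. The toy program below is not JFFS2 code -- 'struct eb' and its helpers are invented for illustration -- it only mirrors that bookkeeping so the invariant is easy to see.

/* Toy, self-contained model of the per-eraseblock accounting in nodemgmt.c.
 * Invented types/helpers; only the invariant reflects the patch. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct eb {
	uint32_t sector_size;
	uint32_t free_size, used_size, dirty_size, wasted_size;
};

/* Mirrors the "+" accounting in jffs2_add_physical_node_ref() */
static void add_node(struct eb *jeb, uint32_t len, bool obsolete)
{
	jeb->free_size -= len;
	if (obsolete)
		jeb->dirty_size += len;
	else
		jeb->used_size += len;
}

/* Mirrors the "+" code in jffs2_do_reserve_space() when a block's tail is skipped */
static void waste_tail(struct eb *jeb)
{
	jeb->wasted_size += jeb->free_size;
	jeb->free_size = 0;
}

static void check(const struct eb *jeb)
{
	assert(jeb->free_size + jeb->used_size +
	       jeb->dirty_size + jeb->wasted_size == jeb->sector_size);
}

int main(void)
{
	struct eb jeb = { .sector_size = 0x20000, .free_size = 0x20000 };

	add_node(&jeb, 0x44, false);   /* a live node */
	add_node(&jeb, 0x1000, true);  /* an already-obsolete node */
	waste_tail(&jeb);              /* give up on the remainder of the block */
	check(&jeb);

	printf("used 0x%x dirty 0x%x wasted 0x%x free 0x%x\n",
	       jeb.used_size, jeb.dirty_size, jeb.wasted_size, jeb.free_size);
	return 0;
}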