X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fjffs2%2Fbackground.c;h=7b77a9541125b7b62aa01d0cf3961f45cb1ca47c;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=621231064418f7f11a2c925d2c4a7d85dbdd9379;hpb=daddc0d38b3571bed170afa273a49a0eba090c1e;p=linux-2.6.git

diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 621231064..7b77a9541 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 2001-2003 Red Hat, Inc.
  *
- * Created by David Woodhouse
+ * Created by David Woodhouse
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: background.c,v 1.44 2003/10/08 13:29:55 dwmw2 Exp $
+ * $Id: background.c,v 1.54 2005/05/20 21:37:12 gleixner Exp $
  *
  */
 
@@ -15,17 +15,16 @@
 #include
 #include
 #include
-#include
+#include
 #include "nodelist.h"
 
 
 static int jffs2_garbage_collect_thread(void *);
-static int thread_should_wake(struct jffs2_sb_info *c);
 
 void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
 {
 	spin_lock(&c->erase_completion_lock);
-	if (c->gc_task && thread_should_wake(c))
+	if (c->gc_task && jffs2_thread_should_wake(c))
 		send_sig(SIGHUP, c->gc_task, 1);
 	spin_unlock(&c->erase_completion_lock);
 }
@@ -39,7 +38,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
 	if (c->gc_task)
 		BUG();
 
-	init_MUTEX_LOCKED(&c->gc_thread_start);
+	init_completion(&c->gc_thread_start);
 	init_completion(&c->gc_thread_exit);
 
 	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
@@ -50,21 +49,24 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
 	} else {
 		/* Wait for it... */
 		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
-		down(&c->gc_thread_start);
+		wait_for_completion(&c->gc_thread_start);
 	}
- 
+
 	return ret;
 }
 
 void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
 {
+	int wait = 0;
 	spin_lock(&c->erase_completion_lock);
 	if (c->gc_task) {
 		D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
 		send_sig(SIGKILL, c->gc_task, 1);
+		wait = 1;
 	}
 	spin_unlock(&c->erase_completion_lock);
-	wait_for_completion(&c->gc_thread_exit);
+	if (wait)
+		wait_for_completion(&c->gc_thread_exit);
 }
 
 static int jffs2_garbage_collect_thread(void *_c)
@@ -77,33 +79,29 @@ static int jffs2_garbage_collect_thread(void *_c)
 	allow_signal(SIGCONT);
 
 	c->gc_task = current;
-	up(&c->gc_thread_start);
+	complete(&c->gc_thread_start);
 
 	set_user_nice(current, 10);
 
 	for (;;) {
 		allow_signal(SIGHUP);
 
-		if (!thread_should_wake(c)) {
+		if (!jffs2_thread_should_wake(c)) {
 			set_current_state (TASK_INTERRUPTIBLE);
 			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
-			/* Yes, there's a race here; we checked thread_should_wake() before
-			   setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
+			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
 			   matter - We don't care if we miss a wakeup, because the GC thread
 			   is only an optimisation anyway. */
 			schedule();
 		}
 
-		if (current->flags & PF_FREEZE) {
-			refrigerator(0);
-			/* refrigerator() should recalc sigpending for us
-			   but doesn't. No matter - allow_signal() will. */
+		if (try_to_freeze())
 			continue;
-		}
 
 		cond_resched();
 
-		/* Put_super will send a SIGKILL and then wait on the sem. 
+		/* Put_super will send a SIGKILL and then wait on the sem.
 		 */
 		while (signal_pending(current)) {
 			siginfo_t info;
@@ -144,34 +142,3 @@ static int jffs2_garbage_collect_thread(void *_c)
 	spin_unlock(&c->erase_completion_lock);
 	complete_and_exit(&c->gc_thread_exit, 0);
 }
-
-static int thread_should_wake(struct jffs2_sb_info *c)
-{
-	int ret = 0;
-	uint32_t dirty;
-
-	if (c->unchecked_size) {
-		D1(printk(KERN_DEBUG "thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
-			  c->unchecked_size, c->checked_ino));
-		return 1;
-	}
-
-	/* dirty_size contains blocks on erase_pending_list
-	 * those blocks are counted in c->nr_erasing_blocks.
-	 * If one block is actually erased, it is not longer counted as dirty_space
-	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
-	 * with c->nr_erasing_blocks * c->sector_size again.
-	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
-	 * This helps us to force gc and pick eventually a clean block to spread the load.
-	 */
-	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
-
-	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
-			(dirty > c->nospc_dirty_size))
-		ret = 1;
-
-	D1(printk(KERN_DEBUG "thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
-		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
-
-	return ret;
-}
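
The gc_thread_start hunks above replace a semaphore initialised locked (init_MUTEX_LOCKED/down/up) with a completion, the kernel primitive intended for exactly this kind of one-shot start-up handshake. As a minimal sketch of the resulting handshake, using only the calls this diff introduces and omitting the surrounding error handling (so this is not the full jffs2_start_garbage_collect_thread()):

	/* Parent side (cf. jffs2_start_garbage_collect_thread): prepare the
	   completion, spawn the thread, then block until it has signed on. */
	init_completion(&c->gc_thread_start);
	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
	if (pid >= 0)
		wait_for_completion(&c->gc_thread_start);

	/* Child side (cf. jffs2_garbage_collect_thread): register itself, then
	   wake the parent; c->gc_task is valid by the time the parent resumes. */
	c->gc_task = current;
	complete(&c->gc_thread_start);

A completion expresses "wait for this event" directly instead of pressing a mutex-style semaphore into service as a signalling mechanism, which is why the kernel converted handshakes like this one to completions.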
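
The static thread_should_wake() removed in the last hunk is not simply deleted: its callers now use jffs2_thread_should_wake(), a shared JFFS2 helper whose body lives outside this file and is therefore not shown in the diff. Below is a rough sketch of the wake-up test, reconstructed from the removed body above; the helper's real location and any extra heuristics it gained in the newer code are assumptions, the _sketch name is purely illustrative, and struct jffs2_sb_info comes from the usual JFFS2 "nodelist.h" header.

static int jffs2_thread_should_wake_sketch(struct jffs2_sb_info *c)
{
	uint32_t dirty;

	/* Nodes still unchecked after mount always justify waking the GC thread. */
	if (c->unchecked_size)
		return 1;

	/* Blocks queued for or undergoing erase are counted in nr_erasing_blocks;
	   adding erasing_size and subtracting nr_erasing_blocks * sector_size
	   cancels them out, so `dirty' is the dirty space GC can still act on
	   (blocks on erasable_list deliberately remain counted). */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	/* Wake when free plus soon-to-be-free space drops below the GC trigger
	   threshold and there is enough dirty space for a GC pass to be worthwhile. */
	return (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger) &&
	       (dirty > c->nospc_dirty_size);
}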