linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 73569b1..d27bac6 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
 #include <linux/buffer_head.h>         /* for sync_blockdev() */
 #include <linux/bio.h>
 #include <linux/suspend.h>
+#include <linux/delay.h>
 #include "jfs_incore.h"
 #include "jfs_filsys.h"
 #include "jfs_metapage.h"
+#include "jfs_superblock.h"
 #include "jfs_txnmgr.h"
 #include "jfs_debug.h"
 
@@ -78,7 +80,7 @@
  * lbuf's ready to be redriven.  Protected by log_redrive_lock (jfsIO thread)
  */
 static struct lbuf *log_redrive_list;
-static spinlock_t log_redrive_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(log_redrive_lock);
 DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
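
For reference, a minimal sketch (not part of the patch; lock name hypothetical) of the initializer change made here and at jfsLCacheLock below — SPIN_LOCK_UNLOCKED was the old static initializer, DEFINE_SPINLOCK declares and initializes in one step:

	/* old style, removed from the kernel API */
	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

	/* new style used by this patch */
	static DEFINE_SPINLOCK(example_lock);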
 
 
@@ -113,7 +115,7 @@ DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
 /*
  *     log buffer cache synchronization
  */
-static spinlock_t jfsLCacheLock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(jfsLCacheLock);
 
 #define        LCACHE_LOCK(flags)      spin_lock_irqsave(&jfsLCacheLock, flags)
 #define        LCACHE_UNLOCK(flags)    spin_unlock_irqrestore(&jfsLCacheLock, flags)
@@ -161,16 +163,9 @@ do {                                               \
 /*
  * Global list of active external journals
  */
-LIST_HEAD(jfs_external_logs);
-struct jfs_log *dummy_log = NULL;
-DECLARE_MUTEX(jfs_log_sem);
-
-/*
- * external references
- */
-extern void txLazyUnlock(struct tblock * tblk);
-extern int jfs_stop_threads;
-extern struct completion jfsIOwait;
+static LIST_HEAD(jfs_external_logs);
+static struct jfs_log *dummy_log = NULL;
+static DECLARE_MUTEX(jfs_log_sem);
 
 /*
  * forward references
@@ -196,7 +191,7 @@ static int lbmIOWait(struct lbuf * bp, int flag);
 static bio_end_io_t lbmIODone;
 static void lbmStartIO(struct lbuf * bp);
 static void lmGCwrite(struct jfs_log * log, int cant_block);
-static int lmLogSync(struct jfs_log * log, int nosyncwait);
+static int lmLogSync(struct jfs_log * log, int hard_sync);
 
 
 
@@ -204,7 +199,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait);
  *     statistics
  */
 #ifdef CONFIG_JFS_STATISTICS
-struct lmStat {
+static struct lmStat {
        uint commit;            /* # of commit */
        uint pagedone;          /* # of page written */
        uint submitted;         /* # of pages submitted */
@@ -232,6 +227,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
        int lsn;
        int diffp, difft;
        struct metapage *mp = NULL;
+       unsigned long flags;
 
        jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
                 log, tblk, lrd, tlck);
@@ -252,7 +248,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
         */
        lsn = log->lsn;
 
-       LOGSYNC_LOCK(log);
+       LOGSYNC_LOCK(log, flags);
 
        /*
         * initialize page lsn if first log write of the page
@@ -308,7 +304,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
                }
        }
 
-       LOGSYNC_UNLOCK(log);
+       LOGSYNC_UNLOCK(log, flags);
 
        /*
         *      write the log record
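
LOGSYNC_LOCK() now takes a flags argument, i.e. it presumably switched to the irqsave variant, mirroring the LCACHE_LOCK macros shown earlier. A hedged sketch of the expected jfs_logmgr.h side (that header is not part of this diff, so the field name and macro bodies are assumptions):

	/* assumed shape, patterned on LCACHE_LOCK above */
	#define LOGSYNC_LOCK(log, flags) \
		spin_lock_irqsave(&(log)->synclock, flags)
	#define LOGSYNC_UNLOCK(log, flags) \
		spin_unlock_irqrestore(&(log)->synclock, flags)

which is why lmLog() and lmLogSync() now declare a local 'unsigned long flags'.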
@@ -332,7 +328,6 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
        return lsn;
 }
 
-
 /*
  * NAME:       lmWriteRecord()
  *
@@ -524,12 +519,7 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
                        tblk->eor = log->eor;
 
                        /* enqueue transaction to commit queue */
-                       tblk->cqnext = NULL;
-                       if (log->cqueue.head) {
-                               log->cqueue.tail->cqnext = tblk;
-                               log->cqueue.tail = tblk;
-                       } else
-                               log->cqueue.head = log->cqueue.tail = tblk;
+                       list_add_tail(&tblk->cqueue, &log->cqueue);
 
                        LOGGC_UNLOCK(log);
                }
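
The hand-rolled cqnext/head/tail chain becomes a standard doubly linked list. A condensed sketch of the resulting commit-queue idiom (struct members as implied by the patch; the list head itself is set up in lmLogInit with INIT_LIST_HEAD):

	/* enqueue a committed transaction at the tail */
	list_add_tail(&tblk->cqueue, &log->cqueue);

	/* fetch the current tail, if any (as lmNextPage now does) */
	if (!list_empty(&log->cqueue))
		tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);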
@@ -587,7 +577,10 @@ static int lmNextPage(struct jfs_log * log)
         *      write or queue the full page at the tail of write queue
         */
        /* get the tail tblk on commit queue */
-       tblk = log->cqueue.tail;
+       if (list_empty(&log->cqueue))
+               tblk = NULL;
+       else
+               tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);
 
        /* every tblk who has COMMIT record on the current page,
         * and has not been committed, must be on commit queue
@@ -688,8 +681,9 @@ int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
        if (tblk->xflag & COMMIT_LAZY)
                tblk->flag |= tblkGC_LAZY;
 
-       if ((!(log->cflag & logGC_PAGEOUT)) && log->cqueue.head &&
-           (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag))) {
+       if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
+           (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
+            || jfs_tlocks_low)) {
                /*
                 * No pageout in progress
                 *
@@ -753,7 +747,7 @@ static void lmGCwrite(struct jfs_log * log, int cant_write)
        struct logpage *lp;
        int gcpn;               /* group commit page number */
        struct tblock *tblk;
-       struct tblock *xtblk;
+       struct tblock *xtblk = NULL;
 
        /*
         * build the commit group of a log page
@@ -762,15 +756,16 @@ static void lmGCwrite(struct jfs_log * log, int cant_write)
         * transactions with COMMIT records on the same log page.
         */
        /* get the head tblk on the commit queue */
-       tblk = xtblk = log->cqueue.head;
-       gcpn = tblk->pn;
+       gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;
+
+       list_for_each_entry(tblk, &log->cqueue, cqueue) {
+               if (tblk->pn != gcpn)
+                       break;
 
-       while (tblk && tblk->pn == gcpn) {
                xtblk = tblk;
 
                /* state transition: (QUEUE, READY) -> COMMIT */
                tblk->flag |= tblkGC_COMMIT;
-               tblk = tblk->cqnext;
        }
        tblk = xtblk;           /* last tblk of the page */
 
@@ -816,7 +811,7 @@ static void lmPostGC(struct lbuf * bp)
        unsigned long flags;
        struct jfs_log *log = bp->l_log;
        struct logpage *lp;
-       struct tblock *tblk;
+       struct tblock *tblk, *temp;
 
        //LOGGC_LOCK(log);
        spin_lock_irqsave(&log->gclock, flags);
@@ -826,7 +821,9 @@ static void lmPostGC(struct lbuf * bp)
         * remove/wakeup transactions from commit queue who were
         * group committed with the current log page
         */
-       while ((tblk = log->cqueue.head) && (tblk->flag & tblkGC_COMMIT)) {
+       list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
+               if (!(tblk->flag & tblkGC_COMMIT))
+                       break;
                /* if transaction was marked GC_COMMIT then
                 * it has been shipped in the current pageout
                 * and made it to disk - it is committed.
@@ -836,11 +833,8 @@ static void lmPostGC(struct lbuf * bp)
                        tblk->flag |= tblkGC_ERROR;
 
                /* remove it from the commit queue */
-               log->cqueue.head = tblk->cqnext;
-               if (log->cqueue.head == NULL)
-                       log->cqueue.tail = NULL;
+               list_del(&tblk->cqueue);
                tblk->flag &= ~tblkGC_QUEUE;
-               tblk->cqnext = 0;
 
                if (tblk == log->flush_tblk) {
                        /* we can stop flushing the log now */
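
Since lmPostGC() unlinks entries while walking the queue, the _safe iterator is required; 'temp' holds the next node before the current one is deleted. A condensed sketch of the pattern used above:

	struct tblock *tblk, *temp;

	list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
		if (!(tblk->flag & tblkGC_COMMIT))
			break;			/* only entries shipped in this pageout */
		list_del(&tblk->cqueue);	/* safe: iterator already points past us */
		tblk->flag &= ~tblkGC_QUEUE;
	}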
@@ -893,9 +887,9 @@ static void lmPostGC(struct lbuf * bp)
         * select the latest ready transaction as new group leader and
         * wake her up to lead her group.
         */
-       if ((tblk = log->cqueue.head) &&
+       if ((!list_empty(&log->cqueue)) &&
            ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
-            test_bit(log_FLUSH, &log->flag)))
+            test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
                /*
                 * Call lmGCwrite with new group leader
                 */
@@ -921,20 +915,17 @@ static void lmPostGC(struct lbuf * bp)
  *     if new sync address is available
  *     (normally the case if sync() is executed by back-ground
  *     process).
- *     if not, explicitly run jfs_blogsync() to initiate
- *     getting of new sync address.
  *     calculate new value of i_nextsync which determines when
  *     this code is called again.
  *
- *     this is called only from lmLog().
- *
- * PARAMETER:  ip      - pointer to logs inode.
+ * PARAMETERS: log     - log structure
+ *             hard_sync - 1 to force all metadata to be written
  *
  * RETURN:     0
  *                     
  * serialization: LOG_LOCK() held on entry/exit
  */
-static int lmLogSync(struct jfs_log * log, int nosyncwait)
+static int lmLogSync(struct jfs_log * log, int hard_sync)
 {
        int logsize;
        int written;            /* written since last syncpt */
@@ -944,6 +935,22 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
        struct lrd lrd;
        int lsn;
        struct logsyncblk *lp;
+       struct jfs_sb_info *sbi;
+       unsigned long flags;
+
+       /* push dirty metapages out to disk */
+       if (hard_sync)
+               list_for_each_entry(sbi, &log->sb_list, log_list) {
+                       filemap_fdatawrite(sbi->ipbmap->i_mapping);
+                       filemap_fdatawrite(sbi->ipimap->i_mapping);
+                       filemap_fdatawrite(sbi->direct_inode->i_mapping);
+               }
+       else
+               list_for_each_entry(sbi, &log->sb_list, log_list) {
+                       filemap_flush(sbi->ipbmap->i_mapping);
+                       filemap_flush(sbi->ipimap->i_mapping);
+                       filemap_flush(sbi->direct_inode->i_mapping);
+               }
 
        /*
         *      forward syncpt
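
The hard_sync flag chooses between two write-back strengths: filemap_fdatawrite() starts write-out of all dirty pages in the mapping (WB_SYNC_ALL), while filemap_flush() only kicks off background-style write-back (WB_SYNC_NONE) and may skip pages. A minimal sketch of the distinction (inode pointer hypothetical):

	if (hard_sync)
		filemap_fdatawrite(inode->i_mapping);	/* push everything dirty */
	else
		filemap_flush(inode->i_mapping);	/* best-effort nudge only */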
@@ -953,10 +960,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
         */
 
        if (log->sync == log->syncpt) {
-               LOGSYNC_LOCK(log);
-               /* ToDo: push dirty metapages out to disk */
-//              bmLogSync(log);
-
+               LOGSYNC_LOCK(log, flags);
                if (list_empty(&log->synclist))
                        log->sync = log->lsn;
                else {
@@ -964,7 +968,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
                                        struct logsyncblk, synclist);
                        log->sync = lp->lsn;
                }
-               LOGSYNC_UNLOCK(log);
+               LOGSYNC_UNLOCK(log, flags);
 
        }
 
@@ -973,23 +977,6 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
         * reset syncpt = sync
         */
        if (log->sync != log->syncpt) {
-               struct jfs_sb_info *sbi;
-
-               /*
-                * We need to make sure all of the "written" metapages
-                * actually make it to disk
-                */
-               list_for_each_entry(sbi, &log->sb_list, log_list) {
-                       filemap_fdatawrite(sbi->ipbmap->i_mapping);
-                       filemap_fdatawrite(sbi->ipimap->i_mapping);
-                       filemap_fdatawrite(sbi->sb->s_bdev->bd_inode->i_mapping);
-               }
-               list_for_each_entry(sbi, &log->sb_list, log_list) {
-                       filemap_fdatawait(sbi->ipbmap->i_mapping);
-                       filemap_fdatawait(sbi->ipimap->i_mapping);
-                       filemap_fdatawait(sbi->sb->s_bdev->bd_inode->i_mapping);
-               }
-
                lrd.logtid = 0;
                lrd.backchain = 0;
                lrd.type = cpu_to_le16(LOG_SYNCPT);
@@ -1039,16 +1026,13 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
                /* next syncpt trigger = written + more */
                log->nextsync = written + more;
 
-       /* return if lmLogSync() from outside of transaction, e.g., sync() */
-       if (nosyncwait)
-               return lsn;
-
        /* if number of bytes written from last sync point is more
         * than 1/4 of the log size, stop new transactions from
         * starting until all current transactions are completed
         * by setting syncbarrier flag.
         */
-       if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
+       if (!test_bit(log_SYNCBARRIER, &log->flag) &&
+           (written > LOGSYNC_BARRIER(logsize)) && log->active) {
                set_bit(log_SYNCBARRIER, &log->flag);
                jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
                         log->syncpt);
@@ -1061,6 +1045,19 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
        return lsn;
 }
 
+/*
+ * NAME:       jfs_syncpt
+ *
+ * FUNCTION:   write log SYNCPT record for specified log
+ *
+ * PARAMETERS: log       - log structure
+ *             hard_sync - set to 1 to force metadata to be written
+ */
+void jfs_syncpt(struct jfs_log *log, int hard_sync)
+{
+       LOG_LOCK(log);
+       lmLogSync(log, hard_sync);
+       LOG_UNLOCK(log);
+}
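
jfs_syncpt() is the new non-static entry point wrapping lmLogSync() under LOG_LOCK. A hypothetical caller sketch (the real callers live outside this file and are not shown in the patch):

	/* hypothetical: quiesce the journal, e.g. around unmount or sync */
	static void example_quiesce(struct jfs_log *log)
	{
		jfs_flush_journal(log, 2);	/* drain the commit queue first */
		jfs_syncpt(log, 1);		/* hard sync: also push dirty metapages */
	}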
 
 /*
  * NAME:       lmLogOpen()
@@ -1114,6 +1111,7 @@ int lmLogOpen(struct super_block *sb)
        }
        memset(log, 0, sizeof(struct jfs_log));
        INIT_LIST_HEAD(&log->sb_list);
+       init_waitqueue_head(&log->syncwait);
 
        /*
         *      external log as separate logical volume
@@ -1187,6 +1185,7 @@ static int open_inline_log(struct super_block *sb)
                return -ENOMEM;
        memset(log, 0, sizeof(struct jfs_log));
        INIT_LIST_HEAD(&log->sb_list);
+       init_waitqueue_head(&log->syncwait);
 
        set_bit(log_INLINELOG, &log->flag);
        log->bdev = sb->s_bdev;
@@ -1224,6 +1223,7 @@ static int open_dummy_log(struct super_block *sb)
                }
                memset(dummy_log, 0, sizeof(struct jfs_log));
                INIT_LIST_HEAD(&dummy_log->sb_list);
+               init_waitqueue_head(&dummy_log->syncwait);
                dummy_log->no_integrity = 1;
                /* Make up some stuff */
                dummy_log->base = 0;
@@ -1286,9 +1286,7 @@ int lmLogInit(struct jfs_log * log)
 
        INIT_LIST_HEAD(&log->synclist);
 
-       init_waitqueue_head(&log->syncwait);
-
-       log->cqueue.head = log->cqueue.tail = NULL;
+       INIT_LIST_HEAD(&log->cqueue);
        log->flush_tblk = NULL;
 
        log->count = 0;
@@ -1435,6 +1433,8 @@ int lmLogInit(struct jfs_log * log)
         *      unwind on error
         */
       errout30:                /* release log page */
+       log->wqueue = NULL;
+       bp->l_wqnext = NULL;
        lbmFree(bp);
 
       errout20:                /* release log superblock */
@@ -1486,6 +1486,7 @@ int lmLogClose(struct super_block *sb)
                 *      in-line log in host file system
                 */
                rc = lmLogShutdown(log);
+               kfree(log);
                goto out;
        }
 
@@ -1515,6 +1516,8 @@ int lmLogClose(struct super_block *sb)
        bd_release(bdev);
        blkdev_put(bdev);
 
+       kfree(log);
+
       out:
        up(&jfs_log_sem);
        jfs_info("lmLogClose: exit(%d)", rc);
@@ -1535,7 +1538,8 @@ int lmLogClose(struct super_block *sb)
 void jfs_flush_journal(struct jfs_log *log, int wait)
 {
        int i;
-       struct tblock *target;
+       struct tblock *target = NULL;
+       struct jfs_sb_info *sbi;
 
        /* jfs_write_inode may call us during read-only mount */
        if (!log)
@@ -1545,13 +1549,12 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
 
        LOGGC_LOCK(log);
 
-       target = log->cqueue.head;
-
-       if (target) {
+       if (!list_empty(&log->cqueue)) {
                /*
                 * This ensures that we will keep writing to the journal as long
                 * as there are unwritten commit records
                 */
+               target = list_entry(log->cqueue.prev, struct tblock, cqueue);
 
                if (test_bit(log_FLUSH, &log->flag)) {
                        /*
@@ -1598,21 +1601,44 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
        if (wait < 2)
                return;
 
+       list_for_each_entry(sbi, &log->sb_list, log_list) {
+               filemap_fdatawrite(sbi->ipbmap->i_mapping);
+               filemap_fdatawrite(sbi->ipimap->i_mapping);
+               filemap_fdatawrite(sbi->direct_inode->i_mapping);
+       }
+
        /*
         * If there was recent activity, we may need to wait
         * for the lazycommit thread to catch up
         */
-       if (log->cqueue.head || !list_empty(&log->synclist)) {
-               for (i = 0; i < 800; i++) {     /* Too much? */
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(HZ / 4);
-                       if ((log->cqueue.head == NULL) &&
+       if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
+               for (i = 0; i < 200; i++) {     /* Too much? */
+                       msleep(250);
+                       if (list_empty(&log->cqueue) &&
                            list_empty(&log->synclist))
                                break;
                }
        }
-       assert(log->cqueue.head == NULL);
-       assert(list_empty(&log->synclist));
+       assert(list_empty(&log->cqueue));
+
+#ifdef CONFIG_JFS_DEBUG
+       if (!list_empty(&log->synclist)) {
+               struct logsyncblk *lp;
+
+               list_for_each_entry(lp, &log->synclist, synclist) {
+                       if (lp->xflag & COMMIT_PAGE) {
+                               struct metapage *mp = (struct metapage *)lp;
+                               dump_mem("orphan metapage", lp,
+                                        sizeof(struct metapage));
+                               dump_mem("page", mp->page, sizeof(struct page));
+                       }
+                       else
+                               dump_mem("orphan tblock", lp,
+                                        sizeof(struct tblock));
+               }
+       }
+#endif
+       //assert(list_empty(&log->synclist));
        clear_bit(log_FLUSH, &log->flag);
 }
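
msleep() comes from <linux/delay.h> (hence the new include at the top) and sleeps uninterruptibly, replacing the open-coded TASK_INTERRUPTIBLE + schedule_timeout(HZ/4) pair; note the loop bound also drops from 800 to 200 iterations, so the maximum wait shrinks from roughly 200 s to 50 s. A sketch of the equivalence, values taken from the patch:

	/* old: ~250 ms interruptible sleep, up to 800 times (~200 s total) */
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(HZ / 4);

	/* new: 250 ms uninterruptible sleep, up to 200 times (~50 s total) */
	msleep(250);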
 
@@ -1660,6 +1686,7 @@ int lmLogShutdown(struct jfs_log * log)
        lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
        lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
        lbmIOWait(log->bp, lbmFREE);
+       log->bp = NULL;
 
        /*
         * synchronous update log superblock
@@ -1810,20 +1837,34 @@ static int lbmLogInit(struct jfs_log * log)
 
        log->lbuf_free = NULL;
 
-       for (i = 0; i < LOGPAGES; i++) {
-               lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
-               if (lbuf == 0)
-                       goto error;
-               lbuf->l_ldata = (char *) get_zeroed_page(GFP_KERNEL);
-               if (lbuf->l_ldata == 0) {
-                       kfree(lbuf);
+       for (i = 0; i < LOGPAGES;) {
+               char *buffer;
+               uint offset;
+               struct page *page;
+
+               buffer = (char *) get_zeroed_page(GFP_KERNEL);
+               if (buffer == NULL)
                        goto error;
+               page = virt_to_page(buffer);
+               for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
+                       lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
+                       if (lbuf == NULL) {
+                               if (offset == 0)
+                                       free_page((unsigned long) buffer);
+                               goto error;
+                       }
+                       if (offset) /* we already have one reference */
+                               get_page(page);
+                       lbuf->l_offset = offset;
+                       lbuf->l_ldata = buffer + offset;
+                       lbuf->l_page = page;
+                       lbuf->l_log = log;
+                       init_waitqueue_head(&lbuf->l_ioevent);
+
+                       lbuf->l_freelist = log->lbuf_free;
+                       log->lbuf_free = lbuf;
+                       i++;
                }
-               lbuf->l_log = log;
-               init_waitqueue_head(&lbuf->l_ioevent);
-
-               lbuf->l_freelist = log->lbuf_free;
-               log->lbuf_free = lbuf;
        }
 
        return (0);
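
lbmLogInit() now carves each zeroed page into PAGE_SIZE/LOGPSIZE log buffers instead of allocating one page per lbuf; every sub-buffer after the first takes an extra page reference, so the per-lbuf __free_page() in lbmLogShutdown() balances out. A condensed sketch of the refcounting idea (error handling omitted):

	char *buffer = (char *) get_zeroed_page(GFP_KERNEL);	/* ref #1 */
	struct page *page = virt_to_page(buffer);
	uint offset;

	for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
		struct lbuf *lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);

		if (offset)			/* refs #2..#n for the later sub-buffers */
			get_page(page);
		lbuf->l_page = page;		/* later fed to bv_page */
		lbuf->l_offset = offset;	/* later fed to bv_offset */
		lbuf->l_ldata = buffer + offset;
	}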
@@ -1848,12 +1889,10 @@ static void lbmLogShutdown(struct jfs_log * log)
        lbuf = log->lbuf_free;
        while (lbuf) {
                struct lbuf *next = lbuf->l_freelist;
-               free_page((unsigned long) lbuf->l_ldata);
+               __free_page(lbuf->l_page);
                kfree(lbuf);
                lbuf = next;
        }
-
-       log->bp = NULL;
 }
 
 
@@ -1965,9 +2004,9 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
-       bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
+       bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
-       bio->bi_io_vec[0].bv_offset = 0;
+       bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
        bio->bi_idx = 0;
@@ -2106,9 +2145,9 @@ static void lbmStartIO(struct lbuf * bp)
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
-       bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
+       bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
-       bio->bi_io_vec[0].bv_offset = 0;
+       bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
        bio->bi_idx = 0;
@@ -2118,16 +2157,13 @@ static void lbmStartIO(struct lbuf * bp)
        bio->bi_private = bp;
 
        /* check if journaling to disk has been disabled */
-       if (!log->no_integrity) {
+       if (log->no_integrity) {
+               bio->bi_size = 0;
+               lbmIODone(bio, 0, 0);
+       } else {
                submit_bio(WRITE_SYNC, bio);
                INCREMENT(lmStat.submitted);
        }
-       else {
-               bio->bi_size = 0;
-               lbmIODone(bio, 0, 0); /* 2nd argument appears to not be used => 0
-                                      *  3rd argument appears to not be used => 0
-                                      */
-       }
 }
 
 
@@ -2319,16 +2355,16 @@ int jfsIOWait(void *arg)
                DECLARE_WAITQUEUE(wq, current);
 
                spin_lock_irq(&log_redrive_lock);
-               while ((bp = log_redrive_list)) {
+               while ((bp = log_redrive_list) != 0) {
                        log_redrive_list = bp->l_redrive_next;
                        bp->l_redrive_next = NULL;
                        spin_unlock_irq(&log_redrive_lock);
                        lbmStartIO(bp);
                        spin_lock_irq(&log_redrive_lock);
                }
-               if (current->flags & PF_FREEZE) {
+               if (freezing(current)) {
                        spin_unlock_irq(&log_redrive_lock);
-                       refrigerator(PF_FREEZE);
+                       refrigerator();
                } else {
                        add_wait_queue(&jfs_IO_thread_wait, &wq);
                        set_current_state(TASK_INTERRUPTIBLE);