* lbuf's ready to be redriven. Protected by log_redrive_lock (jfsIO thread)
*/
static struct lbuf *log_redrive_list;
-static spinlock_t log_redrive_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(log_redrive_lock);
DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
/*
* log buffer cache synchronization
*/
-static spinlock_t jfsLCacheLock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(jfsLCacheLock);
#define LCACHE_LOCK(flags) spin_lock_irqsave(&jfsLCacheLock, flags)
#define LCACHE_UNLOCK(flags) spin_unlock_irqrestore(&jfsLCacheLock, flags)
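For background: SPIN_LOCK_UNLOCKED was the old shared static initializer and was later removed from the kernel outright, since per-lock initialization (eventually required by lockdep) cannot come from one shared constant; DEFINE_SPINLOCK() declares and statically initializes the lock in one line, so the conversions here are mechanical. Side by side, with a hypothetical lock name (pick one form; both shown only for comparison):

	#include <linux/spinlock.h>

	/* old style: shared static initializer (since removed from the kernel) */
	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

	/* new style: declare and statically initialize in one step */
	static DEFINE_SPINLOCK(my_lock);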
/*
* Global list of active external journals
*/
-LIST_HEAD(jfs_external_logs);
-struct jfs_log *dummy_log = NULL;
-DECLARE_MUTEX(jfs_log_sem);
+static LIST_HEAD(jfs_external_logs);
+static struct jfs_log *dummy_log = NULL;
+static DECLARE_MUTEX(jfs_log_sem);
/*
* external references
*/
extern void txLazyUnlock(struct tblock * tblk);
extern int jfs_stop_threads;
extern struct completion jfsIOwait;
+extern int jfs_tlocks_low;
/*
* forward references
*/
/*
* statistics
*/
#ifdef CONFIG_JFS_STATISTICS
-struct lmStat {
+static struct lmStat {
uint commit; /* # of commit */
uint pagedone; /* # of page written */
uint submitted; /* # of pages submitted */
tblk->eor = log->eor;
/* enqueue transaction to commit queue */
- tblk->cqnext = NULL;
- if (log->cqueue.head) {
- log->cqueue.tail->cqnext = tblk;
- log->cqueue.tail = tblk;
- } else
- log->cqueue.head = log->cqueue.tail = tblk;
+ list_add_tail(&tblk->cqueue, &log->cqueue);
LOGGC_UNLOCK(log);
}
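The hand-rolled head/tail queue becomes a standard doubly-linked list_head: enqueue is a single list_add_tail(), and the tail is reached through the head's .prev pointer, as the next hunk does. A minimal sketch of both operations under hypothetical demo names (tblock_demo, cqueue_demo stand in for the patch's struct tblock and log->cqueue):

	#include <linux/list.h>

	/* illustrative stand-ins, not the patch's actual definitions */
	struct tblock_demo {
		int pn;                    /* log page holding its COMMIT record */
		int flag;                  /* tblkGC_* style state bits */
		struct list_head cqueue;   /* links the tblock into the commit queue */
	};

	static LIST_HEAD(cqueue_demo);     /* plays the role of log->cqueue */

	static void enqueue_demo(struct tblock_demo *tblk)
	{
		list_add_tail(&tblk->cqueue, &cqueue_demo);   /* append at tail */
	}

	static struct tblock_demo *tail_demo(void)
	{
		if (list_empty(&cqueue_demo))
			return NULL;    /* empty queue: no tail */
		return list_entry(cqueue_demo.prev, struct tblock_demo, cqueue);
	}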
* write or queue the full page at the tail of write queue
*/
/* get the tail tblk on commit queue */
- tblk = log->cqueue.tail;
+ if (list_empty(&log->cqueue))
+ tblk = NULL;
+ else
+ tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);
/* every tblk who has COMMIT record on the current page,
* and has not been committed, must be on commit queue
if (tblk->xflag & COMMIT_LAZY)
tblk->flag |= tblkGC_LAZY;
- if ((!(log->cflag & logGC_PAGEOUT)) && log->cqueue.head &&
- (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag))) {
+ if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
+ (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
+ || jfs_tlocks_low)) {
/*
* No pageout in progress
*
struct logpage *lp;
int gcpn; /* group commit page number */
struct tblock *tblk;
- struct tblock *xtblk;
+ struct tblock *xtblk = NULL;
/*
* build the commit group of a log page
* transactions with COMMIT records on the same log page.
*/
/* get the head tblk on the commit queue */
- tblk = xtblk = log->cqueue.head;
- gcpn = tblk->pn;
- while (tblk && tblk->pn == gcpn) {
+ gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;
+
+ list_for_each_entry(tblk, &log->cqueue, cqueue) {
+ if (tblk->pn != gcpn)
+ break;
xtblk = tblk;
/* state transition: (QUEUE, READY) -> COMMIT */
tblk->flag |= tblkGC_COMMIT;
- tblk = tblk->cqnext;
}
tblk = xtblk; /* last tblk of the page */
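list_for_each_entry() visits the tblocks in queue order; breaking on the first page-number mismatch leaves xtblk pointing at the last transaction whose COMMIT record sits on page gcpn. A reduced sketch of that grouping loop, reusing the demo types above:

	static struct tblock_demo *last_on_first_page(void)
	{
		struct tblock_demo *tblk, *xtblk = NULL;
		int gcpn;

		if (list_empty(&cqueue_demo))
			return NULL;    /* callers in the patch guarantee non-empty */
		gcpn = list_entry(cqueue_demo.next, struct tblock_demo, cqueue)->pn;

		list_for_each_entry(tblk, &cqueue_demo, cqueue) {
			if (tblk->pn != gcpn)      /* past the first log page: stop */
				break;
			xtblk = tblk;              /* remember last tblock on page gcpn */
		}
		return xtblk;                      /* last tblock of the group */
	}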
unsigned long flags;
struct jfs_log *log = bp->l_log;
struct logpage *lp;
- struct tblock *tblk;
+ struct tblock *tblk, *temp;
//LOGGC_LOCK(log);
spin_lock_irqsave(&log->gclock, flags);
* remove/wakeup transactions from commit queue who were
* group committed with the current log page
*/
- while ((tblk = log->cqueue.head) && (tblk->flag & tblkGC_COMMIT)) {
+ list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
+ if (!(tblk->flag & tblkGC_COMMIT))
+ break;
/* if transaction was marked GC_COMMIT then
* it has been shipped in the current pageout
* and made it to disk - it is committed.
tblk->flag |= tblkGC_ERROR;
/* remove it from the commit queue */
- log->cqueue.head = tblk->cqnext;
- if (log->cqueue.head == NULL)
- log->cqueue.tail = NULL;
+ list_del(&tblk->cqueue);
tblk->flag &= ~tblkGC_QUEUE;
- tblk->cqnext = 0;
if (tblk == log->flush_tblk) {
/* we can stop flushing the log now */
* select the latest ready transaction as new group leader and
* wake her up to lead her group.
*/
- if ((tblk = log->cqueue.head) &&
+ if ((!list_empty(&log->cqueue)) &&
((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
- test_bit(log_FLUSH, &log->flag)))
+ test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
/*
* Call lmGCwrite with new group leader
*/
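Since the loop deletes the node it is standing on, it must be list_for_each_entry_safe(), which saves the next node in temp before the body can unlink the current one. A subtle point in the patch: the trailing !list_empty() test can only be true if the loop exited via break, so tblk still validly names the first not-yet-committed transaction when it is dereferenced in the new-leader condition. A minimal sketch of delete-while-iterating, again with the demo names:

	static void dequeue_committed_demo(int gc_commit)
	{
		struct tblock_demo *tblk, *temp;

		list_for_each_entry_safe(tblk, temp, &cqueue_demo, cqueue) {
			if (!(tblk->flag & gc_commit))
				break;                 /* first uncommitted: stop */
			list_del(&tblk->cqueue);       /* safe: temp was saved first */
		}
	}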
* actually make it to disk
*/
list_for_each_entry(sbi, &log->sb_list, log_list) {
+ if (sbi->flag & JFS_NOINTEGRITY)
+ continue;
filemap_fdatawrite(sbi->ipbmap->i_mapping);
filemap_fdatawrite(sbi->ipimap->i_mapping);
filemap_fdatawrite(sbi->sb->s_bdev->bd_inode->i_mapping);
}
list_for_each_entry(sbi, &log->sb_list, log_list) {
+ if (sbi->flag & JFS_NOINTEGRITY)
+ continue;
filemap_fdatawait(sbi->ipbmap->i_mapping);
filemap_fdatawait(sbi->ipimap->i_mapping);
filemap_fdatawait(sbi->sb->s_bdev->bd_inode->i_mapping);
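The flush is split into two passes on purpose: the first loop starts writeback on every mount's metadata mappings so the I/O overlaps, and only then does the second loop wait for completion; JFS_NOINTEGRITY mounts are skipped in both because they make no journaling guarantees. A generic sketch of the start-all-then-wait-all shape, assuming a hypothetical flat array instead of the sb_list:

	#include <linux/fs.h>

	/* hypothetical standalone illustration of the two-pass flush */
	static void flush_all_demo(struct address_space **maps, int n)
	{
		int i;

		for (i = 0; i < n; i++)            /* pass 1: start writeback */
			filemap_fdatawrite(maps[i]);
		for (i = 0; i < n; i++)            /* pass 2: wait for completion */
			filemap_fdatawait(maps[i]);
	}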
init_waitqueue_head(&log->syncwait);
- log->cqueue.head = log->cqueue.tail = NULL;
+ INIT_LIST_HEAD(&log->cqueue);
log->flush_tblk = NULL;
log->count = 0;
* unwind on error
*/
errout30: /* release log page */
+ log->wqueue = NULL;
+ bp->l_wqnext = NULL;
lbmFree(bp);
errout20: /* release log superblock */
* in-line log in host file system
*/
rc = lmLogShutdown(log);
+ kfree(log);
goto out;
}
bd_release(bdev);
blkdev_put(bdev);
+ kfree(log);
+
out:
up(&jfs_log_sem);
jfs_info("lmLogClose: exit(%d)", rc);
void jfs_flush_journal(struct jfs_log *log, int wait)
{
int i;
- struct tblock *target;
+ struct tblock *target = NULL;
/* jfs_write_inode may call us during read-only mount */
if (!log)
LOGGC_LOCK(log);
- target = log->cqueue.head;
-
- if (target) {
+ if (!list_empty(&log->cqueue)) {
/*
* This ensures that we will keep writing to the journal as long
* as there are unwritten commit records
*/
+ target = list_entry(log->cqueue.prev, struct tblock, cqueue);
if (test_bit(log_FLUSH, &log->flag)) {
/*
* If there was recent activity, we may need to wait
* for the lazycommit thread to catch up
*/
- if (log->cqueue.head || !list_empty(&log->synclist)) {
+ if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
for (i = 0; i < 800; i++) { /* Too much? */
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ / 4);
- if ((log->cqueue.head == NULL) &&
+ if (list_empty(&log->cqueue) &&
list_empty(&log->synclist))
break;
}
}
- assert(log->cqueue.head == NULL);
+ assert(list_empty(&log->cqueue));
assert(list_empty(&log->synclist));
clear_bit(log_FLUSH, &log->flag);
}
DECLARE_WAITQUEUE(wq, current);
spin_lock_irq(&log_redrive_lock);
- while ((bp = log_redrive_list) != 0) {
+ while ((bp = log_redrive_list)) {
log_redrive_list = bp->l_redrive_next;
bp->l_redrive_next = NULL;
spin_unlock_irq(&log_redrive_lock);
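The redrive drain is the standard pattern for emptying a singly-linked work list under a spinlock: unlink one element while locked, drop the lock for the slow part (lbmStartIO can block), then re-take it before testing for more work. A self-contained sketch with hypothetical names:

	#include <linux/spinlock.h>

	struct work_demo {
		struct work_demo *next;
	};

	static struct work_demo *work_list;   /* plays the role of log_redrive_list */
	static DEFINE_SPINLOCK(work_lock);

	static void process(struct work_demo *item) { /* stands in for lbmStartIO */ }

	static void drain_demo(void)
	{
		struct work_demo *item;

		spin_lock_irq(&work_lock);
		while ((item = work_list)) {
			work_list = item->next;       /* unlink under the lock */
			item->next = NULL;
			spin_unlock_irq(&work_lock);  /* do the slow work unlocked */
			process(item);
			spin_lock_irq(&work_lock);    /* re-take before the next test */
		}
		spin_unlock_irq(&work_lock);
	}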