 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * jfs_txnmgr.c: transaction manager
 *
 * notes:
 * transaction starts with txBegin() and ends with txCommit()
 * or txAbort().
 *
 * tlock is acquired at the time of update;
 * (obviate scan at commit time for xtree and dtree)
 * tlock and mp point to each other;
 * (no hashlist for mp -> tlock).
 *
 * special cases:
 * tlock on in-memory inode:
 * in-place tlock in the in-memory inode itself;
 * converted to page lock by iWrite() at commit time.
 *
 * tlock during write()/mmap() under anonymous transaction (tid = 0):
 * transferred (?) to transaction at commit time.
 *
 * use the page itself to update allocation maps
 * (obviate intermediate replication of allocation/deallocation data)
 * hold on to mp+lock thru update of maps
 */
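/*
 * Overview of control flow in this file (illustrative summary):
 *
 *	txBegin()      - reserve a tblock, synchronize with logsync barrier
 *	txLock()       - attach a tlock to each metapage updated under the tid
 *	txCommit()     - write AFTER log records (txLog), force a COMMIT
 *			 record (lmLog/lmGroupCommit), then txUpdateMap(),
 *			 txRelease() and txUnlock()
 *	txEnd()        - return the tblock to the freelist
 *	txAbort()      - throw away tlocks before commit
 *	jfs_lazycommit - kernel thread that finishes COMMIT_LAZY transactions
 */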
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
 * transaction management structures
 */
static struct {
	int freetid;		/* index of a free tid structure */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	int freelock;		/* index first free lock word */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* Number of tlocks in use */
	int TlocksLow;		/* Indicates low number of available tlocks */
	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
/*	struct tblock *sync_queue;	 * Transactions waiting for data sync */
	struct tblock *unlock_queue;	/* Txns waiting to be released */
	struct tblock *unlock_tail;	/* Tail of unlock_queue */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;
#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint txBegin;
	uint txBegin_barrier;
	uint txBegin_lockslow;
	uint txBegin_freetid;
	uint txBeginAnon;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc;
	uint txLockAlloc_freelock;
} TxStat;
#endif
static int nTxBlock = 512;	/* number of transaction blocks */
struct tblock *TxBlock;		/* transaction block table */

static int nTxLock = 4096;	/* number of transaction locks */
static int TxLockLWM = 4096*.4;	/* Low water mark for number of txLocks used */
static int TxLockHWM = 4096*.8;	/* High water mark for number of txLocks used */
struct tlock *TxLock;		/* transaction lock table */
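/*
 * Note (illustrative): with the default nTxLock of 4096 the fractional
 * initializers above truncate to TxLockLWM = 1638 and TxLockHWM = 3276.
 * txLockAlloc() sets TlocksLow once more than TxLockHWM tlocks are in
 * use and wakes the sync thread; txLockFree() clears TlocksLow again
 * once usage drops below TxLockLWM.
 */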
/*
 * transaction management lock
 */
static spinlock_t jfsTxnLock = SPIN_LOCK_UNLOCKED;

#define TXN_LOCK()		spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()		spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags)	spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)

DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
/*
 * Retry logic exists outside these macros to protect from spurious wakeups.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}

#define TXN_WAKEUP(event) wake_up_all(event)
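/*
 * Usage sketch (taken from txLockAlloc() below): since any wakeup may be
 * spurious, callers re-test their condition in a loop around TXN_SLEEP():
 *
 *	while (!(lid = TxAnchor.freelock))
 *		TXN_SLEEP(&TxAnchor.freelockwait);
 */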
/*
 * statistics
 */
static struct {
	tid_t maxtid;		/* 4: biggest tid ever used */
	lid_t maxlid;		/* 4: biggest lid ever used */
	int ntid;		/* 4: # of transactions performed */
	int nlid;		/* 4: # of tlocks acquired */
	int waitlock;		/* 4: # of tlock wait */
} stattx;
/*
 * external references
 */
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern void lmSync(struct jfs_log *);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_stop_threads;

struct task_struct *jfsCommitTask;
extern struct completion jfsIOwait;
/*
 * forward references
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck, struct commit * cd);
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
		struct tblock * tblk);
static void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk,
		struct commit * cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
 *	transaction block/lock management
 *	---------------------------------
 */

/*
 * Get a transaction lock from the free list.  If the number in use is
 * greater than the high water mark, wake up the sync daemon.  This should
 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 */
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);	/* statistics */
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (TxAnchor.TlocksLow == 0)) {
		jfs_info("txLockAlloc TlocksLow");
		TxAnchor.TlocksLow = 1;
		wake_up(&jfs_sync_thread_wait);
	}

	return lid;
}
static void txLockFree(lid_t lid)
{
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (TxAnchor.TlocksLow && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jfs_info("txLockFree TlocksLow no more");
		TxAnchor.TlocksLow = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}
/*
 * NAME:	txInit()
 *
 * FUNCTION:	initialize transaction management structures
 *
 * serialization: single thread at jfs_init()
 */
int txInit(void)
{
	int k, size;

	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index
	 * tid = 0 is reserved.
	 */
	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = (struct tblock *) vmalloc(size);
	if (TxBlock == NULL)
		return -ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id = tlock index
	 * tlock id = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = (struct tlock *) vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return -ENOMEM;
	}

	/* initialize tlock table */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;

	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}
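/*
 * Both freelists are threaded through the tables themselves: entry k's
 * ->next field holds the index of the next free entry and index 0 is the
 * terminator (tid 0 and lid 0 are reserved).  E.g. right after txInit(),
 * TxAnchor.freetid == 1 and TxBlock[1].next == 2, so allocating a tid is
 * just "t = freetid; freetid = TxBlock[t].next;".
 */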
/*
 * NAME:	txExit()
 *
 * FUNCTION:	clean up when module is unloaded
 */
/*
 * NAME:	txBegin()
 *
 * FUNCTION:	start a transaction.
 *
 * PARAMETER:	sb	- superblock
 *		flag	- force for nested tx;
 *
 * RETURN:	tid	- transaction id
 *
 * note: flag force allows starting a tx for a nested tx
 * to prevent deadlock on logsync barrier;
 */
tid_t txBegin(struct super_block *sb, int flag)
{
	tid_t t;
	struct tblock *tblk;
	struct jfs_log *log;

	jfs_info("txBegin: flag = 0x%x", flag);
	log = JFS_SBI(sb)->log;

	TXN_LOCK();

	INCREMENT(TxStat.txBegin);

      retry:
	if (!(flag & COMMIT_FORCE)) {
		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag) ||
		    test_bit(log_QUIESCE, &log->flag)) {
			INCREMENT(TxStat.txBegin_barrier);
			TXN_SLEEP(&log->syncwait);
			goto retry;
		}
	}
	if (flag == 0) {
		/*
		 * Don't begin transaction if we're getting starved for tlocks
		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
		 * free tlocks)
		 */
		if (TxAnchor.TlocksLow) {
			INCREMENT(TxStat.txBegin_lockslow);
			TXN_SLEEP(&TxAnchor.lowlockwait);
			goto retry;
		}
	}

	/*
	 * allocate transaction id/block
	 */
	if ((t = TxAnchor.freetid) == 0) {
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	tblk = tid_to_tblock(t);

	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
		/* Don't let a non-forced transaction take the last tblk */
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	TxAnchor.freetid = tblk->next;

	/*
	 * initialize transaction
	 */

	/*
	 * We can't zero the whole thing or we screw up another thread being
	 * awakened after sleeping on tblk->waitor
	 *
	 * memset(tblk, 0, sizeof(struct tblock));
	 */
	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;

	tblk->sb = sb;
	++log->logtid;
	tblk->logtid = log->logtid;

	++log->active;

	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
	INCREMENT(stattx.ntid);	/* statistics */

	TXN_UNLOCK();

	jfs_info("txBegin: returning tid = %d", t);

	return t;
}
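/*
 * Typical caller pattern (illustrative sketch, not code from this file):
 *
 *	tid = txBegin(ip->i_sb, 0);
 *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
 *	... modify the page under the tlock ...
 *	rc = txCommit(tid, 1, &ip, 0);
 *	txEnd(tid);
 *
 * For lazy transactions the caller's txEnd() merely marks the tblock
 * tblkGC_UNLOCKED; the jfs_lazycommit thread performs the real txEnd()
 * from txLazyCommit() once the commit record is on disk.
 */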
/*
 * NAME:	txBeginAnon()
 *
 * FUNCTION:	start an anonymous transaction.
 *		Blocks if logsync or available tlocks are low to prevent
 *		anonymous tlocks from depleting supply.
 *
 * PARAMETER:	sb	- superblock
 */
void txBeginAnon(struct super_block *sb)
{
	struct jfs_log *log;

	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBeginAnon);

      retry:
	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) ||
	    test_bit(log_QUIESCE, &log->flag)) {
		INCREMENT(TxStat.txBeginAnon_barrier);
		TXN_SLEEP(&log->syncwait);
		goto retry;
	}

	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 */
	if (TxAnchor.TlocksLow) {
		INCREMENT(TxStat.txBeginAnon_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
		goto retry;
	}
	TXN_UNLOCK();
}
/*
 * NAME:	txEnd()
 *
 * FUNCTION:	free specified transaction block.
 *
 *	logsync barrier processing:
 */
void txEnd(tid_t tid)
{
	struct tblock *tblk = tid_to_tblock(tid);
	struct jfs_log *log;

	jfs_info("txEnd: tid = %d", tid);
	TXN_LOCK();

	/*
	 * wakeup transactions waiting on the page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	log = JFS_SBI(tblk->sb)->log;

	/*
	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
	 * otherwise, we would be left with a transaction that may have been
	 * reused.
	 *
	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
	 * routine.
	 */
	if (tblk->flag & tblkGC_LAZY) {
		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
		TXN_UNLOCK();

		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
		tblk->flag |= tblkGC_UNLOCKED;
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		return;
	}

	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);

	assert(tblk->next == 0);

	/*
	 * insert tblock back on freelist
	 */
	tblk->next = TxAnchor.freetid;
	TxAnchor.freetid = tid;

	/*
	 * mark the tblock not active
	 */
	if (--log->active == 0) {
		clear_bit(log_FLUSH, &log->flag);

		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag)) {
			/* forward log syncpt */
			/* lmSync(log); */

			jfs_info("log barrier off: 0x%x", log->lsn);

			/* enable new transactions start */
			clear_bit(log_SYNCBARRIER, &log->flag);

			/* wakeup all waitors for logsync barrier */
			TXN_WAKEUP(&log->syncwait);
		}
	}

	/*
	 * wakeup all waitors for a free tblock
	 */
	TXN_WAKEUP(&TxAnchor.freewait);

	TXN_UNLOCK();
}
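/*
 * Note: the wakeup above is what completes the logsync barrier handoff:
 * when the last active transaction ends while log_SYNCBARRIER is set,
 * the barrier is cleared and the threads sleeping on log->syncwait in
 * txBegin()/txBeginAnon() are released.
 */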
/*
 * NAME:	txLock()
 *
 * FUNCTION:	acquire a transaction lock on the specified <mp>
 *
 * RETURN:	transaction lock id
 */
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
		     int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int dir_xtree = 0;
	lid_t lid;
	tid_t xtid;
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct linelock *linelock;
	xtpage_t *p;
	struct tblock *tblk;

	TXN_LOCK();

	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
	    !(mp->xflag & COMMIT_PAGE)) {
		/*
		 * Directory inode is special.  It can have both an xtree tlock
		 * and a dtree tlock associated with it.
		 */
		dir_xtree = 1;
		lid = jfs_ip->xtlid;
	} else
		lid = mp->lid;

	/* is page not locked by a transaction ? */
	if (lid == 0)
		goto allocateLock;

	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
	/* is page locked by the requester transaction ? */
	tlck = lid_to_tlock(lid);
	if ((xtid = tlck->tid) == tid)
		goto grantLock;

	/*
	 * is page locked by anonymous transaction/lock ?
	 *
	 * (page update without transaction (i.e., file write) is
	 * locked under anonymous transaction tid = 0:
	 * anonymous tlocks maintained on anonymous tlock list of
	 * the inode of the page and available to all anonymous
	 * transactions until txCommit() time at which point
	 * they are transferred to the transaction tlock list of
	 * the committing transaction of the inode)
	 */
	if (xtid == 0) {
		tlck->tid = tid;
		tblk = tid_to_tblock(tid);
		/*
		 * The order of the tlocks in the transaction is important
		 * (during truncate, child xtree pages must be freed before
		 * parent's tlocks change the working map).
		 * Take tlock off anonymous list and add to tail of
		 * transaction list
		 *
		 * Note:  We really need to get rid of the tid & lid and
		 * use list_head's.  This code is getting UGLY!
		 */
		if (jfs_ip->atlhead == lid) {
			if (jfs_ip->atltail == lid) {
				/* only anonymous txn.
				 * Remove from anon_list
				 */
				list_del_init(&jfs_ip->anon_inode_list);
			}
			jfs_ip->atlhead = tlck->next;
		} else {
			lid_t last;
			for (last = jfs_ip->atlhead;
			     lid_to_tlock(last)->next != lid;
			     last = lid_to_tlock(last)->next) {
				assert(last);
			}
			lid_to_tlock(last)->next = tlck->next;
			if (jfs_ip->atltail == lid)
				jfs_ip->atltail = last;
		}

		/* insert the tlock at tail of transaction tlock list */
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;

		goto grantLock;
	}

	goto waitLock;
	/*
	 * allocate a tlock
	 */
      allocateLock:
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* mark tlock for meta-data page */
	if (mp->xflag & COMMIT_PAGE) {

		tlck->flag = tlckPAGELOCK;

		/* mark the page dirty and nohomeok */
		mark_metapage_dirty(mp);
		atomic_inc(&mp->nohomeok);

		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
			 mp, atomic_read(&mp->nohomeok), tid, tlck);

		/* if anonymous transaction, and buffer is on the group
		 * commit synclist, mark inode to show this.  This will
		 * prevent the buffer from being marked nohomeok for too
		 * long a time.
		 */
		if ((tid == 0) && mp->lsn)
			set_cflag(COMMIT_Synclist, ip);
	}
	/* mark tlock for in-memory inode */
	else
		tlck->flag = tlckINODELOCK;

	tlck->type = 0;

	/* bind the tlock and the page */
	tlck->ip = ip;
	tlck->mp = mp;
	if (dir_xtree)
		jfs_ip->xtlid = lid;
	else
		mp->lid = lid;
	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}
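	/*
	 * The anonymous tlock list is a singly-linked list threaded through
	 * TxLock[] by lid: jfs_ip->atlhead is the first lid, each tlock's
	 * ->next is the following lid, and jfs_ip->atltail is the last one
	 * (its ->next is 0).  E.g. atlhead = 7, TxLock[7].next = 3,
	 * TxLock[3].next = 0 describes the list 7 -> 3 with atltail = 3.
	 */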
	/* initialize type dependent area for linelock */
	linelock = (struct linelock *) & tlck->lock;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKSHORT;
	linelock->index = 0;

	switch (type & tlckTYPE) {
	case tlckDTREE:
		linelock->l2linesize = L2DTSLOTSIZE;
		break;

	case tlckXTREE:
		linelock->l2linesize = L2XTSLOTSIZE;

		xtlck = (struct xtlock *) linelock;
		xtlck->header.offset = 0;
		xtlck->header.length = 2;

		if (type & tlckNEW) {
			xtlck->lwm.offset = XTENTRYSTART;
		} else {
			if (mp->xflag & COMMIT_PAGE)
				p = (xtpage_t *) mp->data;
			else
				p = &jfs_ip->i_xtroot;
			xtlck->lwm.offset =
			    le16_to_cpu(p->header.nextindex);
		}
		xtlck->lwm.length = 0;	/* ! */
		xtlck->twm.offset = 0;
		xtlck->hwm.offset = 0;

		xtlck->index = 2;
		break;

	case tlckINODE:
		linelock->l2linesize = L2INODESLOTSIZE;
		break;

	case tlckDATA:
		linelock->l2linesize = L2DATASLOTSIZE;
		break;

	default:
		jfs_err("UFO tlock:0x%p", tlck);
	}
	/*
	 * update tlock vector
	 */
      grantLock:
	tlck->type |= type;

	TXN_UNLOCK();

	return tlck;

	/*
	 * page is being locked by another transaction:
	 */
      waitLock:
	/* Only locks on ipimap or ipaimap should reach here */
	/* assert(jfs_ip->fileset == AGGREGATE_I); */
	if (jfs_ip->fileset != AGGREGATE_I) {
		jfs_err("txLock: trying to lock locked page!");
		dump_mem("ip", ip, sizeof(struct inode));
		dump_mem("mp", mp, sizeof(struct metapage));
		dump_mem("Locker's tblk", tid_to_tblock(tid),
			 sizeof(struct tblock));
		dump_mem("Tlock", tlck, sizeof(struct tlock));
		BUG();
	}
	INCREMENT(stattx.waitlock);	/* statistics */
	release_metapage(mp);

	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
		 tid, xtid, lid);
	TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
	jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);

	return NULL;
}
/*
 * NAME:	txRelease()
 *
 * FUNCTION:	Release buffers associated with transaction locks, but don't
 *		mark homeok yet.  This allows other transactions to modify
 *		buffers, but won't let them go to disk until commit record
 *		actually gets written.
 *
 * RETURN:	Errors from subroutines.
 */
static void txRelease(struct tblock * tblk)
{
	struct metapage *mp;
	lid_t lid;
	struct tlock *tlck;

	TXN_LOCK();

	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);
			mp->lid = 0;
		}
	}

	/*
	 * wakeup transactions waiting on a page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	TXN_UNLOCK();
}
/*
 * NAME:	txUnlock()
 *
 * FUNCTION:	Initiates pageout of pages modified by tid in journalled
 *		objects and frees their lockwords.
 */
static void txUnlock(struct tblock * tblk)
{
	struct tlock *tlck;
	struct linelock *linelock;
	lid_t lid, next, llid, k;
	struct metapage *mp;
	struct jfs_log *log;
	int difft, diffp;

	jfs_info("txUnlock: tblk = 0x%p", tblk);
	log = JFS_SBI(tblk->sb)->log;

	/*
	 * mark page under tlock homeok (its log has been written):
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);

		/* unbind page from tlock */
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			/*
			 * It's possible that someone else has the metapage.
			 * The only things we're changing are nohomeok, which
			 * is handled atomically, and clsn which is protected
			 * by the LOGSYNC_LOCK.
			 */
			hold_metapage(mp, 1);

			assert(atomic_read(&mp->nohomeok) > 0);
			atomic_dec(&mp->nohomeok);

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log);
			if (mp->clsn) {
				logdiff(difft, tblk->clsn, log);
				logdiff(diffp, mp->clsn, log);
				if (difft > diffp)
					mp->clsn = tblk->clsn;
			} else
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);

			assert(!(tlck->flag & tlckFREEPAGE));

			if (tlck->flag & tlckWRITEPAGE) {
				write_metapage(mp);
			} else {
				/* release page which has been forced */
				release_metapage(mp);
			}
		}

		/* insert tlock, and linelock(s) of the tlock if any,
		 * at head of freelist
		 */
		TXN_LOCK();

		llid = ((struct linelock *) & tlck->lock)->next;
		while (llid) {
			linelock = (struct linelock *) lid_to_tlock(llid);
			k = linelock->next;
			txLockFree(llid);
			llid = k;
		}
		txLockFree(lid);

		TXN_UNLOCK();
	}
	tblk->next = tblk->last = 0;

	/*
	 * remove tblock from logsynclist
	 * (allocation map pages inherited lsn of tblk and
	 * has been inserted in logsync list at txUpdateMap())
	 */
	if (tblk->lsn) {
		LOGSYNC_LOCK(log);
		log->count--;
		list_del(&tblk->synclist);
		LOGSYNC_UNLOCK(log);
	}
}
/*
 * NAME:	txMaplock()
 *
 * FUNCTION:	allocate a transaction lock for freed page/entry;
 *		for freed page, maplock is used as xtlock/dtlock type;
 */
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	lid_t lid;
	struct tblock *tblk;
	struct tlock *tlck;
	struct maplock *maplock;

	TXN_LOCK();

	/*
	 * allocate a tlock
	 */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	tlck->ip = ip;
	tlck->mp = NULL;

	tlck->type = type;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	TXN_UNLOCK();

	/* initialize type dependent area for maplock */
	maplock = (struct maplock *) & tlck->lock;
	maplock->next = 0;
	maplock->maxcnt = 0;
	maplock->index = 0;

	return tlck;
}
/*
 * NAME:	txLinelock()
 *
 * FUNCTION:	allocate a transaction lock for log vector list
 */
struct linelock *txLinelock(struct linelock * tlock)
{
	lid_t lid;
	struct tlock *tlck;
	struct linelock *linelock;

	TXN_LOCK();

	/* allocate a TxLock structure */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	TXN_UNLOCK();

	/* initialize linelock */
	linelock = (struct linelock *) tlck;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;

	/* append linelock after tlock */
	linelock->next = tlock->next;
	tlock->next = lid;

	return linelock;
}
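/*
 * A tlock embeds only a short lock vector (TLOCKSHORT entries).  When a
 * transaction must record more locked line ranges than fit there,
 * txLinelock() grabs another TxLock[] slot and reuses it as an overflow
 * linelock (TLOCKLONG entries), chained after the tlock; txUnlock()
 * later walks this chain via the ->next lids and frees every slot.
 */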
/*
 *	transaction commit management
 *	-----------------------------
 */
/*
 * NAME:	txCommit()
 *
 * FUNCTION:	commit the changes to the objects specified in
 *		clist.  For journalled segments only the
 *		changes of the caller are committed, i.e. by tid.
 *		for non-journalled segments the data are flushed to
 *		disk and then the change to the disk inode and indirect
 *		blocks committed (so blocks newly allocated to the
 *		segment will be made a part of the segment atomically).
 *
 *		all of the segments specified in clist must be in
 *		one file system. no more than 6 segments are needed
 *		to handle all unix svcs.
 *
 *		if the i_nlink field (i.e. disk inode link count)
 *		is zero, and the type of inode is a regular file or
 *		directory, or symbolic link, the inode is truncated
 *		to zero length. the truncation is committed but the
 *		VM resources are unaffected until it is closed (see
 *		iput and iclose).
 *
 * serialization:
 *		on entry the inode lock on each segment is assumed
 *		to be held.
 */
int txCommit(tid_t tid,		/* transaction identifier */
	     int nip,		/* number of inodes to commit */
	     struct inode **iplist,	/* list of inode to commit */
	     int flag)
{
	int rc = 0;
	struct commit cd;
	struct jfs_log *log;
	struct tblock *tblk;
	struct lrd *lrd;
	int lsn;
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int k, n;
	ino_t top;
	struct super_block *sb;

	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
	/* is read-only file system ? */
	if (isReadOnly(iplist[0])) {
		rc = -EROFS;
		goto TheEnd;
	}

	sb = cd.sb = iplist[0]->i_sb;
	cd.tid = tid;

	if (tid == 0)
		tid = txBegin(sb, 0);
	tblk = tid_to_tblock(tid);

	/*
	 * initialize commit structure
	 */
	log = JFS_SBI(sb)->log;
	cd.log = log;

	/* initialize log record descriptor in commit */
	lrd = &cd.lrd;
	lrd->logtid = cpu_to_le32(tblk->logtid);
	lrd->backchain = 0;

	tblk->xflag |= flag;

	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
		tblk->xflag |= COMMIT_LAZY;
	/*
	 *	prepare non-journaled objects for commit
	 *
	 * flush data pages of non-journaled file
	 * to prevent the file getting non-initialized disk blocks
	 */
	cd.iplist = iplist;
	cd.nip = nip;

	/*
	 *	acquire transaction lock on (on-disk) inodes
	 *
	 * update on-disk inode from in-memory inode
	 * acquiring transaction locks for AFTER records
	 * on the on-disk inode of file object
	 *
	 * sort the inodes array by inode number in descending order
	 * to prevent deadlock when acquiring transaction lock
	 * of on-disk inodes on multiple on-disk inode pages by
	 * multiple concurrent transactions
	 */
	for (k = 0; k < cd.nip; k++) {
		top = (cd.iplist[k])->i_ino;
		for (n = k + 1; n < cd.nip; n++) {
			ip = cd.iplist[n];
			if (ip->i_ino > top) {
				top = ip->i_ino;
				cd.iplist[n] = cd.iplist[k];
				cd.iplist[k] = ip;
			}
		}
	}
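	/*
	 * Example (illustrative): with inode numbers {5, 9, 2} the selection
	 * sort above reorders cd.iplist to {9, 5, 2}.  Because every
	 * transaction locks on-disk inode pages in the same descending
	 * order, two concurrent commits cannot each hold one inode page
	 * while waiting for the other's.
	 */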
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * BUGBUG - This code has temporarily been removed.  The
		 * intent is to ensure that any file data is written before
		 * the metadata is committed to the journal.  This prevents
		 * uninitialized data from appearing in a file after the
		 * journal has been replayed.  (The uninitialized data
		 * could be sensitive data removed by another user.)
		 *
		 * The problem now is that we are holding the IWRITELOCK
		 * on the inode, and calling filemap_fdatawrite on an
		 * unmapped page will cause a deadlock in jfs_get_block.
		 *
		 * The long term solution is to pare down the use of
		 * IWRITELOCK.  We are currently holding it too long.
		 * We could also be smarter about which data pages need
		 * to be written before the transaction is committed and
		 * when we don't need to worry about it at all.
		 *
		 * if ((!S_ISDIR(ip->i_mode))
		 *    && (tblk->flag & COMMIT_DELETE) == 0) {
		 *	filemap_fdatawrite(ip->i_mapping);
		 *	filemap_fdatawait(ip->i_mapping);
		 * }
		 */

		/*
		 * Mark inode as not dirty.  It will still be on the dirty
		 * inode list, but we'll know not to commit it again unless
		 * it gets marked dirty again
		 */
		clear_cflag(COMMIT_Dirty, ip);
		/* inherit anonymous tlock(s) of inode */
		if (jfs_ip->atlhead) {
			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
			tblk->next = jfs_ip->atlhead;
			if (!tblk->last)
				tblk->last = jfs_ip->atltail;
			jfs_ip->atlhead = jfs_ip->atltail = 0;
			TXN_LOCK();
			list_del_init(&jfs_ip->anon_inode_list);
			TXN_UNLOCK();
		}

		/*
		 * acquire transaction lock on on-disk inode page
		 * (become first tlock of the tblk's tlock list)
		 */
		if (((rc = diWrite(tid, ip))))
			goto out;
	}

	/*
	 *	write log records from transaction locks
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if ((rc = txLog(log, tblk, &cd)))
		goto TheEnd;
	/*
	 * Ensure that inode isn't reused before
	 * lazy commit thread finishes processing
	 */
	if (tblk->xflag & COMMIT_DELETE) {
		atomic_inc(&tblk->u.ip->i_count);
		/*
		 * Avoid a rare deadlock
		 *
		 * If the inode is locked, we may be blocked in
		 * jfs_commit_inode.  If so, we don't want the
		 * lazy_commit thread doing the last iput() on the inode
		 * since that may block on the locked inode.  Instead,
		 * commit the transaction synchronously, so the last iput
		 * will be done by the calling thread (or later)
		 */
		if (tblk->u.ip->i_state & I_LOCK)
			tblk->xflag &= ~COMMIT_LAZY;
	}

	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
	       ((tblk->u.ip->i_nlink == 0) &&
		!test_cflag(COMMIT_Nolink, tblk->u.ip)));

	/*
	 *	write COMMIT log record
	 */
	lrd->type = cpu_to_le16(LOG_COMMIT);
	lrd->length = 0;
	lsn = lmLog(log, tblk, lrd, NULL);

	lmGroupCommit(log, tblk);
	/*
	 *	- transaction is now committed -
	 */

	/*
	 * force pages in careful update
	 * (imap addressing structure update)
	 */
	if (flag & COMMIT_FORCE)
		txForce(tblk);

	/*
	 *	update allocation map.
	 *
	 * update inode allocation map and inode:
	 * free pager lock on memory object of inode if any.
	 * update block allocation map.
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if (tblk->xflag & COMMIT_FORCE)
		txUpdateMap(tblk);

	/*
	 *	free transaction locks and pageout/free pages
	 */
	txRelease(tblk);

	if ((tblk->flag & tblkGC_LAZY) == 0)
		txUnlock(tblk);

	/*
	 *	reset in-memory object state
	 */
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * reset in-memory inode state
		 */
		jfs_ip->bxflag = 0;
		jfs_ip->blid = 0;
	}

      out:
	if (rc != 0)
		txAbort(tid, 1);

      TheEnd:
	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
	return rc;
}
/*
 * NAME:	txLog()
 *
 * FUNCTION:	Writes AFTER log records for all lines modified
 *		by tid for segments specified by inodes in comdata.
 *		Code assumes only WRITELOCKS are recorded in lockwords.
 */
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
	int rc = 0;
	struct inode *ip;
	lid_t lid;
	struct tlock *tlck;
	struct lrd *lrd = &cd->lrd;

	/*
	 * write log record(s) for each tlock of transaction,
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		tlck->flag |= tlckLOG;

		/* initialize lrd common */
		ip = tlck->ip;
		lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);

		/* write log record of page from the tlock */
		switch (tlck->type & tlckTYPE) {
		case tlckXTREE:
			xtLog(log, tblk, lrd, tlck);
			break;

		case tlckDTREE:
			dtLog(log, tblk, lrd, tlck);
			break;

		case tlckINODE:
			diLog(log, tblk, lrd, tlck, cd);
			break;

		case tlckMAP:
			mapLog(log, tblk, lrd, tlck);
			break;

		case tlckDATA:
			dataLog(log, tblk, lrd, tlck);
			break;

		default:
			jfs_err("UFO tlock:0x%p", tlck);
		}
	}

	return rc;
}
/*
 * diLog()
 *
 * function:	log inode tlock and format maplock to update bmap;
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		 struct tlock * tlck, struct commit * cd)
{
	int rc = 0;
	struct metapage *mp;
	pxd_t *pxd;
	struct pxd_lock *pxdlock;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/*
	 *	inode after image
	 */
	if (tlck->type & tlckENTRY) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else if (tlck->type & tlckFREE) {
		/*
		 *	free inode extent
		 *
		 * (pages of the freed inode extent have been invalidated and
		 * a maplock for free of the extent has been formatted at
		 * txLock() time);
		 *
		 * the tlock had been acquired on the inode allocation map page
		 * (iag) that specifies the freed extent, even though the map
		 * page is not itself logged, to prevent pageout of the map
		 * page before the log;
		 */

		/* log LOG_NOREDOINOEXT of the freed inode extent for
		 * logredo() to start NoRedoPage filters, and to update
		 * imap and bmap for free of the extent;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
		/*
		 * For the LOG_NOREDOINOEXT record, we need
		 * to pass the IAG number and inode extent
		 * index (within that IAG) from which the
		 * extent is being released.  These have been
		 * passed to us in the iplist[1] and iplist[2].
		 */
		lrd->log.noredoinoext.iagnum =
		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
		lrd->log.noredoinoext.inoext_idx =
		    cpu_to_le32((u32) (size_t) cd->iplist[2]);

		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else
		jfs_err("diLog: UFO type tlck:0x%p", tlck);
#ifdef  _JFS_WIP
	/*
	 *	alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif				/* _JFS_WIP */

	return rc;
}
/*
 * dataLog()
 *
 * function:	log data tlock
 */
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck)
{
	struct metapage *mp;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/* log after-image for logredo(): */
	lrd->type = cpu_to_le16(LOG_REDOPAGE);

	if (JFS_IP(tlck->ip)->next_index < MAX_INLINE_DIRTABLE_ENTRY) {
		/*
		 * The table has been truncated, we must have deleted
		 * the last entry, so don't bother logging this
		 */
		mp->lid = 0;
		hold_metapage(mp, 0);
		atomic_dec(&mp->nohomeok);
		discard_metapage(mp);
		tlck->mp = NULL;
		return 0;
	}

	PXDaddress(pxd, mp->index);
	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);

	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

	/* mark page as homeward bound */
	tlck->flag |= tlckWRITEPAGE;

	return 0;
}
/*
 * dtLog()
 *
 * function:	log dtree tlock and format maplock to update bmap;
 */
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck)
{
	struct metapage *mp;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT)
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);

	/*
	 *	page extension via relocation: entry insertion;
	 *	page extension in-place: entry insertion;
	 *	new right page from page split, reinitialized in-line
	 *	root from root page split: entry insertion;
	 */
	if (tlck->type & (tlckNEW | tlckEXTEND)) {
		/* log after-image of the new page for logredo():
		 * mark log (LOG_NEW) for logredo() to initialize
		 * freelist and update bmap for alloc of the new page;
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		if (tlck->type & tlckEXTEND)
			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
		else
			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP for
		 * alloc of the new page;
		 */
		if (tlck->type & tlckBTROOT)
			return;
		tlck->flag |= tlckUPDATEMAP;
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckALLOCPXD;
		pxdlock->pxd = *pxd;

		pxdlock->index = 1;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}
	/*
	 *	entry insertion/deletion,
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckENTRY | tlckRELINK)) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 *	page deletion: page has been invalidated
	 *	page relocation: source extent
	 *
	 *	(a maplock for free of the page has been formatted
	 *	at txLock() time);
	 */
	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
		/* log LOG_NOREDOPAGE of the deleted page for logredo()
		 * to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	return;
}
/*
 * xtLog()
 *
 * function:	log xtree tlock and format maplock to update bmap;
 */
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	xtpage_t *p;
	struct xtlock *xtlck;
	struct maplock *maplock;
	struct xdlistlock *xadlock;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;
	int next, lwm, hwm;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT) {
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
		p = &JFS_IP(ip)->i_xtroot;
		if (S_ISDIR(ip->i_mode))
			lrd->log.redopage.type |=
			    cpu_to_le16(LOG_DIR_XTREE);
	} else
		p = (xtpage_t *) mp->data;
	next = le16_to_cpu(p->header.nextindex);

	xtlck = (struct xtlock *) & tlck->lock;

	maplock = (struct maplock *) & tlck->lock;
	xadlock = (struct xdlistlock *) maplock;
	/*
	 *	entry insertion/extension;
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
		/* log after-image for logredo():
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP
		 * for alloc of new/extended extents of XAD[lwm:next)
		 * from the page itself;
		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
		 */
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;

		if (lwm == next)
			goto out;
		assert(lwm < next);
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckALLOCXADLIST;
		xadlock->count = next - lwm;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[lwm],
			       sizeof(xad_t) * xadlock->count);

			for (i = 0; i < xadlock->count; i++)
				p->xad[lwm + i].flag &=
				    ~(XAD_NEW | XAD_EXTENDED);
		} else {
			/*
			 * xdlist will point into inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[lwm];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);

		maplock->index = 1;

	      out:
		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;

		return;
	}
	/*
	 *	page deletion: file deletion/truncation (ref. xtTruncate())
	 *
	 * (page will be invalidated after log is written and bmap
	 * is updated from the page);
	 */
	if (tlck->type & tlckFREE) {
		/* LOG_NOREDOPAGE log for NoRedoPage filter:
		 * if page free from file delete, NoRedoFile filter from
		 * inode image of zero link count will subsume NoRedoPage
		 * filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
		 * if page free from deletion and truncation, LOG_UPDATEMAP
		 * log for the page itself is generated from processing
		 * its parent page xad entries;
		 */

		/* if page free from file truncation, log LOG_NOREDOPAGE
		 * of the deleted page for logredo() to start NoRedoPage
		 * filter for the page;
		 */
		if (tblk->xflag & COMMIT_TRUNCATE) {
			/* write NOREDOPAGE for the page */
			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));

			if (tlck->type & tlckBTROOT) {
				/* Empty xtree must be logged */
				lrd->type = cpu_to_le16(LOG_REDOPAGE);
				lrd->backchain =
				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
			}
		}
		/* init LOG_UPDATEMAP of the freed extents
		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
		 * for logredo() to update bmap;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
		xtlck = (struct xtlock *) & tlck->lock;
		hwm = xtlck->hwm.offset;
		lrd->log.updatemap.nxd =
		    cpu_to_le16(hwm - XTENTRYSTART + 1);
		/* reformat linelock for lmLog() */
		xtlck->header.offset = XTENTRYSTART;
		xtlck->header.length = hwm - XTENTRYSTART + 1;
		xtlck->index = 1;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bmap
		 * to free extents of XAD[XTENTRYSTART:hwm) from the
		 * deleted page itself;
		 */
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckFREEXADLIST;
		xadlock->count = hwm - XTENTRYSTART + 1;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
			       sizeof(xad_t) * xadlock->count);
		} else {
			/*
			 * xdlist will point into inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[XTENTRYSTART];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
			 tlck->ip, mp, xadlock->count);

		maplock->index = 1;
		/* mark page as invalid */
		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
		    && !(tlck->type & tlckBTROOT))
			tlck->flag |= tlckFREEPAGE;
		/*
		   else (tblk->xflag & COMMIT_PMAP)
		   ? release the page;
		 */
		return;
	}

	/*
	 *	page/entry truncation: file truncation (ref. xtTruncate())
	 *
	 *	|----------+------+------+---------------|
	 *	|          |      |
	 *	|          |     hwm - hwm before truncation
	 *	|         next - truncation point
	 *	lwm - lwm before truncation
	 *	header ?
	 */
	if (tlck->type & tlckTRUNCATE) {
		pxd_t tpxd;	/* truncated extent of xad */
		int twm;

		/*
		 * For truncation the entire linelock may be used, so it would
		 * be difficult to store xad list in linelock itself.
		 * Therefore, we'll just force transaction to be committed
		 * synchronously, so that xtree pages won't be changed before
		 * txUpdateMap runs.
		 */
		tblk->xflag &= ~COMMIT_LAZY;
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;
		hwm = xtlck->hwm.offset;
		twm = xtlck->twm.offset;

		/*
		 *	write log records
		 */

		/*
		 * allocate entries XAD[lwm:next]:
		 */
		if (lwm < next) {
			/* log after-image for logredo():
			 * logredo() will update bmap for alloc of new/extended
			 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
			 * after-image of XADlist;
			 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
			 * applying the after-image to the meta-data page.
			 */
			lrd->type = cpu_to_le16(LOG_REDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}
		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			/* init LOG_UPDATEMAP for logredo() to update bmap for
			 * free of truncated delta extent of the truncated
			 * entry XAD[next - 1]:
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
			/* assert(pxdlock->type & tlckTRUNCATE); */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			tpxd = pxdlock->pxd;	/* save to format maplock */
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* init LOG_UPDATEMAP of the freed extents
			 * XAD[next:hwm] from the deleted page itself
			 * for logredo() to update bmap;
			 */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type =
			    cpu_to_le16(LOG_FREEXADLIST);
			xtlck = (struct xtlock *) & tlck->lock;
			hwm = xtlck->hwm.offset;
			lrd->log.updatemap.nxd =
			    cpu_to_le16(hwm - next + 1);
			/* reformat linelock for lmLog() */
			xtlck->header.offset = next;
			xtlck->header.length = hwm - next + 1;
			xtlck->index = 1;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}
		/*
		 *	format maplock(s) for txUpdateMap() to update bmap
		 */
		maplock->index = 0;

		/*
		 * allocate entries XAD[lwm:next):
		 */
		if (lwm < next) {
			/* format a maplock for txUpdateMap() to update bPMAP
			 * for alloc of new/extended extents of XAD[lwm:next)
			 * from the page itself;
			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->count = next - lwm;
			xadlock->xdlist = &p->xad[lwm];

			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
				 "lwm:%d next:%d",
				 tlck->ip, mp, xadlock->count, lwm, next);
			maplock->index++;
			xadlock++;
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			struct pxd_lock *pxdlock;

			/* format a maplock for txUpdateMap() to update bmap
			 * to free truncated delta extent of the truncated
			 * entry XAD[next - 1];
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			tlck->flag |= tlckUPDATEMAP;
			pxdlock = (struct pxd_lock *) xadlock;
			pxdlock->flag = mlckFREEPXD;
			pxdlock->count = 1;
			pxdlock->pxd = tpxd;

			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
				 "hwm:%d", ip, mp, pxdlock->count, hwm);
			maplock->index++;
			xadlock++;
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* format a maplock for txUpdateMap() to update bmap
			 * to free extents of XAD[next:hwm] from the deleted
			 * page itself;
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckFREEXADLIST;
			xadlock->count = hwm - next + 1;
			xadlock->xdlist = &p->xad[next];

			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
				 "next:%d hwm:%d",
				 tlck->ip, mp, xadlock->count, next, hwm);
			maplock->index++;
		}

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	}
	return;
}
/*
 * mapLog()
 *
 * function:	log from maplock of freed data extents;
 */
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
	pxd_t *pxd;

	/*
	 *	page relocation: free the source page extent
	 *
	 * a maplock for txUpdateMap() for free of the page
	 * has been formatted at txLock() time saving the src
	 * relocated page address;
	 */
	if (tlck->type & tlckRELOCATE) {
		/* log LOG_NOREDOPAGE of the old relocated page
		 * for logredo() to start NoRedoPage filter;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxd = &lrd->log.redopage.pxd;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		/* (N.B. currently, logredo() does NOT update bmap
		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
		 * if page free from relocation, LOG_UPDATEMAP log is
		 * specifically generated now for logredo()
		 * to update bmap for free of src relocated page;
		 * (new flag LOG_RELOCATE may be introduced which will
		 * inform logredo() to start NORedoPage filter and also
		 * update block allocation map at the same time, thus
		 * avoiding an extra log write);
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
		lrd->log.updatemap.nxd = cpu_to_le16(1);
		lrd->log.updatemap.pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}

	/*
	 * Otherwise it's not a relocate request
	 *
	 */
	else {
		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * free of truncated/relocated delta extent of the data;
		 * e.g.: external EA extent, relocated/truncated extent
		 * from xtTailgate();
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
				 (ulong) addressPXD(&pxdlock->pxd),
				 lengthPXD(&pxdlock->pxd));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
}
/*
 * txEA()
 *
 * function:	acquire maplock for EA/ACL extents or
 *		set COMMIT_INLINE flag;
 */
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;

	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
			pxdlock++;
			maplock->index = 1;
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;

			set_cflag(COMMIT_Inlineea, ip);
		}
	}

	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			maplock->index = 0;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
		maplock->index++;
	}
}
/*
 * txForce()
 *
 * function: synchronously write pages locked by transaction
 *	     after txLog() but before txUpdateMap();
 */
static void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */
				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(atomic_read(&mp->nohomeok));
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
			}
		}
	}
}
/*
 * txUpdateMap()
 *
 * function: update persistent allocation map (and working map
 *	     if appropriate);
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = 0;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
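	/*
	 * Background (illustrative): JFS keeps two block allocation maps.
	 * The working map (wmap) is updated at allocation time and tracks
	 * in-flight state; the persistent map (pmap) is only updated here,
	 * after the commit record has been logged, so logredo() can rebuild
	 * a consistent map after a crash.
	 */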
	/*
	 *	update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlock for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			hold_metapage(mp, 0);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;
		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */

				if (S_ISDIR(tlck->ip->i_mode))
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}

		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(atomic_read(&mp->nohomeok) == 1);
			atomic_dec(&mp->nohomeok);
			discard_metapage(mp);
			tlck->mp = NULL;
		}
	}
	/*
	 *	update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		diUpdatePMap(ipimap, tblk->ino, FALSE, tblk);
		ipimap->i_state |= I_DIRTY;
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = tblk->u.ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->u.ip;
		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
		ipimap->i_state |= I_DIRTY;
		iput(ip);
	}
}
/*
 * txAllocPMap()
 *
 * function: allocate from persistent map;
 *
 * parameter:
 *	maplock	- xad list or pxd;
 *	maptype	- allocate from persistent map;
 *		  free from persistent map;
 *		  (e.g., tmp file - free from working map at release
 *		   of last reference);
 *		  free from persistent and working map;
 *	lsn	- log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, FALSE, xaddr,
					     (s64) xlen, tblk);
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		}
	}
}
/*
 * txFreeMap()
 *
 * function:	free from persistent and/or working map;
 *
 * todo: optimization
 */
void txFreeMap(struct inode *ip,
	       struct maplock * maplock, struct tblock * tblk, int maptype)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
		 tblk, maplock, maptype);

	/*
	 * free from persistent map;
	 */
	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				if (!(xad->flag & XAD_NEW)) {
					xaddr = addressXAD(xad);
					xlen = lengthXAD(xad);
					dbUpdatePMap(ipbmap, TRUE, xaddr,
						     (s64) xlen, tblk);
					jfs_info("freePMap: xaddr:0x%lx "
						 "xlen:%d",
						 (ulong) xaddr, xlen);
				}
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbUpdatePMap(ipbmap, TRUE, xaddr,
					     (s64) xlen, tblk);
				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}
2513 if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2514 if (maplock->flag & mlckFREEXADLIST) {
2515 xadlistlock = (struct xdlistlock *) maplock;
2516 xad = xadlistlock->xdlist;
2517 for (n = 0; n < xadlistlock->count; n++, xad++) {
2518 xaddr = addressXAD(xad);
2519 xlen = lengthXAD(xad);
2520 dbFree(ip, xaddr, (s64) xlen);
2522 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2523 (ulong) xaddr, xlen);
2525 } else if (maplock->flag & mlckFREEPXD) {
2526 pxdlock = (struct pxd_lock *) maplock;
2527 xaddr = addressPXD(&pxdlock->pxd);
2528 xlen = lengthPXD(&pxdlock->pxd);
2529 dbFree(ip, xaddr, (s64) xlen);
2530 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2531 (ulong) xaddr, xlen);
2532 } else { /* (maplock->flag & mlckFREEPXDLIST) */
2534 pxdlistlock = (struct xdlistlock *) maplock;
2535 pxd = pxdlistlock->xdlist;
2536 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2537 xaddr = addressPXD(pxd);
2538 xlen = lengthPXD(pxd);
2539 dbFree(ip, xaddr, (s64) xlen);
2540 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2541 (ulong) xaddr, xlen);
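/*
 * Note the asymmetry between the two halves above: the persistent-map
 * free path skips extents still flagged XAD_NEW, since an extent that
 * was never committed has no pmap bits to clear, while the working-map
 * path frees unconditionally because wmap was charged at allocation time.
 */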
2551 * function: remove tlock from inode anonymous locklist
2553 void txFreelock(struct inode *ip)
2555 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2556 struct tlock *xtlck, *tlck;
2557 lid_t xlid = 0, lid;
2559 if (!jfs_ip->atlhead)
2562 xtlck = (struct tlock *) &jfs_ip->atlhead;
2564 while ((lid = xtlck->next)) {
2565 tlck = lid_to_tlock(lid);
2566 if (tlck->flag & tlckFREELOCK) {
2567 xtlck->next = tlck->next;
2568 txLockFree(lid);
2569 } else {
2570 xtlck = tlck;
2571 xlid = lid;
2575 if (jfs_ip->atlhead)
2576 jfs_ip->atltail = xlid;
2577 else {
2578 jfs_ip->atltail = 0;
2580 * If inode was on anon_list, remove it
2582 TXN_LOCK();
2583 list_del_init(&jfs_ip->anon_inode_list);
2584 TXN_UNLOCK();
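/*
 * Editorial sketch, not original code: txFreelock() above relies on
 * "next" being the first member of struct tlock, so that casting
 * &jfs_ip->atlhead to (struct tlock *) lets the list head be updated
 * through the same ->next store as any interior element.  The same
 * sentinel pattern in miniature, with hypothetical types:
 */
#if 0
struct enode {
	u16 next;	/* index link; must stay the first member */
	int keep;
};
static struct enode etable[16];	/* index 0 terminates, like lid 0 */

static void prune(u16 * head)
{
	struct enode *prev = (struct enode *) head;	/* sentinel */
	u16 i;

	while ((i = prev->next)) {
		if (!etable[i].keep)
			prev->next = etable[i].next;	/* unlink */
		else
			prev = &etable[i];
	}
}
#endif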
2592 * function: abort tx before commit;
2594 * frees line-locks and segment locks for all
2595 * segments in comdata structure.
2596 * Optionally sets state of file-system to FM_DIRTY in super-block.
2597 * log ages of in-memory page-frames held by the caller
2598 * are reset to 0 (to avoid logwrap).
2600 void txAbort(tid_t tid, int dirty)
2602 lid_t lid, next;
2603 struct metapage *mp;
2604 struct tblock *tblk = tid_to_tblock(tid);
2606 jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);
2609 * free tlocks of the transaction
2611 for (lid = tblk->next; lid; lid = next) {
2612 next = lid_to_tlock(lid)->next;
2614 mp = lid_to_tlock(lid)->mp;
2616 if (mp) {
2617 mp->lid = 0;
2620 * reset lsn of page to avoid logwrap:
2622 * (page may have been previously committed by another
2623 * transaction(s) but has not been paged, i.e.,
2624 * it may be on logsync list even though it has not
2625 * been logged for the current tx.)
2627 if (mp->xflag & COMMIT_PAGE && mp->lsn)
2628 LogSyncRelease(mp);
2630 /* insert tlock at head of freelist */
2631 TXN_LOCK();
2632 txLockFree(lid);
2633 TXN_UNLOCK();
2636 /* caller will free the transaction block */
2638 tblk->next = tblk->last = 0;
2641 * mark filesystem dirty
2643 if (dirty)
2644 jfs_error(tblk->sb, "txAbort");
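/*
 * Illustrative sketch, not original code: the abort path as a caller
 * typically drives it.  do_update() is a hypothetical stand-in for any
 * tlock-taking operation (xtInsert, dtInsert, ...); the rc/tid/ip
 * declarations are omitted.
 */
#if 0
	tid = txBegin(ip->i_sb, 0);
	if ((rc = do_update(tid, ip))) {
		/* dirty=1 additionally marks the superblock FM_DIRTY
		 * through jfs_error()
		 */
		txAbort(tid, 1);
	} else
		rc = txCommit(tid, 1, &ip, 0);
	txEnd(tid);
#endif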
2650 * txLazyCommit(void)
2652 * All transactions except those changing ipimap (COMMIT_FORCE) are
2653 * processed by this routine. This ensures that the inode and block
2654 * allocation maps are updated in order. For synchronous transactions,
2655 * let the user thread finish processing after txUpdateMap() is called.
2657 static void txLazyCommit(struct tblock * tblk)
2659 struct jfs_log *log;
2661 while (((tblk->flag & tblkGC_READY) == 0) &&
2662 ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2663 /* We must have gotten ahead of the user thread
2665 jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2666 yield();
2669 jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2671 txUpdateMap(tblk);
2673 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2675 spin_lock_irq(&log->gclock); // LOGGC_LOCK
2677 tblk->flag |= tblkGC_COMMITTED;
2679 if (tblk->flag & tblkGC_READY)
2680 log->gcrtc--;
2682 wake_up_all(&tblk->gcwait); // LOGGC_WAKEUP
2685 * Can't release log->gclock until we've tested tblk->flag
2687 if (tblk->flag & tblkGC_LAZY) {
2688 spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
2689 txUnlock(tblk);
2690 tblk->flag &= ~tblkGC_LAZY;
2691 txEnd(tblk - TxBlock); /* Convert back to tid */
2692 } else
2693 spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
2695 jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
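/*
 * Editorial sketch, not original code: the waiting half of the gcwait
 * handshake lives in the group-commit path (lmGroupCommit() in
 * jfs_logmgr.c).  In rough outline, a synchronous committer registers
 * itself and sleeps until txLazyCommit() above sets tblkGC_COMMITTED:
 */
#if 0
	spin_lock_irq(&log->gclock);	// LOGGC_LOCK
	tblk->flag |= tblkGC_READY;
	log->gcrtc++;			/* one more tx waiting for commit */
	while (!(tblk->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(wq, current);

		add_wait_queue(&tblk->gcwait, &wq);
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&log->gclock);
		schedule();		/* woken by LOGGC_WAKEUP above */
		spin_lock_irq(&log->gclock);
		remove_wait_queue(&tblk->gcwait, &wq);
	}
	spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
#endif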
2699 * jfs_lazycommit(void)
2701 * To be run as a kernel daemon. If lbmIODone is called in an interrupt
2702 * context, or where blocking is not wanted, this routine will process
2703 * committed transactions from the unlock queue.
2705 int jfs_lazycommit(void *arg)
2707 int WorkDone;
2708 struct tblock *tblk;
2709 unsigned long flags;
2711 daemonize("jfsCommit");
2713 jfsCommitTask = current;
2715 LAZY_LOCK_INIT();
2716 TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;
2718 complete(&jfsIOwait);
2720 do {
2721 LAZY_LOCK(flags);
2722 restart:
2723 WorkDone = 0;
2724 while ((tblk = TxAnchor.unlock_queue)) {
2726 * We can't get ahead of user thread. Spinning is
2727 * simpler than blocking/waking. We shouldn't spin
2728 * very long, since user thread shouldn't be blocking
2729 * between lmGroupCommit & txEnd.
2731 WorkDone = 1;
2734 * Remove first transaction from queue
2736 TxAnchor.unlock_queue = tblk->cqnext;
2737 tblk->cqnext = 0;
2738 if (TxAnchor.unlock_tail == tblk)
2739 TxAnchor.unlock_tail = 0;
2741 LAZY_UNLOCK(flags);
2742 txLazyCommit(tblk);
2745 * We can be running indefinitely if other processors
2746 * are adding transactions to this list
2748 cond_resched();
2749 LAZY_LOCK(flags);
2752 if (WorkDone)
2753 goto restart;
2755 if (current->flags & PF_FREEZE) {
2756 LAZY_UNLOCK(flags);
2757 refrigerator(PF_FREEZE);
2758 } else {
2759 DECLARE_WAITQUEUE(wq, current);
2761 add_wait_queue(&jfs_commit_thread_wait, &wq);
2762 set_current_state(TASK_INTERRUPTIBLE);
2763 LAZY_UNLOCK(flags);
2764 schedule();
2765 current->state = TASK_RUNNING;
2766 remove_wait_queue(&jfs_commit_thread_wait, &wq);
2768 } while (!jfs_stop_threads);
2770 if (TxAnchor.unlock_queue)
2771 jfs_err("jfs_lazycommit being killed w/pending transactions!");
2772 else
2773 jfs_info("jfs_lazycommit being killed");
2774 complete_and_exit(&jfsIOwait, 0);
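/*
 * txLazyUnlock()
 *
 * function: append tblk to the tail of the unlock_queue and wake the
 * jfsCommit thread (jfs_lazycommit above), which will perform the map
 * updates and release the transaction on the caller's behalf.
 */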
2777 void txLazyUnlock(struct tblock * tblk)
2779 unsigned long flags;
2781 LAZY_LOCK(flags);
2783 if (TxAnchor.unlock_tail)
2784 TxAnchor.unlock_tail->cqnext = tblk;
2785 else
2786 TxAnchor.unlock_queue = tblk;
2787 TxAnchor.unlock_tail = tblk;
2788 tblk->cqnext = 0;
2789 LAZY_UNLOCK(flags);
2790 wake_up(&jfs_commit_thread_wait);
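/*
 * LogSyncRelease()
 *
 * function: drop one nohomeok reference on a metapage; when the last
 * reference goes away, take the page off the log's synclist (so its
 * lsn no longer pins the log sync point) and release it.
 */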
2793 static void LogSyncRelease(struct metapage * mp)
2795 struct jfs_log *log = mp->log;
2797 assert(atomic_read(&mp->nohomeok));
2798 assert(log);
2799 atomic_dec(&mp->nohomeok);
2801 if (atomic_read(&mp->nohomeok))
2802 return;
2804 hold_metapage(mp, 0);
2806 LOGSYNC_LOCK(log);
2807 mp->log = NULL;
2808 mp->lsn = 0;
2809 mp->clsn = 0;
2810 log->count--;
2811 list_del_init(&mp->synclist);
2812 LOGSYNC_UNLOCK(log);
2814 release_metapage(mp);
2820 * Block all new transactions and push anonymous transactions to
2821 * completion
2823 * This does almost the same thing as jfs_sync below. We don't
2824 * worry about deadlocking when TlocksLow is set, since we would
2825 * expect jfs_sync to get us out of that jam.
2827 void txQuiesce(struct super_block *sb)
2829 struct inode *ip;
2830 struct jfs_inode_info *jfs_ip;
2831 struct jfs_log *log = JFS_SBI(sb)->log;
2832 tid_t tid;
2834 set_bit(log_QUIESCE, &log->flag);
2836 TXN_LOCK();
2837 restart:
2838 while (!list_empty(&TxAnchor.anon_list)) {
2839 jfs_ip = list_entry(TxAnchor.anon_list.next,
2840 struct jfs_inode_info,
2841 anon_inode_list);
2842 ip = &jfs_ip->vfs_inode;
2845 * inode will be removed from anonymous list
2846 * when it is committed
2848 TXN_UNLOCK();
2849 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2850 down(&jfs_ip->commit_sem);
2851 txCommit(tid, 1, &ip, 0);
2852 txEnd(tid);
2853 up(&jfs_ip->commit_sem);
2855 * Just to be safe. I don't know how
2856 * long we can run without blocking
2858 cond_resched();
2859 TXN_LOCK();
2863 * If jfs_sync is running in parallel, there could be some inodes
2864 * on anon_list2. Let's check.
2866 if (!list_empty(&TxAnchor.anon_list2)) {
2867 list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2868 INIT_LIST_HEAD(&TxAnchor.anon_list2);
2869 goto restart;
2871 TXN_UNLOCK();
2874 * We may need to kick off the group commit
2876 jfs_flush_journal(log, 0);
2880 * txResume()
2882 * Allows transactions to start again following txQuiesce
2884 void txResume(struct super_block *sb)
2886 struct jfs_log *log = JFS_SBI(sb)->log;
2888 clear_bit(log_QUIESCE, &log->flag);
2889 TXN_WAKEUP(&log->syncwait);
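/*
 * Illustrative sketch, not original code: txQuiesce()/txResume() are
 * used as a bracketing pair around work that needs a transaction-free
 * filesystem (e.g. the volume-resize path).  A hypothetical caller,
 * with do_offline_work() standing in for the critical section:
 */
#if 0
static int example_quiesced_op(struct super_block *sb)
{
	int rc;

	txQuiesce(sb);		/* block new tx, flush anonymous tx */
	rc = do_offline_work(sb);
	txResume(sb);		/* allow transactions to start again */

	return rc;
}
#endif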
2893 * jfs_sync(void)
2895 * To be run as a kernel daemon. This is awakened when tlocks run low.
2896 * We write any inodes that have anonymous tlocks so they will become
2897 * available.
2899 int jfs_sync(void *arg)
2901 struct inode *ip;
2902 struct jfs_inode_info *jfs_ip;
2903 int rc;
2904 tid_t tid;
2906 daemonize("jfsSync");
2908 complete(&jfsIOwait);
2910 do {
2912 * write each inode on the anonymous inode list
2914 TXN_LOCK();
2915 while (TxAnchor.TlocksLow && !list_empty(&TxAnchor.anon_list)) {
2916 jfs_ip = list_entry(TxAnchor.anon_list.next,
2917 struct jfs_inode_info,
2918 anon_inode_list);
2919 ip = &jfs_ip->vfs_inode;
2921 if (! igrab(ip)) {
2923 * Inode is being freed
2925 list_del_init(&jfs_ip->anon_inode_list);
2926 } else if (! down_trylock(&jfs_ip->commit_sem)) {
2928 * inode will be removed from anonymous list
2929 * when it is committed
2931 TXN_UNLOCK();
2932 tid = txBegin(ip->i_sb, COMMIT_INODE);
2933 rc = txCommit(tid, 1, &ip, 0);
2934 txEnd(tid);
2935 up(&jfs_ip->commit_sem);
2937 iput(ip);
2939 * Just to be safe. I don't know how
2940 * long we can run without blocking
2942 cond_resched();
2943 TXN_LOCK();
2944 } else {
2945 /* We can't get the commit semaphore. It may
2946 * be held by a thread waiting for tlock's
2947 * so let's not block here. Save it to
2948 * put back on the anon_list.
2951 /* Take off anon_list */
2952 list_del(&jfs_ip->anon_inode_list);
2954 /* Put on anon_list2 */
2955 list_add(&jfs_ip->anon_inode_list,
2956 &TxAnchor.anon_list2);
2958 TXN_UNLOCK();
2959 iput(ip);
2960 TXN_LOCK();
2963 /* Add anon_list2 back to anon_list */
2964 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2966 if (current->flags & PF_FREEZE) {
2967 TXN_UNLOCK();
2968 refrigerator(PF_FREEZE);
2969 } else {
2970 DECLARE_WAITQUEUE(wq, current);
2972 add_wait_queue(&jfs_sync_thread_wait, &wq);
2973 set_current_state(TASK_INTERRUPTIBLE);
2974 TXN_UNLOCK();
2975 schedule();
2976 current->state = TASK_RUNNING;
2977 remove_wait_queue(&jfs_sync_thread_wait, &wq);
2979 } while (!jfs_stop_threads);
2981 jfs_info("jfs_sync being killed");
2982 complete_and_exit(&jfsIOwait, 0);
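/*
 * Editorial sketch, not original code: the wake-up side of this
 * daemon.  txLockAlloc(), earlier in this file, sets TlocksLow once
 * tlocksInUse crosses TxLockHWM and then pokes jfs_sync, roughly:
 */
#if 0
	if (++TxAnchor.tlocksInUse > TxLockHWM && TxAnchor.TlocksLow == 0) {
		TxAnchor.TlocksLow = 1;
		wake_up(&jfs_sync_thread_wait);
	}
#endif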
2985 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
2986 int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
2987 int *eof, void *data)
2989 int len = 0;
2990 off_t begin;
2991 char *freewait;
2992 char *freelockwait;
2993 char *lowlockwait;
2995 freewait =
2996 waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
2997 freelockwait =
2998 waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
2999 lowlockwait =
3000 waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
3002 len += sprintf(buffer,
3003 "JFS TxAnchor\n"
3004 "============\n"
3005 "freetid = %d\n"
3006 "freewait = %s\n"
3007 "freelock = %d\n"
3008 "freelockwait = %s\n"
3009 "lowlockwait = %s\n"
3010 "tlocksInUse = %d\n"
3011 "TlocksLow = %d\n"
3012 "unlock_queue = 0x%p\n"
3013 "unlock_tail = 0x%p\n",
3014 TxAnchor.freetid,
3015 freewait,
3016 TxAnchor.freelock,
3017 freelockwait,
3018 lowlockwait,
3019 TxAnchor.tlocksInUse,
3020 TxAnchor.TlocksLow,
3021 TxAnchor.unlock_queue,
3022 TxAnchor.unlock_tail);
3024 begin = offset;
3025 *start = buffer + begin;
3026 len -= begin;
3028 if (len > length)
3029 len = length;
3030 else
3031 *eof = 1;
3033 if (len < 0)
3034 len = 0;
3036 return len;
3038 #endif
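/*
 * Editorial note: with CONFIG_JFS_DEBUG the routine above backs a
 * procfs entry (TxAnchor, in the JFS proc directory); its output looks
 * like the following, with illustrative values:
 *
 *	JFS TxAnchor
 *	============
 *	freetid = 2
 *	freewait = empty
 *	freelock = 7
 *	freelockwait = empty
 *	lowlockwait = empty
 *	tlocksInUse = 5
 *	TlocksLow = 0
 *	unlock_queue = 0x00000000
 *	unlock_tail = 0x00000000
 */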
3040 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
3041 int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
3042 int *eof, void *data)
3044 int len = 0;
3045 off_t begin;
3047 len += sprintf(buffer,
3048 "JFS TxStats\n"
3049 "===========\n"
3050 "calls to txBegin = %d\n"
3051 "txBegin blocked by sync barrier = %d\n"
3052 "txBegin blocked by tlocks low = %d\n"
3053 "txBegin blocked by no free tid = %d\n"
3054 "calls to txBeginAnon = %d\n"
3055 "txBeginAnon blocked by sync barrier = %d\n"
3056 "txBeginAnon blocked by tlocks low = %d\n"
3057 "calls to txLockAlloc = %d\n"
3058 "tLockAlloc blocked by no free lock = %d\n",
3060 TxStat.txBegin_barrier,
3061 TxStat.txBegin_lockslow,
3062 TxStat.txBegin_freetid,
3063 TxStat.txBeginAnon,
3064 TxStat.txBeginAnon_barrier,
3065 TxStat.txBeginAnon_lockslow,
3066 TxStat.txLockAlloc,
3067 TxStat.txLockAlloc_freelock);
3069 begin = offset;
3070 *start = buffer + begin;
3071 len -= begin;
3073 if (len > length)
3074 len = length;
3075 else
3076 *eof = 1;
3078 if (len < 0)
3079 len = 0;
3081 return len;
3083 #endif