/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 *	jfs_txnmgr.c: transaction manager
 *
 * notes:
 * transaction starts with txBegin() and ends with txCommit()
 * or txAbort().
 *
 * tlock is acquired at the time of update;
 * (obviate scan at commit time for xtree and dtree)
 * tlock and mp point to each other;
 * (no hashlist for mp -> tlock).
 *
 * special cases:
 * tlock on in-memory inode:
 * in-place tlock in the in-memory inode itself;
 * converted to page lock by iWrite() at commit time.
 *
 * tlock during write()/mmap() under anonymous transaction (tid = 0):
 * transferred (?) to transaction at commit time.
 *
 * use the page itself to update allocation maps
 * (obviate intermediate replication of allocation/deallocation data)
 * hold on to mp+lock thru update of maps
 */
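
/*
 * A minimal lifecycle sketch of the API below (illustrative only; error
 * handling is omitted, and "ip"/"mp" are assumed to be an inode and a
 * held metapage of that inode):
 */
#if 0
	tid_t tid;
	struct tlock *tlck;

	tid = txBegin(ip->i_sb, 0);	/* allocate tid/tblock */
	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
	/* ... modify the page, recording ranges in tlck's linelock ... */
	txCommit(tid, 1, &ip, 0);	/* write log records + commit record */
	txEnd(tid);			/* release the tblock */
#endif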
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
 *	transaction management structures
 */
static struct {
	int freetid;		/* index of a free tid structure */
	int freelock;		/* index of first free lock word */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* Number of tlocks in use */
	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
/*	struct tblock *sync_queue; * Transactions waiting for data sync */
	struct list_head unlock_queue;	/* Txns waiting to be released */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;

int jfs_tlocks_low;		/* Indicates low number of available tlocks */
#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint txBegin;
	uint txBegin_barrier;
	uint txBegin_lockslow;
	uint txBegin_freetid;
	uint txBeginAnon;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc;
	uint txLockAlloc_freelock;
} TxStat;
#endif
static int nTxBlock = 512;	/* number of transaction blocks */
module_param(nTxBlock, int, 0);
MODULE_PARM_DESC(nTxBlock,
		 "Number of transaction blocks (default:512, max:65536)");

static int nTxLock = 4096;	/* number of transaction locks */
module_param(nTxLock, int, 0);
MODULE_PARM_DESC(nTxLock,
		 "Number of transaction locks (default:4096, max:65536)");
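
/*
 * Usage note (a sketch, assuming JFS is built as a module named "jfs"):
 * both knobs may be raised at load time, e.g.
 *
 *	modprobe jfs nTxBlock=1024 nTxLock=8192
 *
 * txInit() below clamps out-of-range values (min 16/256, max 65536).
 */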
struct tblock *TxBlock;	/* transaction block table */
static int TxLockLWM;	/* Low water mark for number of txLocks used */
static int TxLockHWM;	/* High water mark for number of txLocks used */
static int TxLockVHWM;	/* Very High water mark */
struct tlock *TxLock;	/* transaction lock table */

/*
 *	transaction management lock
 */
static spinlock_t jfsTxnLock = SPIN_LOCK_UNLOCKED;

#define TXN_LOCK()		spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()		spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags)	spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);

/*
 * Retry logic exists outside these macros to protect from spurious wakeups.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}

#define TXN_WAKEUP(event) wake_up_all(event)
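
/*
 * The retry idiom looks like this at every call site (this fragment is
 * taken verbatim from txLockAlloc() below): the condition is re-tested
 * in a loop, so a spurious wakeup merely goes back to sleep.
 */
#if 0
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
#endif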
/*
 *	statistics
 */
static struct {
	tid_t maxtid;		/* 4: biggest tid ever used */
	lid_t maxlid;		/* 4: biggest lid ever used */
	int ntid;		/* 4: # of transactions performed */
	int nlid;		/* 4: # of tlocks acquired */
	int waitlock;		/* 4: # of tlock wait */
} stattx;
/*
 *	external references
 */
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern void lmSync(struct jfs_log *);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_stop_threads;

extern struct completion jfsIOwait;
/*
 *	forward references
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		 struct tlock * tlck, struct commit * cd);
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk);
static void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk,
		 struct commit * cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
 *	transaction block/lock management
 *	---------------------------------
 */

/*
 * Get a transaction lock from the free list.  If the number in use is
 * greater than the high water mark, wake up the sync daemon.  This should
 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 */
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
		jfs_info("txLockAlloc tlocks low");
		jfs_tlocks_low = 1;
		wake_up(&jfs_sync_thread_wait);
	}

	return lid;
}

static void txLockFree(lid_t lid)
{
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jfs_info("txLockFree jfs_tlocks_low no more");
		jfs_tlocks_low = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}
/*
 * NAME:	txInit()
 *
 * FUNCTION:	initialize transaction management structures
 *
 * RETURN:
 *
 * serialization: single thread at jfs_init()
 */
int txInit(void)
{
	int k, size;

	/* Verify tunable parameters */
	if (nTxBlock < 16)
		nTxBlock = 16;	/* No one should set it this low */
	if (nTxBlock > 65536)
		nTxBlock = 65536;
	if (nTxLock < 256)
		nTxLock = 256;	/* No one should set it this low */
	if (nTxLock > 65536)
		nTxLock = 65536;

	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index
	 * tid = 0 is reserved.
	 */
	TxLockLWM = (nTxLock * 4) / 10;
	TxLockHWM = (nTxLock * 8) / 10;
	TxLockVHWM = (nTxLock * 9) / 10;
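
	/*
	 * Worked example (defaults): nTxLock = 4096 gives LWM = 1638,
	 * HWM = 3276, VHWM = 3686 (integer division).  Crossing HWM in
	 * txLockAlloc() sets jfs_tlocks_low and wakes the sync thread;
	 * falling back below LWM in txLockFree() clears the flag and
	 * wakes lowlockwait sleepers.
	 */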
	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = (struct tblock *) vmalloc(size);
	if (TxBlock == NULL)
		return -ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id = tlock index
	 * tlock id = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = (struct tlock *) vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return -ENOMEM;
	}

	/* initialize tlock table */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;

	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	LAZY_LOCK_INIT();
	INIT_LIST_HEAD(&TxAnchor.unlock_queue);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}
/*
 * NAME:	txExit()
 *
 * FUNCTION:	clean up when module is unloaded
 */
/*
 * NAME:	txBegin()
 *
 * FUNCTION:	start a transaction.
 *
 * PARAMETER:	sb	- superblock
 *		flag	- force for nested tx;
 *
 * RETURN:	tid	- transaction id
 *
 * note: the force flag allows starting a tx from within a nested tx,
 * to prevent deadlock on the logsync barrier;
 */
tid_t txBegin(struct super_block *sb, int flag)
{
	tid_t t;
	struct tblock *tblk;
	struct jfs_log *log;

	jfs_info("txBegin: flag = 0x%x", flag);
	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBegin);

      retry:
	if (!(flag & COMMIT_FORCE)) {
		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag) ||
		    test_bit(log_QUIESCE, &log->flag)) {
			INCREMENT(TxStat.txBegin_barrier);
			TXN_SLEEP(&log->syncwait);
			goto retry;
		}
	}
	if (flag == 0) {
		/*
		 * Don't begin transaction if we're getting starved for tlocks
		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
		 * free tlocks)
		 */
		if (TxAnchor.tlocksInUse > TxLockVHWM) {
			INCREMENT(TxStat.txBegin_lockslow);
			TXN_SLEEP(&TxAnchor.lowlockwait);
			goto retry;
		}
	}

	/*
	 * allocate transaction id/block
	 */
	if ((t = TxAnchor.freetid) == 0) {
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	tblk = tid_to_tblock(t);

	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
		/* Don't let a non-forced transaction take the last tblk */
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	TxAnchor.freetid = tblk->next;

	/*
	 * initialize transaction
	 */

	/*
	 * We can't zero the whole thing or we screw up another thread being
	 * awakened after sleeping on tblk->waitor
	 *
	 * memset(tblk, 0, sizeof(struct tblock));
	 */
	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;

	tblk->sb = sb;
	++log->logtid;
	tblk->logtid = log->logtid;
	++log->active;

	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
	INCREMENT(stattx.ntid);	/* statistics */

	TXN_UNLOCK();

	jfs_info("txBegin: returning tid = %d", t);
	return t;
}
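
/*
 * Sketch (illustrative): a transaction started while another one is
 * already held passes COMMIT_FORCE, so the logsync-barrier and VHWM
 * checks above are bypassed and the nested tx cannot deadlock against
 * the barrier (see the note above txBegin()):
 */
#if 0
	tid_t nested_tid = txBegin(sb, COMMIT_FORCE);
#endif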
/*
 * NAME:	txBeginAnon()
 *
 * FUNCTION:	start an anonymous transaction.
 *		Blocks if logsync or available tlocks are low to prevent
 *		anonymous tlocks from depleting supply.
 *
 * PARAMETER:	sb	- superblock
 *
 * RETURN:	none
 */
void txBeginAnon(struct super_block *sb)
{
	struct jfs_log *log;

	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBeginAnon);

      retry:
	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) ||
	    test_bit(log_QUIESCE, &log->flag)) {
		INCREMENT(TxStat.txBeginAnon_barrier);
		TXN_SLEEP(&log->syncwait);
		goto retry;
	}

	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 */
	if (TxAnchor.tlocksInUse > TxLockVHWM) {
		INCREMENT(TxStat.txBeginAnon_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
		goto retry;
	}
	TXN_UNLOCK();
}
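
/*
 * Sketch of an anonymous update (the write()/mmap() path described in
 * the header comment): throttle first, then take tlocks under tid 0;
 * txCommit() later inherits them from the inode's anonymous tlock list.
 */
#if 0
	txBeginAnon(ip->i_sb);
	tlck = txLock(0, ip, mp, tlckXTREE | tlckGROW);
#endif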
/*
 * NAME:	txEnd()
 *
 * FUNCTION:	free specified transaction block.
 *
 *	logsync barrier processing:
 */
void txEnd(tid_t tid)
{
	struct tblock *tblk = tid_to_tblock(tid);
	struct jfs_log *log;

	jfs_info("txEnd: tid = %d", tid);
	TXN_LOCK();

	/*
	 * wakeup transactions waiting on the page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	log = JFS_SBI(tblk->sb)->log;

	/*
	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
	 * otherwise, we would be left with a transaction that may have been
	 * reused.
	 *
	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
	 * routine.
	 */
	if (tblk->flag & tblkGC_LAZY) {
		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
		TXN_UNLOCK();

		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
		tblk->flag |= tblkGC_UNLOCKED;
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		return;
	}

	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);

	assert(tblk->next == 0);

	/*
	 * insert tblock back on freelist
	 */
	tblk->next = TxAnchor.freetid;
	TxAnchor.freetid = tid;

	/*
	 * mark the tblock not active
	 */
	if (--log->active == 0) {
		clear_bit(log_FLUSH, &log->flag);

		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag)) {
			/* forward log syncpt */
			/* lmSync(log); */

			jfs_info("log barrier off: 0x%x", log->lsn);

			/* enable new transactions start */
			clear_bit(log_SYNCBARRIER, &log->flag);

			/* wakeup all waiters on logsync barrier */
			TXN_WAKEUP(&log->syncwait);
		}
	}

	/*
	 * wakeup all waiters on a free tblock
	 */
	TXN_WAKEUP(&TxAnchor.freewait);

	TXN_UNLOCK();
}
/*
 * NAME:	txLock()
 *
 * FUNCTION:	acquire a transaction lock on the specified <mp>
 *
 * PARAMETER:
 *
 * RETURN:	transaction lock id
 *
 * serialization:
 */
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
		     int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int dir_xtree = 0;
	lid_t lid;
	tid_t xtid;
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct linelock *linelock;
	xtpage_t *p;
	struct tblock *tblk;

	TXN_LOCK();

	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
	    !(mp->xflag & COMMIT_PAGE)) {
		/*
		 * Directory inode is special.  It can have both an xtree tlock
		 * and a dtree tlock associated with it.
		 */
		dir_xtree = 1;
		lid = jfs_ip->xtlid;
	} else
		lid = mp->lid;

	/* is page not locked by a transaction ? */
	if (lid == 0)
		goto allocateLock;

	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
	/* is page locked by the requester transaction ? */
	tlck = lid_to_tlock(lid);
	if ((xtid = tlck->tid) == tid)
		goto grantLock;

	/*
	 * is page locked by anonymous transaction/lock ?
	 *
	 * (page update without transaction (i.e., file write) is
	 * locked under anonymous transaction tid = 0:
	 * anonymous tlocks maintained on anonymous tlock list of
	 * the inode of the page and available to all anonymous
	 * transactions until txCommit() time at which point
	 * they are transferred to the transaction tlock list of
	 * the committing transaction of the inode)
	 */
	if (xtid == 0) {
		tlck->tid = tid;
		tblk = tid_to_tblock(tid);
		/*
		 * The order of the tlocks in the transaction is important
		 * (during truncate, child xtree pages must be freed before
		 * parent's tlocks change the working map).
		 * Take tlock off anonymous list and add to tail of
		 * transaction list
		 *
		 * Note:  We really need to get rid of the tid & lid and
		 * use list_head's.  This code is getting UGLY!
		 */
		if (jfs_ip->atlhead == lid) {
			if (jfs_ip->atltail == lid) {
				/* only anonymous txn.
				 * Remove from anon_list
				 */
				list_del_init(&jfs_ip->anon_inode_list);
			}
			jfs_ip->atlhead = tlck->next;
		} else {
			lid_t last;
			for (last = jfs_ip->atlhead;
			     lid_to_tlock(last)->next != lid;
			     last = lid_to_tlock(last)->next) {
				assert(last);
			}
			lid_to_tlock(last)->next = tlck->next;
			if (jfs_ip->atltail == lid)
				jfs_ip->atltail = last;
		}
		/* insert the tlock at tail of transaction tlock list */
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;

		goto grantLock;
	}

	goto waitLock;

	/*
	 * allocate a tlock
	 */
      allocateLock:
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;
	/* mark tlock for meta-data page */
	if (mp->xflag & COMMIT_PAGE) {
		tlck->flag = tlckPAGELOCK;

		/* mark the page dirty and nohomeok */
		mark_metapage_dirty(mp);
		atomic_inc(&mp->nohomeok);

		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
			 mp, atomic_read(&mp->nohomeok), tid, tlck);

		/* if anonymous transaction, and buffer is on the group
		 * commit synclist, mark inode to show this.  This will
		 * prevent the buffer from being marked nohomeok for too
		 * long a time.
		 */
		if ((tid == 0) && mp->lsn)
			set_cflag(COMMIT_Synclist, ip);
	}
	/* mark tlock for in-memory inode */
	else
		tlck->flag = tlckINODELOCK;

	tlck->type = 0;

	/* bind the tlock and the page */
	if (dir_xtree)
		jfs_ip->xtlid = lid;
	else
		mp->lid = lid;
	tlck->mp = mp;
	tlck->ip = ip;
	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}
	/* initialize type dependent area for linelock */
	linelock = (struct linelock *) & tlck->lock;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKSHORT;
	linelock->index = 0;

	switch (type & tlckTYPE) {
	case tlckDTREE:
		linelock->l2linesize = L2DTSLOTSIZE;
		break;
	case tlckXTREE:
		linelock->l2linesize = L2XTSLOTSIZE;

		xtlck = (struct xtlock *) linelock;
		xtlck->header.offset = 0;
		xtlck->header.length = 2;

		if (type & tlckNEW) {
			xtlck->lwm.offset = XTENTRYSTART;
		} else {
			if (mp->xflag & COMMIT_PAGE)
				p = (xtpage_t *) mp->data;
			else
				p = &jfs_ip->i_xtroot;
			xtlck->lwm.offset =
			    le16_to_cpu(p->header.nextindex);
		}
		xtlck->lwm.length = 0;	/* ! */
		xtlck->twm.offset = 0;
		xtlck->hwm.offset = 0;

		xtlck->index = 2;
		break;
	case tlckINODE:
		linelock->l2linesize = L2INODESLOTSIZE;
		break;
	case tlckDATA:
		linelock->l2linesize = L2DATASLOTSIZE;
		break;
	default:
		jfs_err("UFO tlock:0x%p", tlck);
	}
	/*
	 * update tlock vector
	 */
      grantLock:
	tlck->type |= type;

	TXN_UNLOCK();

	return tlck;
	/*
	 * page is being locked by another transaction:
	 */
      waitLock:
	/* Only locks on ipimap or ipaimap should reach here */
	/* assert(jfs_ip->fileset == AGGREGATE_I); */
	if (jfs_ip->fileset != AGGREGATE_I) {
		jfs_err("txLock: trying to lock locked page!");
		dump_mem("ip", ip, sizeof(struct inode));
		dump_mem("mp", mp, sizeof(struct metapage));
		dump_mem("Locker's tblk", tid_to_tblock(tid),
			 sizeof(struct tblock));
		dump_mem("Tlock", tlck, sizeof(struct tlock));
		BUG();
	}
	INCREMENT(stattx.waitlock);	/* statistics */
	release_metapage(mp);

	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
		 tid, xtid, lid);
	TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
	jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);

	return NULL;
}
/*
 * NAME:	txRelease()
 *
 * FUNCTION:	Release buffers associated with transaction locks, but don't
 *		mark homeok yet.  This allows other transactions to modify
 *		buffers, but won't let them go to disk until commit record
 *		actually gets written.
 *
 * PARAMETER:
 *
 * RETURN:	Errors from subroutines.
 */
static void txRelease(struct tblock * tblk)
{
	struct metapage *mp;
	lid_t lid;
	struct tlock *tlck;

	TXN_LOCK();

	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);
			mp->lid = 0;
		}
	}

	/*
	 * wakeup transactions waiting on a page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	TXN_UNLOCK();
}
/*
 * NAME:	txUnlock()
 *
 * FUNCTION:	Initiates pageout of pages modified by tid in journalled
 *		objects and frees their lockwords.
 */
static void txUnlock(struct tblock * tblk)
{
	struct tlock *tlck;
	struct linelock *linelock;
	lid_t lid, next, llid, k;
	struct metapage *mp;
	struct jfs_log *log;
	int difft, diffp;

	jfs_info("txUnlock: tblk = 0x%p", tblk);
	log = JFS_SBI(tblk->sb)->log;

	/*
	 * mark page under tlock homeok (its log has been written):
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);

		/* unbind page from tlock */
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			/* hold buffer
			 *
			 * It's possible that someone else has the metapage.
			 * The only things we're changing are nohomeok, which
			 * is handled atomically, and clsn which is protected
			 * by the LOGSYNC_LOCK.
			 */
			hold_metapage(mp, 1);

			assert(atomic_read(&mp->nohomeok) > 0);
			atomic_dec(&mp->nohomeok);

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log);
			if (mp->clsn) {
				logdiff(difft, tblk->clsn, log);
				logdiff(diffp, mp->clsn, log);
				if (difft > diffp)
					mp->clsn = tblk->clsn;
			} else
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);

			assert(!(tlck->flag & tlckFREEPAGE));

			if (tlck->flag & tlckWRITEPAGE) {
				write_metapage(mp);
			} else {
				/* release page which has been forced */
				release_metapage(mp);
			}
		}

		/* insert tlock, and linelock(s) of the tlock if any,
		 * at head of freelist
		 */
		TXN_LOCK();

		llid = ((struct linelock *) & tlck->lock)->next;
		while (llid) {
			linelock = (struct linelock *) lid_to_tlock(llid);
			k = linelock->next;
			txLockFree(llid);
			llid = k;
		}
		txLockFree(lid);

		TXN_UNLOCK();
	}
	tblk->next = tblk->last = 0;

	/*
	 * remove tblock from logsynclist
	 * (allocation map pages inherited lsn of tblk and
	 * have been inserted in logsync list at txUpdateMap())
	 */
	if (tblk->lsn) {
		LOGSYNC_LOCK(log);
		log->count--;
		list_del(&tblk->synclist);
		LOGSYNC_UNLOCK(log);
	}
}
/*
 *	txMaplock()
 *
 * function: allocate a transaction lock for freed page/entry;
 *	for freed page, maplock is used as xtlock/dtlock type;
 */
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	lid_t lid;
	struct tblock *tblk;
	struct tlock *tlck;
	struct maplock *maplock;

	TXN_LOCK();

	/*
	 * allocate a tlock
	 */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	tlck->ip = ip;
	tlck->mp = NULL;

	tlck->type = type;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	TXN_UNLOCK();

	/* initialize type dependent area for maplock */
	maplock = (struct maplock *) & tlck->lock;
	maplock->next = 0;
	maplock->maxcnt = 0;
	maplock->index = 0;

	return tlck;
}
/*
 *	txLinelock()
 *
 * function: allocate a transaction lock for log vector list
 */
struct linelock *txLinelock(struct linelock * tlock)
{
	lid_t lid;
	struct tlock *tlck;
	struct linelock *linelock;

	TXN_LOCK();

	/* allocate a TxLock structure */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	TXN_UNLOCK();

	/* initialize linelock */
	linelock = (struct linelock *) tlck;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;

	/* append linelock after tlock */
	linelock->next = tlock->next;
	tlock->next = lid;

	return linelock;
}
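
/*
 * Sketch of the intended use (the pattern callers follow when a tlock's
 * inline linelock fills up): once index reaches maxcnt, chain a fresh
 * lock word and continue recording ranges in the new linelock.
 */
#if 0
	if (linelock->index >= linelock->maxcnt)
		linelock = txLinelock(linelock);
#endif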
/*
 *	transaction commit management
 *	-----------------------------
 */
/*
 * NAME:	txCommit()
 *
 * FUNCTION:	commit the changes to the objects specified in
 *		clist.  For journalled segments only the
 *		changes of the caller are committed, ie by tid.
 *		for non-journalled segments the data are flushed to
 *		disk and then the change to the disk inode and indirect
 *		blocks committed (so blocks newly allocated to the
 *		segment will be made a part of the segment atomically).
 *
 *		all of the segments specified in clist must be in
 *		one file system.  no more than 6 segments are needed
 *		to handle all unix svcs.
 *
 *		if the i_nlink field (i.e. disk inode link count)
 *		is zero, and the type of inode is a regular file or
 *		directory, or symbolic link, the inode is truncated
 *		to zero length.  the truncation is committed but the
 *		VM resources are unaffected until it is closed (see
 *		iput and iclose).
 *
 * PARAMETER:
 *
 * RETURN:
 *
 * serialization:
 *		on entry the inode lock on each segment is assumed
 *		to be held.
 */
int txCommit(tid_t tid,		/* transaction identifier */
	     int nip,		/* number of inodes to commit */
	     struct inode **iplist,	/* list of inode to commit */
	     int flag)
{
	int rc = 0;
	struct commit cd;
	struct jfs_log *log;
	struct tblock *tblk;
	struct lrd *lrd;
	int lsn;
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int k, n;
	ino_t top;
	struct super_block *sb;

	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
	/* is read-only file system ? */
	if (isReadOnly(iplist[0])) {
		rc = -EROFS;
		goto TheEnd;
	}

	sb = cd.sb = iplist[0]->i_sb;
	cd.tid = tid;

	if (tid == 0)
		tid = txBegin(sb, 0);
	tblk = tid_to_tblock(tid);

	/*
	 * initialize commit structure
	 */
	log = JFS_SBI(sb)->log;
	cd.log = log;

	/* initialize log record descriptor in commit */
	lrd = &cd.lrd;
	lrd->logtid = cpu_to_le32(tblk->logtid);
	lrd->backchain = 0;

	tblk->xflag |= flag;

	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
		tblk->xflag |= COMMIT_LAZY;
	/*
	 * prepare non-journaled objects for commit
	 *
	 * flush data pages of non-journaled file
	 * to prevent the file getting non-initialized disk blocks
	 * upon a crash.
	 */
	cd.iplist = iplist;
	cd.nip = nip;

	/*
	 * acquire transaction lock on (on-disk) inodes
	 *
	 * update on-disk inode from in-memory inode
	 * acquiring transaction locks for AFTER records
	 * on the on-disk inode of file object
	 *
	 * sort the inodes array by inode number in descending order
	 * to prevent deadlock when acquiring transaction lock
	 * of on-disk inodes on multiple on-disk inode pages by
	 * multiple concurrent transactions
	 */
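	/*
	 * Worked example (hypothetical inode numbers): iplist {12, 57, 33}
	 * sorts to {57, 33, 12}, so concurrent transactions always lock
	 * shared on-disk inode pages in the same descending order.
	 */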
	for (k = 0; k < cd.nip; k++) {
		top = (cd.iplist[k])->i_ino;
		for (n = k + 1; n < cd.nip; n++) {
			ip = cd.iplist[n];
			if (ip->i_ino > top) {
				top = ip->i_ino;
				cd.iplist[n] = cd.iplist[k];
				cd.iplist[k] = ip;
			}
		}

		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);
		/*
		 * BUGBUG - This code has temporarily been removed.  The
		 * intent is to ensure that any file data is written before
		 * the metadata is committed to the journal.  This prevents
		 * uninitialized data from appearing in a file after the
		 * journal has been replayed.  (The uninitialized data
		 * could be sensitive data removed by another user.)
		 *
		 * The problem now is that we are holding the IWRITELOCK
		 * on the inode, and calling filemap_fdatawrite on an
		 * unmapped page will cause a deadlock in jfs_get_block.
		 *
		 * The long term solution is to pare down the use of
		 * IWRITELOCK.  We are currently holding it too long.
		 * We could also be smarter about which data pages need
		 * to be written before the transaction is committed and
		 * when we don't need to worry about it at all.
		 *
		 * if ((!S_ISDIR(ip->i_mode))
		 *	&& (tblk->flag & COMMIT_DELETE) == 0) {
		 *	filemap_fdatawrite(ip->i_mapping);
		 *	filemap_fdatawait(ip->i_mapping);
		 * }
		 */
		/*
		 * Mark inode as not dirty.  It will still be on the dirty
		 * inode list, but we'll know not to commit it again unless
		 * it gets marked dirty again.
		 */
		clear_cflag(COMMIT_Dirty, ip);
		/* inherit anonymous tlock(s) of inode */
		if (jfs_ip->atlhead) {
			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
			tblk->next = jfs_ip->atlhead;
			if (!tblk->last)
				tblk->last = jfs_ip->atltail;
			jfs_ip->atlhead = jfs_ip->atltail = 0;
			TXN_LOCK();
			list_del_init(&jfs_ip->anon_inode_list);
			TXN_UNLOCK();
		}

		/*
		 * acquire transaction lock on on-disk inode page
		 * (become first tlock of the tblk's tlock list)
		 */
		if (((rc = diWrite(tid, ip))))
			goto out;
	}
	/*
	 * write log records from transaction locks
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if ((rc = txLog(log, tblk, &cd)))
		goto TheEnd;

	/*
	 * Ensure that inode isn't reused before
	 * lazy commit thread finishes processing
	 */
	if (tblk->xflag & COMMIT_DELETE) {
		atomic_inc(&tblk->u.ip->i_count);
		/*
		 * Avoid a rare deadlock
		 *
		 * If the inode is locked, we may be blocked in
		 * jfs_commit_inode.  If so, we don't want the
		 * lazy_commit thread doing the last iput() on the inode
		 * since that may block on the locked inode.  Instead,
		 * commit the transaction synchronously, so the last iput
		 * will be done by the calling thread (or later)
		 */
		if (tblk->u.ip->i_state & I_LOCK)
			tblk->xflag &= ~COMMIT_LAZY;
	}

	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
	       ((tblk->u.ip->i_nlink == 0) &&
		!test_cflag(COMMIT_Nolink, tblk->u.ip)));
	/*
	 * write COMMIT log record
	 */
	lrd->type = cpu_to_le16(LOG_COMMIT);
	lrd->length = 0;
	lsn = lmLog(log, tblk, lrd, NULL);

	lmGroupCommit(log, tblk);

	/*
	 * - transaction is now committed -
	 */

	/*
	 * force pages in careful update
	 * (imap addressing structure update)
	 */
	if (flag & COMMIT_FORCE)
		txForce(tblk);
	/*
	 * update allocation map.
	 *
	 * update inode allocation map and inode:
	 * free pager lock on memory object of inode if any.
	 * update block allocation map.
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if (tblk->xflag & COMMIT_FORCE)
		txUpdateMap(tblk);

	/*
	 * free transaction locks and pageout/free pages
	 */
	txRelease(tblk);

	if ((tblk->flag & tblkGC_LAZY) == 0)
		txUnlock(tblk);
	/*
	 * reset in-memory object state
	 */
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * reset in-memory inode state
		 */
		jfs_ip->bxflag = 0;
		jfs_ip->blid = 0;
	}

      out:
	if (rc != 0)
		txAbort(tid, 1);

      TheEnd:
	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
	return rc;
}
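
/*
 * Flag semantics in brief (per the COMMIT_LAZY logic above): with
 * neither COMMIT_FORCE nor COMMIT_SYNC set, the tx is group-committed
 * and jfs_lazycommit() later finishes map update and unlock; either
 * flag keeps the commit synchronous, and COMMIT_FORCE additionally
 * writes careful-update pages via txForce().  Illustrative call:
 */
#if 0
	rc = txCommit(tid, 1, &ip, COMMIT_SYNC);
#endif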
/*
 * NAME:	txLog()
 *
 * FUNCTION:	Writes AFTER log records for all lines modified
 *		by tid for segments specified by inodes in comdata.
 *		Code assumes only WRITELOCKS are recorded in lockwords.
 *
 * PARAMETERS:
 *
 * RETURN:
 */
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
	int rc = 0;
	struct inode *ip;
	lid_t lid;
	struct tlock *tlck;
	struct lrd *lrd = &cd->lrd;

	/*
	 * write log record(s) for each tlock of transaction,
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		tlck->flag |= tlckLOG;

		/* initialize lrd common */
		ip = tlck->ip;
		lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);

		/* write log record of page from the tlock */
		switch (tlck->type & tlckTYPE) {
		case tlckXTREE:
			xtLog(log, tblk, lrd, tlck);
			break;
		case tlckDTREE:
			dtLog(log, tblk, lrd, tlck);
			break;
		case tlckINODE:
			diLog(log, tblk, lrd, tlck, cd);
			break;
		case tlckMAP:
			mapLog(log, tblk, lrd, tlck);
			break;
		case tlckDATA:
			dataLog(log, tblk, lrd, tlck);
			break;
		default:
			jfs_err("UFO tlock:0x%p", tlck);
		}
	}

	return rc;
}
/*
 *	diLog()
 *
 * function: log inode tlock and format maplock to update bmap;
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		 struct tlock * tlck, struct commit * cd)
{
	int rc = 0;
	struct metapage *mp;
	pxd_t *pxd;
	struct pxd_lock *pxdlock;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/*
	 *	inode after image
	 */
	if (tlck->type & tlckENTRY) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else if (tlck->type & tlckFREE) {
		/*
		 *	free inode extent
		 *
		 * (pages of the freed inode extent have been invalidated and
		 * a maplock for free of the extent has been formatted at
		 * txLock() time);
		 *
		 * the tlock had been acquired on the inode allocation map page
		 * (iag) that specifies the freed extent, even though the map
		 * page is not itself logged, to prevent pageout of the map
		 * page before the log;
		 */

		/* log LOG_NOREDOINOEXT of the freed inode extent for
		 * logredo() to start NoRedoPage filters, and to update
		 * imap and bmap for free of the extent;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
		/*
		 * For the LOG_NOREDOINOEXT record, we need
		 * to pass the IAG number and inode extent
		 * index (within that IAG) from which the
		 * extent is being released.  These have been
		 * passed to us in the iplist[1] and iplist[2].
		 */
		lrd->log.noredoinoext.iagnum =
		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
		lrd->log.noredoinoext.inoext_idx =
		    cpu_to_le32((u32) (size_t) cd->iplist[2]);

		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else
		jfs_err("diLog: UFO type tlck:0x%p", tlck);
#ifdef _JFS_WIP
	/*
	 *	alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif				/* _JFS_WIP */

	return rc;
}
/*
 *	dataLog()
 *
 * function: log data tlock
 */
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck)
{
	struct metapage *mp;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/* log after-image for logredo(): */
	lrd->type = cpu_to_le16(LOG_REDOPAGE);

	if (JFS_IP(tlck->ip)->next_index < MAX_INLINE_DIRTABLE_ENTRY) {
		/*
		 * The table has been truncated, we must have deleted
		 * the last entry, so don't bother logging this
		 */
		mp->lid = 0;
		hold_metapage(mp, 0);
		atomic_dec(&mp->nohomeok);
		discard_metapage(mp);
		tlck->mp = NULL;
		return 0;
	}

	PXDaddress(pxd, mp->index);
	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);

	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

	/* mark page as homeward bound */
	tlck->flag |= tlckWRITEPAGE;

	return 0;
}
/*
 *	dtLog()
 *
 * function: log dtree tlock and format maplock to update bmap;
 */
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck)
{
	struct metapage *mp;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT)
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
	/*
	 *	page extension via relocation: entry insertion;
	 *	page extension in-place: entry insertion;
	 *	new right page from page split, reinitialized in-line
	 *	root from root page split: entry insertion;
	 */
	if (tlck->type & (tlckNEW | tlckEXTEND)) {
		/* log after-image of the new page for logredo():
		 * mark log (LOG_NEW) for logredo() to initialize
		 * freelist and update bmap for alloc of the new page;
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		if (tlck->type & tlckEXTEND)
			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
		else
			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP for
		 * alloc of the new page;
		 */
		if (tlck->type & tlckBTROOT)
			return;
		tlck->flag |= tlckUPDATEMAP;
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckALLOCPXD;
		pxdlock->pxd = *pxd;

		pxdlock->index = 1;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}
	/*
	 *	entry insertion/deletion,
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckENTRY | tlckRELINK)) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}
	/*
	 *	page deletion: page has been invalidated
	 *	page relocation: source extent
	 *
	 *	(a maplock for free of the page has been formatted
	 *	at txLock() time);
	 */
	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
		/* log LOG_NOREDOPAGE of the deleted page for logredo()
		 * to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	return;
}
/*
 *	xtLog()
 *
 * function: log xtree tlock and format maplock to update bmap;
 */
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		  struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	xtpage_t *p;
	struct xtlock *xtlck;
	struct maplock *maplock;
	struct xdlistlock *xadlock;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;
	int next, lwm, hwm;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT) {
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
		p = &JFS_IP(ip)->i_xtroot;
		if (S_ISDIR(ip->i_mode))
			lrd->log.redopage.type |=
			    cpu_to_le16(LOG_DIR_XTREE);
	} else
		p = (xtpage_t *) mp->data;
	next = le16_to_cpu(p->header.nextindex);

	xtlck = (struct xtlock *) & tlck->lock;

	maplock = (struct maplock *) & tlck->lock;
	xadlock = (struct xdlistlock *) maplock;
	/*
	 *	entry insertion/extension;
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
		/* log after-image for logredo():
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP
		 * for alloc of new/extended extents of XAD[lwm:next)
		 * from the page itself;
		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
		 */
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;

		if (lwm == next)
			goto out;
		assert(lwm < next);
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckALLOCXADLIST;
		xadlock->count = next - lwm;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[lwm],
			       sizeof(xad_t) * xadlock->count);

			for (i = 0; i < xadlock->count; i++)
				p->xad[lwm + i].flag &=
				    ~(XAD_NEW | XAD_EXTENDED);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[lwm];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);

		maplock->index = 1;

	      out:
		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;

		return;
	}
	/*
	 *	page deletion: file deletion/truncation (ref. xtTruncate())
	 *
	 *	(page will be invalidated after log is written and bmap
	 *	is updated from the page);
	 */
	if (tlck->type & tlckFREE) {
		/* LOG_NOREDOPAGE log for NoRedoPage filter:
		 * if page free from file delete, NoRedoFile filter from
		 * inode image of zero link count will subsume NoRedoPage
		 * filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
		 * if page free from deletion and truncation, LOG_UPDATEMAP
		 * log for the page itself is generated from processing
		 * its parent page xad entries;
		 */
		/* if page free from file truncation, log LOG_NOREDOPAGE
		 * of the deleted page for logredo() to start NoRedoPage
		 * filter for the page;
		 */
		if (tblk->xflag & COMMIT_TRUNCATE) {
			/* write NOREDOPAGE for the page */
			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));

			if (tlck->type & tlckBTROOT) {
				/* Empty xtree must be logged */
				lrd->type = cpu_to_le16(LOG_REDOPAGE);
				lrd->backchain =
				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
			}
		}

		/* init LOG_UPDATEMAP of the freed extents
		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
		 * for logredo() to update bmap;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
		xtlck = (struct xtlock *) & tlck->lock;
		hwm = xtlck->hwm.offset;
		lrd->log.updatemap.nxd =
		    cpu_to_le16(hwm - XTENTRYSTART + 1);
		/* reformat linelock for lmLog() */
		xtlck->header.offset = XTENTRYSTART;
		xtlck->header.length = hwm - XTENTRYSTART + 1;
		xtlck->index = 1;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bmap
		 * to free extents of XAD[XTENTRYSTART:hwm) from the
		 * deleted page itself;
		 */
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckFREEXADLIST;
		xadlock->count = hwm - XTENTRYSTART + 1;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
			       sizeof(xad_t) * xadlock->count);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[XTENTRYSTART];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
			 tlck->ip, mp, xadlock->count);

		maplock->index = 1;

		/* mark page as invalid */
		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
		    && !(tlck->type & tlckBTROOT))
			tlck->flag |= tlckFREEPAGE;
		/*
		   else (tblk->xflag & COMMIT_PMAP)
		   ?  release the page;
		 */

		return;
	}
	/*
	 *	page/entry truncation: file truncation (ref. xtTruncate())
	 *
	 *	|----------+------+------+---------------|
	 *		   |      |      |
	 *		   |      |     hwm - hwm before truncation
	 *		   |     next - truncation point
	 *		  lwm - lwm before truncation
	 *	header ?
	 */
	if (tlck->type & tlckTRUNCATE) {
		pxd_t tpxd;	/* truncated extent of xad */
		int twm;

		/*
		 * For truncation the entire linelock may be used, so it would
		 * be difficult to store xad list in linelock itself.
		 * Therefore, we'll just force transaction to be committed
		 * synchronously, so that xtree pages won't be changed before
		 * txUpdateMap runs.
		 */
		tblk->xflag &= ~COMMIT_LAZY;
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;
		hwm = xtlck->hwm.offset;
		twm = xtlck->twm.offset;
		/*
		 *	write log records
		 */

		/*
		 * allocate entries XAD[lwm:next]:
		 */
		if (lwm < next) {
			/* log after-image for logredo():
			 * logredo() will update bmap for alloc of new/extended
			 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
			 * after-image of XADlist;
			 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
			 * applying the after-image to the meta-data page.
			 */
			lrd->type = cpu_to_le16(LOG_REDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}
		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			/* init LOG_UPDATEMAP for logredo() to update bmap for
			 * free of truncated delta extent of the truncated
			 * entry XAD[next - 1]:
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
			/* assert(pxdlock->type & tlckTRUNCATE); */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			tpxd = pxdlock->pxd;	/* save to format maplock */
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}
		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* init LOG_UPDATEMAP of the freed extents
			 * XAD[next:hwm] from the deleted page itself
			 * for logredo() to update bmap;
			 */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type =
			    cpu_to_le16(LOG_FREEXADLIST);
			xtlck = (struct xtlock *) & tlck->lock;
			hwm = xtlck->hwm.offset;
			lrd->log.updatemap.nxd =
			    cpu_to_le16(hwm - next + 1);
			/* reformat linelock for lmLog() */
			xtlck->header.offset = next;
			xtlck->header.length = hwm - next + 1;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}
		/*
		 *	format maplock(s) for txUpdateMap() to update bmap
		 */
		maplock->index = 0;

		/*
		 * allocate entries XAD[lwm:next):
		 */
		if (lwm < next) {
			/* format a maplock for txUpdateMap() to update bPMAP
			 * for alloc of new/extended extents of XAD[lwm:next)
			 * from the page itself;
			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->count = next - lwm;
			xadlock->xdlist = &p->xad[lwm];

			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
				 "lwm:%d next:%d",
				 tlck->ip, mp, xadlock->count, lwm, next);
			maplock->index++;
			xadlock++;
		}
		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			struct pxd_lock *pxdlock;

			/* format a maplock for txUpdateMap() to update bmap
			 * to free truncated delta extent of the truncated
			 * entry XAD[next - 1];
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			tlck->flag |= tlckUPDATEMAP;
			pxdlock = (struct pxd_lock *) xadlock;
			pxdlock->flag = mlckFREEPXD;
			pxdlock->count = 1;
			pxdlock->pxd = tpxd;

			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
				 "hwm:%d", ip, mp, pxdlock->count, hwm);
			maplock->index++;
			xadlock++;
		}
		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* format a maplock for txUpdateMap() to update bmap
			 * to free extents of XAD[next:hwm] from the deleted
			 * page itself;
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckFREEXADLIST;
			xadlock->count = hwm - next + 1;
			xadlock->xdlist = &p->xad[next];

			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
				 "next:%d hwm:%d",
				 tlck->ip, mp, xadlock->count, next, hwm);
			maplock->index++;
		}

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	}
	return;
}
/*
 *	mapLog()
 *
 * function: log from maplock of freed data extents;
 */
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
	pxd_t *pxd;
	/*
	 *	page relocation: free the source page extent
	 *
	 * a maplock for txUpdateMap() for free of the page
	 * has been formatted at txLock() time saving the src
	 * relocated page address;
	 */
	if (tlck->type & tlckRELOCATE) {
		/* log LOG_NOREDOPAGE of the old relocated page
		 * for logredo() to start NoRedoPage filter;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxd = &lrd->log.redopage.pxd;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* (N.B. currently, logredo() does NOT update bmap
		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
		 * if page free from relocation, LOG_UPDATEMAP log is
		 * specifically generated now for logredo()
		 * to update bmap for free of src relocated page;
		 * (new flag LOG_RELOCATE may be introduced which will
		 * inform logredo() to start NORedoPage filter and also
		 * update block allocation map at the same time, thus
		 * avoiding an extra log write);
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
		lrd->log.updatemap.nxd = cpu_to_le16(1);
		lrd->log.updatemap.pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
		return;
	}
	/*
	 *	Otherwise it's not a relocate request
	 */
	else {
		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * free of truncated/relocated delta extent of the data;
		 * e.g.: external EA extent, relocated/truncated extent
		 * from xtTailgate();
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
				 (ulong) addressPXD(&pxdlock->pxd),
				 lengthPXD(&pxdlock->pxd));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
}
/*
 *	txEA()
 *
 * function: acquire maplock for EA/ACL extents or
 *	     set COMMIT_INLINE flag;
 */
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;
	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
			pxdlock++;
			maplock->index = 1;
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;

			set_cflag(COMMIT_Inlineea, ip);
		}
	}
	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			maplock->index = 0;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
		maplock->index++;
	}
}
/*
 *	txForce()
 *
 * function: synchronously write pages locked by transaction
 *	     after txLog() but before txUpdateMap();
 */
void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */

				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(atomic_read(&mp->nohomeok));
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
			}
		}
	}
}
/*
 *	txUpdateMap()
 *
 * function: update persistent allocation map (and working map
 *	     if appropriate);
 *
 * parameter:
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = 0;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
	/*
	 *	update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlock for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			hold_metapage(mp, 0);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;
		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */

				if (S_ISDIR(tlck->ip->i_mode))
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}
		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(atomic_read(&mp->nohomeok) == 1);
			atomic_dec(&mp->nohomeok);
			discard_metapage(mp);
			tlck->mp = NULL;
		}
	}
	/*
	 *	update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		diUpdatePMap(ipimap, tblk->ino, FALSE, tblk);
		ipimap->i_state |= I_DIRTY;
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = tblk->u.ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->u.ip;
		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
		ipimap->i_state |= I_DIRTY;
		iput(ip);
	}
}
/*
 *	txAllocPMap()
 *
 * function: allocate from persistent map;
 *
 * parameter:
 *	ipbmap	-
 *	maplock	-
 *		xad list:
 *		pxd:
 *	maptype	- allocate from persistent map;
 *		  free from persistent map;
 *		  (e.g., tmp file - free from working map at release
 *		   of last reference);
 *		  free from persistent and working map;
 *
 *	lsn	- log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, FALSE, xaddr,
					     (s64) xlen, tblk);
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		}
	}
}
/*
 *	txFreeMap()
 *
 * function: free from persistent and/or working map;
 *
 * todo: optimization
 */
2475 void txFreeMap(struct inode *ip,
2476 struct maplock * maplock, struct tblock * tblk, int maptype)
2478 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2479 struct xdlistlock *xadlistlock;
2483 struct pxd_lock *pxdlock;
2484 struct xdlistlock *pxdlistlock;
2488 jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2489 tblk, maplock, maptype);
2492 * free from persistent map;
2494 if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2495 if (maplock->flag & mlckFREEXADLIST) {
2496 xadlistlock = (struct xdlistlock *) maplock;
2497 xad = xadlistlock->xdlist;
2498 for (n = 0; n < xadlistlock->count; n++, xad++) {
2499 if (!(xad->flag & XAD_NEW)) {
2500 xaddr = addressXAD(xad);
2501 xlen = lengthXAD(xad);
2502 dbUpdatePMap(ipbmap, TRUE, xaddr,
2504 jfs_info("freePMap: xaddr:0x%lx "
2506 (ulong) xaddr, xlen);
2509 } else if (maplock->flag & mlckFREEPXD) {
2510 pxdlock = (struct pxd_lock *) maplock;
2511 xaddr = addressPXD(&pxdlock->pxd);
2512 xlen = lengthPXD(&pxdlock->pxd);
2513 dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
2515 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2516 (ulong) xaddr, xlen);
2517 } else { /* (maplock->flag & mlckALLOCPXDLIST) */
2519 pxdlistlock = (struct xdlistlock *) maplock;
2520 pxd = pxdlistlock->xdlist;
2521 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2522 xaddr = addressPXD(pxd);
2523 xlen = lengthPXD(pxd);
2524 dbUpdatePMap(ipbmap, TRUE, xaddr,
2526 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2527 (ulong) xaddr, xlen);
	/*
	 *	free from working map;
	 */
	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbFree(ip, xaddr, (s64) xlen);
				xad->flag = 0;
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbFree(ip, xaddr, (s64) xlen);
			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */
			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbFree(ip, xaddr, (s64) xlen);
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}
}
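/*
 * maptype selects which map(s) the free is applied to:
 *
 *	COMMIT_PMAP	persistent (on-disk) map only
 *	COMMIT_WMAP	working (in-memory) map only
 *	COMMIT_PWMAP	both maps
 *
 * as implemented by the two independent if-blocks above.
 */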
/*
 *	txFreelock()
 *
 * function:	remove tlock from inode anonymous locklist
 */
void txFreelock(struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct tlock *xtlck, *tlck;
	lid_t xlid = 0, lid;

	if (!jfs_ip->atlhead)
		return;

	TXN_LOCK();
	xtlck = (struct tlock *) &jfs_ip->atlhead;
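	/*
	 * The cast above works because next is the first field of a
	 * tlock: &jfs_ip->atlhead acts as a pseudo-tlock whose next
	 * index overlays atlhead, so the unlink loop below can treat
	 * the list head and interior elements uniformly.
	 */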
	while ((lid = xtlck->next)) {
		tlck = lid_to_tlock(lid);
		if (tlck->flag & tlckFREELOCK) {
			/* unlink and recycle the freed tlock */
			xtlck->next = tlck->next;
			txLockFree(lid);
		} else {
			/* keep it; remember the last tlock kept */
			xtlck = tlck;
			xlid = lid;
		}
	}

	if (jfs_ip->atlhead)
		jfs_ip->atltail = xlid;
	else {
		jfs_ip->atltail = 0;
		/*
		 * If inode was on anon_list, remove it
		 */
		list_del_init(&jfs_ip->anon_inode_list);
	}
	TXN_UNLOCK();
}
/*
 *	txAbort()
 *
 * function: abort tx before commit;
 *
 * frees line-locks and segment locks for all
 * segments in comdata structure.
 * Optionally sets state of file-system to FM_DIRTY in super-block.
 * The log age of in-memory page-frames tlock'ed by the caller is
 * reset to 0 (to avoid logwrap).
 */
void txAbort(tid_t tid, int dirty)
{
	lid_t lid, next;
	struct metapage *mp;
	struct tblock *tblk = tid_to_tblock(tid);

	jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);

	/*
	 * free tlocks of the transaction
	 */
	for (lid = tblk->next; lid; lid = next) {
		next = lid_to_tlock(lid)->next;

		mp = lid_to_tlock(lid)->mp;

		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid logwrap:
			 *
			 * (page may have been previously committed by another
			 * transaction(s) but has not been paged, i.e.,
			 * it may be on logsync list even though it has not
			 * been logged for the current tx.)
			 */
			if (mp->xflag & COMMIT_PAGE && mp->lsn)
				LogSyncRelease(mp);
		}

		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	/* caller will free the transaction block */
	tblk->next = tblk->last = 0;

	/*
	 * mark filesystem dirty
	 */
	if (dirty)
		jfs_error(tblk->sb, "txAbort");
}
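/*
 * Typical call pattern (sketch; some_update() is a hypothetical
 * stand-in, not a helper in this file): a caller that fails part-way
 * through an update aborts instead of committing:
 *
 *	tid = txBegin(sb, 0);
 *	if ((rc = some_update(tid, ip)))
 *		txAbort(tid, 1);	(dirty != 0: mark FS dirty)
 *	else
 *		rc = txCommit(tid, 1, &ip, 0);
 *	txEnd(tid);
 */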
/*
 *	txLazyCommit(void)
 *
 *	All transactions except those changing ipimap (COMMIT_FORCE) are
 *	processed by this routine.  This ensures that the inode and block
 *	allocation maps are updated in order.  For synchronous transactions,
 *	let the user thread finish processing after txUpdateMap() is called.
 */
static void txLazyCommit(struct tblock * tblk)
{
	struct jfs_log *log;

	while (((tblk->flag & tblkGC_READY) == 0) &&
	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
		/* We must have gotten ahead of the user thread
		 */
		jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
		yield();
	}

	jfs_info("txLazyCommit: processing tblk 0x%p", tblk);

	txUpdateMap(tblk);

	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;

	spin_lock_irq(&log->gclock);	// LOGGC_LOCK

	tblk->flag |= tblkGC_COMMITTED;

	if (tblk->flag & tblkGC_READY)
		log->gcrtc--;

	wake_up_all(&tblk->gcwait);	// LOGGC_WAKEUP

	/*
	 * Can't release log->gclock until we've tested tblk->flag
	 */
	if (tblk->flag & tblkGC_LAZY) {
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		txUnlock(tblk);
		tblk->flag &= ~tblkGC_LAZY;
		txEnd(tblk - TxBlock);	/* Convert back to tid */
	} else
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK

	jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
}
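/*
 * Note the two exit paths above: for a lazy commit (tblkGC_LAZY) this
 * thread also unlocks the pages and frees the transaction block via
 * txEnd(); for a synchronous commit the waiting user thread, woken
 * through gcwait, performs those steps itself.
 */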
/*
 *	jfs_lazycommit(void)
 *
 *	To be run as a kernel daemon.  If lbmIODone is called in an interrupt
 *	context, or where blocking is not wanted, this routine will process
 *	committed transactions from the unlock queue.
 */
int jfs_lazycommit(void *arg)
{
	int WorkDone;
	struct tblock *tblk;
	unsigned long flags;
	struct jfs_sb_info *sbi;

	daemonize("jfsCommit");

	complete(&jfsIOwait);

	do {
		LAZY_LOCK(flags);
		while (!list_empty(&TxAnchor.unlock_queue)) {
			WorkDone = 0;
			list_for_each_entry(tblk, &TxAnchor.unlock_queue,
					    cqueue) {

				sbi = JFS_SBI(tblk->sb);
				/*
				 * For each volume, the transactions must be
				 * handled in order.  If another commit thread
				 * is handling a tblk for this superblock,
				 * skip it
				 */
				if (sbi->commit_state & IN_LAZYCOMMIT)
					continue;

				sbi->commit_state |= IN_LAZYCOMMIT;
				WorkDone = 1;

				/*
				 * Remove transaction from queue
				 */
				list_del(&tblk->cqueue);

				LAZY_UNLOCK(flags);
				txLazyCommit(tblk);
				LAZY_LOCK(flags);

				sbi->commit_state &= ~IN_LAZYCOMMIT;
				/*
				 * Don't continue in the for loop.  (We can't
				 * anyway, it's unsafe!)  We want to go back to
				 * the beginning of the list.
				 */
				break;
			}

			/* If there was nothing to do, don't continue */
			if (!WorkDone)
				break;
		}

		if (current->flags & PF_FREEZE) {
			LAZY_UNLOCK(flags);
			refrigerator(PF_FREEZE);
		} else {
			DECLARE_WAITQUEUE(wq, current);

			add_wait_queue(&jfs_commit_thread_wait, &wq);
			set_current_state(TASK_INTERRUPTIBLE);
			LAZY_UNLOCK(flags);
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&jfs_commit_thread_wait, &wq);
		}
	} while (!jfs_stop_threads);

	if (!list_empty(&TxAnchor.unlock_queue))
		jfs_err("jfs_lazycommit being killed w/pending transactions!");
	else
		jfs_info("jfs_lazycommit being killed");
	complete_and_exit(&jfsIOwait, 0);
}
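/*
 * The IN_LAZYCOMMIT bit in sbi->commit_state is what serializes commits
 * per volume: while one thread holds it, other commit threads skip that
 * superblock's tblks, so a volume's transactions complete in queue
 * order even with several threads running.
 */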
void txLazyUnlock(struct tblock * tblk)
{
	unsigned long flags;

	LAZY_LOCK(flags);

	list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
	/*
	 * Don't wake up a commit thread if there is already one servicing
	 * this superblock.
	 */
	if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT))
		wake_up(&jfs_commit_thread_wait);
	LAZY_UNLOCK(flags);
}
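/*
 * Suppressing the wakeup is safe: a commit thread already servicing
 * this superblock restarts its scan of unlock_queue from the top after
 * each tblk, so a transaction queued here is still picked up.
 */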
static void LogSyncRelease(struct metapage * mp)
{
	struct jfs_log *log = mp->log;

	assert(atomic_read(&mp->nohomeok));
	assert(log);
	atomic_dec(&mp->nohomeok);

	if (atomic_read(&mp->nohomeok))
		return;

	hold_metapage(mp, 0);

	LOGSYNC_LOCK(log);
	mp->log = NULL;
	mp->lsn = 0;
	mp->clsn = 0;
	log->count--;
	list_del_init(&mp->synclist);
	LOGSYNC_UNLOCK(log);

	release_metapage(mp);
}
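/*
 * nohomeok counts the transactions still preventing this metapage from
 * being written home; only when the last reference drops is the page
 * taken off the log's synclist and allowed to be written back.
 */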
/*
 *	txQuiesce()
 *
 *	Block all new transactions and push anonymous transactions to
 *	completion
 *
 *	This does almost the same thing as jfs_sync below.  We don't
 *	worry about deadlocking when jfs_tlocks_low is set, since we would
 *	expect jfs_sync to get us out of that jam.
 */
void txQuiesce(struct super_block *sb)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	struct jfs_log *log = JFS_SBI(sb)->log;
	tid_t tid;

	set_bit(log_QUIESCE, &log->flag);

	TXN_LOCK();
restart:
	while (!list_empty(&TxAnchor.anon_list)) {
		jfs_ip = list_entry(TxAnchor.anon_list.next,
				    struct jfs_inode_info,
				    anon_inode_list);
		ip = &jfs_ip->vfs_inode;
		TXN_UNLOCK();

		/*
		 * inode will be removed from anonymous list
		 * when it is committed
		 */
		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
		down(&jfs_ip->commit_sem);
		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		up(&jfs_ip->commit_sem);
		/*
		 * Just to be safe.  I don't know how
		 * long we can run without blocking
		 */
		cond_resched();
		TXN_LOCK();
	}

	/*
	 * If jfs_sync is running in parallel, there could be some inodes
	 * on anon_list2.  Let's check.
	 */
	if (!list_empty(&TxAnchor.anon_list2)) {
		list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
		INIT_LIST_HEAD(&TxAnchor.anon_list2);
		goto restart;
	}
	TXN_UNLOCK();

	/*
	 * We may need to kick off the group commit
	 */
	jfs_flush_journal(log, 0);
}

/*
 *	txResume()
 *
 *	Allows transactions to start again following txQuiesce
 */
void txResume(struct super_block *sb)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	clear_bit(log_QUIESCE, &log->flag);
	TXN_WAKEUP(&log->syncwait);
}
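/*
 * Usage sketch (hypothetical caller, not part of this file): the pair
 * brackets an operation that needs a stable on-disk image, e.g. a
 * volume-resize style utility:
 *
 *	txQuiesce(sb);
 *	rc = do_offline_operation(sb);	(stand-in for the real work)
 *	txResume(sb);
 */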
/*
 *	jfs_sync(void)
 *
 *	To be run as a kernel daemon.  This is awakened when tlocks run low.
 *	We write any inodes that have anonymous tlocks so they will become
 *	available.
 */
int jfs_sync(void *arg)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int rc;
	tid_t tid;

	daemonize("jfsSync");

	complete(&jfsIOwait);

	do {
		/*
		 * write each inode on the anonymous inode list
		 */
		TXN_LOCK();
		while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
			jfs_ip = list_entry(TxAnchor.anon_list.next,
					    struct jfs_inode_info,
					    anon_inode_list);
			ip = &jfs_ip->vfs_inode;

			if (! igrab(ip)) {
				/*
				 * Inode is being freed
				 */
				list_del_init(&jfs_ip->anon_inode_list);
			} else if (! down_trylock(&jfs_ip->commit_sem)) {
				/*
				 * inode will be removed from anonymous list
				 * when it is committed
				 */
				TXN_UNLOCK();
				tid = txBegin(ip->i_sb, COMMIT_INODE);
				rc = txCommit(tid, 1, &ip, 0);
				txEnd(tid);
				up(&jfs_ip->commit_sem);

				iput(ip);
				/*
				 * Just to be safe.  I don't know how
				 * long we can run without blocking
				 */
				cond_resched();
				TXN_LOCK();
			} else {
				/* We can't get the commit semaphore.  It may
				 * be held by a thread waiting for tlock's
				 * so let's not block here.  Save it to
				 * put back on the anon_list.
				 */

				/* Take off anon_list */
				list_del(&jfs_ip->anon_inode_list);

				/* Put on anon_list2 */
				list_add(&jfs_ip->anon_inode_list,
					 &TxAnchor.anon_list2);

				TXN_UNLOCK();
				iput(ip);
				TXN_LOCK();
			}
		}
		/* Add anon_list2 back to anon_list */
		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);

		if (current->flags & PF_FREEZE) {
			TXN_UNLOCK();
			refrigerator(PF_FREEZE);
		} else {
			DECLARE_WAITQUEUE(wq, current);

			add_wait_queue(&jfs_sync_thread_wait, &wq);
			set_current_state(TASK_INTERRUPTIBLE);
			TXN_UNLOCK();
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&jfs_sync_thread_wait, &wq);
		}
	} while (!jfs_stop_threads);

	jfs_info("jfs_sync being killed");
	complete_and_exit(&jfsIOwait, 0);
}
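/*
 * anon_list2 exists so this thread never blocks on a commit_sem: busy
 * inodes are parked there and spliced back onto anon_list after the
 * scan, to be retried on the next wakeup.
 */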
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
		      int *eof, void *data)
{
	int len = 0;
	off_t begin;
	char *freewait;
	char *freelockwait;
	char *lowlockwait;

	freewait =
	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
	freelockwait =
	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
	lowlockwait =
	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";

	len += sprintf(buffer,
		       "JFS TxAnchor\n"
		       "============\n"
		       "freetid = %d\n"
		       "freewait = %s\n"
		       "freelock = %d\n"
		       "freelockwait = %s\n"
		       "lowlockwait = %s\n"
		       "tlocksInUse = %d\n"
		       "jfs_tlocks_low = %d\n"
		       "unlock_queue is %sempty\n",
		       TxAnchor.freetid,
		       freewait,
		       TxAnchor.freelock,
		       freelockwait,
		       lowlockwait,
		       TxAnchor.tlocksInUse,
		       jfs_tlocks_low,
		       list_empty(&TxAnchor.unlock_queue) ? "" : "not ");

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
		     int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS TxStats\n"
		       "===========\n"
		       "calls to txBegin = %d\n"
		       "txBegin blocked by sync barrier = %d\n"
		       "txBegin blocked by tlocks low = %d\n"
		       "txBegin blocked by no free tid = %d\n"
		       "calls to txBeginAnon = %d\n"
		       "txBeginAnon blocked by sync barrier = %d\n"
		       "txBeginAnon blocked by tlocks low = %d\n"
		       "calls to txLockAlloc = %d\n"
		       "txLockAlloc blocked by no free lock = %d\n",
		       TxStat.txBegin,
		       TxStat.txBegin_barrier,
		       TxStat.txBegin_lockslow,
		       TxStat.txBegin_freetid,
		       TxStat.txBeginAnon,
		       TxStat.txBeginAnon_barrier,
		       TxStat.txBeginAnon_lockslow,
		       TxStat.txLockAlloc,
		       TxStat.txLockAlloc_freelock);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif