/*
 * Copyright (c) International Business Machines Corp., 2000-2002
 * Portions Copyright (c) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * jfs_txnmgr.c: transaction manager
 *
 * notes:
 * transaction starts with txBegin() and ends with txCommit()
 * or txAbort().
 *
 * tlock is acquired at the time of update;
 * (obviates scan at commit time for xtree and dtree)
 * tlock and mp point to each other;
 * (no hashlist for mp -> tlock).
 *
 * special cases:
 * tlock on in-memory inode:
 * in-place tlock in the in-memory inode itself;
 * converted to page lock by iWrite() at commit time.
 *
 * tlock during write()/mmap() under anonymous transaction (tid = 0):
 * transferred (?) to transaction at commit time.
 *
 * use the page itself to update allocation maps
 * (obviates intermediate replication of allocation/deallocation data)
 * hold on to mp+lock through update of maps
 */
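/*
 * A minimal sketch (not part of the original source) of the lifecycle
 * described above, using only functions defined in this file; the actual
 * metadata updates and their txLock() calls are elided:
 */
#if 0
static int example_transaction(struct super_block *sb, struct inode *ip)
{
	tid_t tid;
	int rc;

	tid = txBegin(sb, 0);		/* allocate a tid/tblock */
	/* ... update metadata; each update acquires a tlock via txLock() */
	rc = txCommit(tid, 1, &ip, 0);	/* log and commit the tlocked pages */
	if (rc)
		txAbort(tid, 1);	/* free tlocks, mark superblock dirty */
	txEnd(tid);			/* return the tblock to the freelist */
	return rc;
}
#endif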
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
 *	transaction management structures
 */
static struct {
	/* tblock */
	int freetid;		/* index of a free tid structure */
	wait_queue_head_t freewait;	/* eventlist of free tblock */

	/* tlock */
	int freelock;		/* index of first free lock word */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* number of tlocks in use */
	int TlocksLow;		/* indicates low number of available tlocks */
	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
/*	struct tblock *sync_queue;	 * transactions waiting for data sync */
	struct tblock *unlock_queue;	/* txns waiting to be released */
	struct tblock *unlock_tail;	/* tail of unlock_queue */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;
#ifdef CONFIG_JFS_STATISTICS
struct {
	uint txBegin;
	uint txBegin_barrier;
	uint txBegin_lockslow;
	uint txBegin_freetid;
	uint txBeginAnon;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc;
	uint txLockAlloc_freelock;
} TxStat;
#endif
static int nTxBlock = 512;	/* number of transaction blocks */
struct tblock *TxBlock;		/* transaction block table */

static int nTxLock = 4096;	/* number of transaction locks */
static int TxLockLWM = 4096*.4;	/* low water mark for number of txLocks used */
static int TxLockHWM = 4096*.8;	/* high water mark for number of txLocks used */
struct tlock *TxLock;		/* transaction lock table */
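/*
 * With nTxLock = 4096 the float initializers above truncate to
 * TxLockLWM = 1638 (4096 * 0.4) and TxLockHWM = 3276 (4096 * 0.8).
 */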
/*
 *	transaction management lock
 */
static spinlock_t jfsTxnLock = SPIN_LOCK_UNLOCKED;

#define TXN_LOCK()	spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()	spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock)
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags)	spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);

/*
 * Retry logic exists outside these macros to protect against spurious wakeups.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}

#define TXN_WAKEUP(event) wake_up_all(event)
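/*
 * A hedged sketch (not in the original file) of the caller-side retry
 * pattern these macros assume -- the condition is rechecked after every
 * wakeup because TXN_SLEEP() may return spuriously:
 */
#if 0
	TXN_LOCK();
	while (!TxAnchor.freelock)	/* recheck; wakeup may be spurious */
		TXN_SLEEP(&TxAnchor.freelockwait);
	TXN_UNLOCK();
#endif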
/*
 *	statistics
 */
struct {
	tid_t maxtid;		/* 4: biggest tid ever used */
	lid_t maxlid;		/* 4: biggest lid ever used */
	int ntid;		/* 4: # of transactions performed */
	int nlid;		/* 4: # of tlocks acquired */
	int waitlock;		/* 4: # of tlock waits */
} stattx;
/*
 *	external references
 */
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern void lmSync(struct jfs_log *);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_stop_threads;

struct task_struct *jfsCommitTask;
extern struct completion jfsIOwait;
/*
 * forward references
 */
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck, struct commit * cd);
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck);
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck);
void inlineLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	       struct tlock * tlck);
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck);
void txAbortCommit(struct commit * cd, int exval);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk);
void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd);
int txMoreLock(void);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
 *	transaction block/lock management
 *	---------------------------------
 */

/*
 * Get a transaction lock from the free list.  If the number in use is
 * greater than the high water mark, wake up the sync daemon.  This should
 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 */
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);	/* statistics */
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (TxAnchor.TlocksLow == 0)) {
		jEVENT(0, ("txLockAlloc TlocksLow\n"));
		TxAnchor.TlocksLow = 1;
		wake_up(&jfs_sync_thread_wait);
	}

	return lid;
}
static void txLockFree(lid_t lid)
{
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (TxAnchor.TlocksLow && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jEVENT(0, ("txLockFree TlocksLow no more\n"));
		TxAnchor.TlocksLow = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}
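/*
 * A sketch (added for illustration) of the structure the two functions
 * above manipulate: tlocks are threaded into a freelist by table index
 * rather than by pointer, with lid 0 reserved as the list terminator:
 */
#if 0
	/* pop: lid = head; head = TxLock[lid].next */
	lid = TxAnchor.freelock;
	TxAnchor.freelock = TxLock[lid].next;

	/* push: TxLock[lid].next = head; head = lid */
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
#endif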
/*
 * NAME:	txInit()
 *
 * FUNCTION:	initialize transaction management structures
 *
 * RETURN:
 *
 * serialization: single thread at jfs_init()
 */
int txInit(void)
{
	int k, size;

	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index
	 * tid = 0 is reserved.
	 */
	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = (struct tblock *) vmalloc(size);
	if (TxBlock == NULL)
		return ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id = tlock index
	 * tlock id = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = (struct tlock *) vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return ENOMEM;
	}

	/* initialize tlock table */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;

	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}
/*
 * NAME:	txExit()
 *
 * FUNCTION:	clean up when module is unloaded
 */
void txExit(void)
{
	vfree(TxLock);
	TxLock = 0;
	vfree(TxBlock);
	TxBlock = 0;
}
/*
 * NAME:	txBegin()
 *
 * FUNCTION:	start a transaction.
 *
 * PARAMETER:	sb	- superblock
 *		flag	- force for nested tx;
 *
 * RETURN:	tid	- transaction id
 *
 * note: the force flag allows a tx to be started for a nested tx
 * to prevent deadlock on the logsync barrier;
 */
tid_t txBegin(struct super_block *sb, int flag)
{
	tid_t t;
	struct tblock *tblk;
	struct jfs_log *log;

	jFYI(1, ("txBegin: flag = 0x%x\n", flag));
	log = JFS_SBI(sb)->log;

	TXN_LOCK();

	INCREMENT(TxStat.txBegin);

      retry:
	if (!(flag & COMMIT_FORCE)) {
		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag) ||
		    test_bit(log_QUIESCE, &log->flag)) {
			INCREMENT(TxStat.txBegin_barrier);
			TXN_SLEEP(&log->syncwait);
			goto retry;
		}
	}
	if (flag == 0) {
		/*
		 * Don't begin transaction if we're getting starved for tlocks
		 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
		 * free tlocks)
		 */
		if (TxAnchor.TlocksLow) {
			INCREMENT(TxStat.txBegin_lockslow);
			TXN_SLEEP(&TxAnchor.lowlockwait);
			goto retry;
		}
	}

	/*
	 * allocate transaction id/block
	 */
	if ((t = TxAnchor.freetid) == 0) {
		jFYI(1, ("txBegin: waiting for free tid\n"));
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	tblk = tid_to_tblock(t);

	if ((tblk->next == 0) && (current != jfsCommitTask)) {
		/* Save one tblk for jfsCommit thread */
		jFYI(1, ("txBegin: waiting for free tid\n"));
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	TxAnchor.freetid = tblk->next;

	/*
	 * initialize transaction
	 */

	/*
	 * We can't zero the whole thing or we screw up another thread being
	 * awakened after sleeping on tblk->waitor
	 *
	 * memset(tblk, 0, sizeof(struct tblock));
	 */
	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;

	tblk->sb = sb;
	++log->logtid;
	tblk->logtid = log->logtid;

	++log->active;

	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
	INCREMENT(stattx.ntid);	/* statistics */

	TXN_UNLOCK();

	jFYI(1, ("txBegin: returning tid = %d\n", t));

	return t;
}
/*
 * NAME:	txBeginAnon()
 *
 * FUNCTION:	start an anonymous transaction.
 *		Blocks if logsync or available tlocks are low to prevent
 *		anonymous tlocks from depleting supply.
 *
 * PARAMETER:	sb	- superblock
 *
 * RETURN:	none
 */
void txBeginAnon(struct super_block *sb)
{
	struct jfs_log *log;

	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBeginAnon);

      retry:
	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) ||
	    test_bit(log_QUIESCE, &log->flag)) {
		INCREMENT(TxStat.txBeginAnon_barrier);
		TXN_SLEEP(&log->syncwait);
		goto retry;
	}

	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 */
	if (TxAnchor.TlocksLow) {
		INCREMENT(TxStat.txBeginAnon_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
		goto retry;
	}
	TXN_UNLOCK();
}
/*
 *	txEnd()
 *
 * function: free specified transaction block.
 *
 *	logsync barrier processing:
 *
 *	serialization:
 */
void txEnd(tid_t tid)
{
	struct tblock *tblk = tid_to_tblock(tid);
	struct jfs_log *log;

	jFYI(1, ("txEnd: tid = %d\n", tid));
	TXN_LOCK();

	/*
	 * wakeup transactions waiting on the page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	log = JFS_SBI(tblk->sb)->log;

	/*
	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
	 * otherwise, we would be left with a transaction that may have been
	 * reused.
	 *
	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
	 * routine.
	 */
	if (tblk->flag & tblkGC_LAZY) {
		jFYI(1,
		     ("txEnd called w/lazy tid: %d, tblk = 0x%p\n",
		      tid, tblk));
		TXN_UNLOCK();

		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
		tblk->flag |= tblkGC_UNLOCKED;
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		return;
	}

	jFYI(1, ("txEnd: tid: %d, tblk = 0x%p\n", tid, tblk));

	assert(tblk->next == 0);

	/*
	 * insert tblock back on freelist
	 */
	tblk->next = TxAnchor.freetid;
	TxAnchor.freetid = tid;

	/*
	 * mark the tblock not active
	 */
	--log->active;

	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) && log->active == 0) {
		/* forward log syncpt */
		/* lmSync(log); */

		jFYI(1, (" log barrier off: 0x%x\n", log->lsn));

		/* enable new transactions to start */
		clear_bit(log_SYNCBARRIER, &log->flag);

		/* wake up all waiters for logsync barrier */
		TXN_WAKEUP(&log->syncwait);
	}

	/*
	 * wake up all waiters for a free tblock
	 */
	TXN_WAKEUP(&TxAnchor.freewait);

	TXN_UNLOCK();
	jFYI(1, ("txEnd: exiting\n"));
}
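/*
 * The logsync barrier handling above is the rendezvous partner of the
 * sleep in txBegin()/txBeginAnon(); a hedged sketch of the pairing:
 */
#if 0
	/* txBegin(): */
	if (test_bit(log_SYNCBARRIER, &log->flag))
		TXN_SLEEP(&log->syncwait);	/* blocked until... */

	/* txEnd(), when the last active transaction ends: */
	clear_bit(log_SYNCBARRIER, &log->flag);
	TXN_WAKEUP(&log->syncwait);		/* ...this wakeup */
#endif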
/*
 *	txLock()
 *
 * function: acquire a transaction lock on the specified <mp>
 *
 * return:	transaction lock id
 */
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
		     int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int dir_xtree = 0;
	lid_t lid;
	tid_t xtid;
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct linelock *linelock;
	xtpage_t *p;
	struct tblock *tblk;

	assert(!test_cflag(COMMIT_Nolink, ip));

	TXN_LOCK();

	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
	    !(mp->xflag & COMMIT_PAGE)) {
		/*
		 * Directory inode is special.  It can have both an xtree
		 * tlock and a dtree tlock associated with it.
		 */
		dir_xtree = 1;
		lid = jfs_ip->xtlid;
	} else
		lid = mp->lid;
	/* is page not locked by a transaction ? */
	if (lid == 0)
		goto allocateLock;

	jFYI(1, ("txLock: tid:%d ip:0x%p mp:0x%p lid:%d\n",
		 tid, ip, mp, lid));

	/* is page locked by the requester transaction ? */
	tlck = lid_to_tlock(lid);
	if ((xtid = tlck->tid) == tid)
		goto grantLock;

	/*
	 * is page locked by anonymous transaction/lock ?
	 *
	 * (page update without transaction (i.e., file write) is
	 * locked under anonymous transaction tid = 0:
	 * anonymous tlocks maintained on anonymous tlock list of
	 * the inode of the page and available to all anonymous
	 * transactions until txCommit() time at which point
	 * they are transferred to the transaction tlock list of
	 * the committing transaction of the inode)
	 */
	if (xtid == 0) {
		tlck->tid = tid;
		tblk = tid_to_tblock(tid);
		/*
		 * The order of the tlocks in the transaction is important
		 * (during truncate, child xtree pages must be freed before
		 * parent's tlocks change the working map).
		 * Take tlock off anonymous list and add to tail of
		 * transaction list
		 *
		 * Note:  We really need to get rid of the tid & lid and
		 * use list_head's.  This code is getting UGLY!
		 */
		if (jfs_ip->atlhead == lid) {
			if (jfs_ip->atltail == lid) {
				/* only anonymous txn.
				 * Remove from anon_list
				 */
				list_del_init(&jfs_ip->anon_inode_list);
			}
			jfs_ip->atlhead = tlck->next;
		} else {
			lid_t last;
			for (last = jfs_ip->atlhead;
			     lid_to_tlock(last)->next != lid;
			     last = lid_to_tlock(last)->next) {
				assert(last);
			}
			lid_to_tlock(last)->next = tlck->next;
			if (jfs_ip->atltail == lid)
				jfs_ip->atltail = last;
		}

		/* insert the tlock at tail of transaction tlock list */
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;

		goto grantLock;
	}

	goto waitLock;
	/*
	 * allocate a tlock
	 */
      allocateLock:
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* mark tlock for meta-data page */
	if (mp->xflag & COMMIT_PAGE) {

		tlck->flag = tlckPAGELOCK;

		/* mark the page dirty and nohomeok */
		mark_metapage_dirty(mp);
		atomic_inc(&mp->nohomeok);

		jFYI(1,
		     ("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p\n",
		      mp, atomic_read(&mp->nohomeok), tid, tlck));

		/* if anonymous transaction, and buffer is on the group
		 * commit synclist, mark inode to show this.  This will
		 * prevent the buffer from being marked nohomeok for too
		 * long a time.
		 */
		if ((tid == 0) && mp->lsn)
			set_cflag(COMMIT_Synclist, ip);
	}
	/* mark tlock for in-memory inode */
	else
		tlck->flag = tlckINODELOCK;

	tlck->type = 0;

	/* bind the tlock and the page */
	if (dir_xtree)
		jfs_ip->xtlid = lid;
	else
		mp->lid = lid;
	tlck->mp = mp;
	tlck->ip = ip;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	/* initialize type dependent area for linelock */
	linelock = (struct linelock *) & tlck->lock;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKSHORT;
	linelock->index = 0;
	switch (type & tlckTYPE) {
	case tlckDTREE:
		linelock->l2linesize = L2DTSLOTSIZE;
		break;

	case tlckXTREE:
		linelock->l2linesize = L2XTSLOTSIZE;

		xtlck = (struct xtlock *) linelock;
		xtlck->header.offset = 0;
		xtlck->header.length = 2;

		if (type & tlckNEW) {
			xtlck->lwm.offset = XTENTRYSTART;
		} else {
			if (mp->xflag & COMMIT_PAGE)
				p = (xtpage_t *) mp->data;
			else
				p = &jfs_ip->i_xtroot;
			xtlck->lwm.offset =
			    le16_to_cpu(p->header.nextindex);
		}
		xtlck->lwm.length = 0;	/* ! */
		xtlck->twm.offset = 0;
		xtlck->hwm.offset = 0;

		xtlck->index = 2;
		break;

	case tlckINODE:
		linelock->l2linesize = L2INODESLOTSIZE;
		break;

	case tlckDATA:
		linelock->l2linesize = L2DATASLOTSIZE;
		break;

	default:
		jERROR(1, ("UFO tlock:0x%p\n", tlck));
	}
	/*
	 * update tlock vector
	 */
      grantLock:
	tlck->type |= type;

	TXN_UNLOCK();

	return tlck;

	/*
	 * page is being locked by another transaction:
	 */
      waitLock:
	/* Only locks on ipimap or ipaimap should reach here */
	/* assert(jfs_ip->fileset == AGGREGATE_I); */
	if (jfs_ip->fileset != AGGREGATE_I) {
		jERROR(1, ("txLock: trying to lock locked page!\n"));
		dump_mem("ip", ip, sizeof(struct inode));
		dump_mem("mp", mp, sizeof(struct metapage));
		dump_mem("Locker's tblk", tid_to_tblock(tid),
			 sizeof(struct tblock));
		dump_mem("Tlock", tlck, sizeof(struct tlock));
	}
	INCREMENT(stattx.waitlock);	/* statistics */
	release_metapage(mp);

	jEVENT(0, ("txLock: in waitLock, tid = %d, xtid = %d, lid = %d\n",
		   tid, xtid, lid));
	TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
	jEVENT(0, ("txLock: awakened tid = %d, lid = %d\n", tid, lid));

	return NULL;
}
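/*
 * Illustrative sketch (hypothetical caller, not from this file): a btree
 * update typically pins the metapage, takes the tlock, then records the
 * modified slots through the tlock's embedded linelock area:
 */
#if 0
	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
	xtlck = (struct xtlock *) &tlck->lock;
	/* ... modify the xtree page; record changed slots in xtlck ... */
#endif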
/*
 *	txRelease()
 *
 * FUNCTION:	Release buffers associated with transaction locks, but don't
 *		mark homeok yet.  This allows other transactions to modify
 *		buffers, but won't let them go to disk until the commit
 *		record actually gets written.
 *
 * PARAMETER:	tblk
 *
 * RETURN:	Errors from subroutines.
 */
static void txRelease(struct tblock * tblk)
{
	struct metapage *mp;
	lid_t lid;
	struct tlock *tlck;

	TXN_LOCK();

	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);
			mp->lid = 0;
		}
	}

	/*
	 * wakeup transactions waiting on a page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	TXN_UNLOCK();
}
/*
 *	txUnlock()
 *
 * FUNCTION:	Initiates pageout of pages modified by tid in journalled
 *		objects and frees their lockwords.
 */
static void txUnlock(struct tblock * tblk)
{
	struct tlock *tlck;
	struct linelock *linelock;
	lid_t lid, next, llid, k;
	struct metapage *mp;
	struct jfs_log *log;
	int difft, diffp;

	jFYI(1, ("txUnlock: tblk = 0x%p\n", tblk));
	log = JFS_SBI(tblk->sb)->log;

	/*
	 * mark page under tlock homeok (its log has been written):
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		jFYI(1, ("unlocking lid = %d, tlck = 0x%p\n", lid, tlck));

		/* unbind page from tlock */
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			/* hold buffer
			 *
			 * It's possible that someone else has the metapage.
			 * The only things we're changing are nohomeok, which
			 * is handled atomically, and clsn which is protected
			 * by the LOGSYNC_LOCK.
			 */
			hold_metapage(mp, 1);

			assert(atomic_read(&mp->nohomeok) > 0);
			atomic_dec(&mp->nohomeok);

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log);
			if (mp->clsn) {
				logdiff(difft, tblk->clsn, log);
				logdiff(diffp, mp->clsn, log);
				if (difft > diffp)
					mp->clsn = tblk->clsn;
			} else
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);

			assert(!(tlck->flag & tlckFREEPAGE));

			if (tlck->flag & tlckWRITEPAGE) {
				write_metapage(mp);
			} else {
				/* release page which has been forced */
				release_metapage(mp);
			}
		}

		/* insert tlock, and linelock(s) of the tlock if any,
		 * at head of freelist
		 */
		TXN_LOCK();

		llid = ((struct linelock *) & tlck->lock)->next;
		while (llid) {
			linelock = (struct linelock *) lid_to_tlock(llid);
			k = linelock->next;
			txLockFree(llid);
			llid = k;
		}
		txLockFree(lid);

		TXN_UNLOCK();
	}
	tblk->next = tblk->last = 0;

	/*
	 * remove tblock from logsynclist
	 * (allocation map pages inherited lsn of tblk and
	 * have been inserted in logsync list at txUpdateMap())
	 */
	if (tblk->lsn) {
		LOGSYNC_LOCK(log);
		log->count--;
		list_del(&tblk->synclist);
		LOGSYNC_UNLOCK(log);
	}
}
/*
 *	txMaplock()
 *
 * function: allocate a transaction lock for freed page/entry;
 *	for freed page, maplock is used as xtlock/dtlock type;
 */
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	lid_t lid;
	struct tblock *tblk;
	struct tlock *tlck;
	struct maplock *maplock;

	TXN_LOCK();

	/*
	 * allocate a tlock
	 */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	tlck->ip = ip;
	tlck->mp = NULL;

	tlck->type = type;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	TXN_UNLOCK();

	/* initialize type dependent area for maplock */
	maplock = (struct maplock *) & tlck->lock;
	maplock->next = 0;
	maplock->maxcnt = 0;
	maplock->index = 0;

	return tlck;
}
/*
 *	txLinelock()
 *
 * function: allocate a transaction lock for log vector list
 */
struct linelock *txLinelock(struct linelock * tlock)
{
	lid_t lid;
	struct tlock *tlck;
	struct linelock *linelock;

	TXN_LOCK();

	/* allocate a TxLock structure */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	TXN_UNLOCK();

	/* initialize linelock */
	linelock = (struct linelock *) tlck;
	linelock->next = 0;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;

	/* append linelock after tlock */
	linelock->next = tlock->next;
	tlock->next = lid;

	return linelock;
}
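/*
 * A hedged sketch (hypothetical caller; the lv[] member name is assumed
 * from the tlock definitions, not shown in this file) of how txLinelock()
 * is used: when the linelock embedded in a tlock fills up, an overflow
 * linelock is allocated from the same TxLock table and chained behind it:
 */
#if 0
	if (linelock->index >= linelock->maxcnt)
		linelock = txLinelock(linelock);
	linelock->lv[linelock->index].offset = offset;
	linelock->lv[linelock->index].length = length;
	linelock->index++;
#endif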
/*
 *	transaction commit management
 *	-----------------------------
 */

/*
 * NAME:	txCommit()
 *
 * FUNCTION:	commit the changes to the objects specified in
 *		clist.  For journalled segments only the
 *		changes of the caller are committed, ie by tid.
 *		for non-journalled segments the data are flushed to
 *		disk and then the change to the disk inode and indirect
 *		blocks committed (so blocks newly allocated to the
 *		segment will be made a part of the segment atomically).
 *
 *		all of the segments specified in clist must be in
 *		one file system.  no more than 6 segments are needed
 *		to handle all unix svcs.
 *
 *		if the i_nlink field (i.e. disk inode link count)
 *		is zero, and the type of inode is a regular file or
 *		directory, or symbolic link, the inode is truncated
 *		to zero length.  the truncation is committed but the
 *		VM resources are unaffected until it is closed (see
 *		iput and iclose).
 *
 * PARAMETER:
 *
 * RETURN:
 *
 * serialization:
 *		on entry the inode lock on each segment is assumed
 *		to be held.
 *
 * i/o error:
 */
int txCommit(tid_t tid,		/* transaction identifier */
	     int nip,		/* number of inodes to commit */
	     struct inode **iplist,	/* list of inodes to commit */
	     int flag)
{
	int rc = 0, rc1 = 0;
	struct commit cd;
	struct jfs_log *log;
	struct tblock *tblk;
	struct lrd *lrd;
	int lsn;
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int k, n;
	ino_t top;
	struct super_block *sb;
	jFYI(1, ("txCommit, tid = %d, flag = %d\n", tid, flag));
	/* is read-only file system ? */
	if (isReadOnly(iplist[0])) {
		rc = EROFS;
		goto TheEnd;
	}

	sb = cd.sb = iplist[0]->i_sb;
	cd.tid = tid;

	if (tid == 0)
		tid = txBegin(sb, 0);
	tblk = tid_to_tblock(tid);

	/*
	 * initialize commit structure
	 */
	log = JFS_SBI(sb)->log;
	cd.log = log;

	/* initialize log record descriptor in commit */
	lrd = &cd.lrd;
	lrd->logtid = cpu_to_le32(tblk->logtid);
	lrd->backchain = 0;

	tblk->xflag |= flag;

	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
		tblk->xflag |= COMMIT_LAZY;
	/*
	 *	prepare non-journaled objects for commit
	 *
	 * flush data pages of non-journaled file
	 * to prevent the file getting non-initialized disk blocks
	 * in case of crash.
	 */
	cd.iplist = iplist;
	cd.nip = nip;

	/*
	 *	acquire transaction lock on (on-disk) inodes
	 *
	 * update on-disk inode from in-memory inode
	 * acquiring transaction locks for AFTER records
	 * on the on-disk inode of file object
	 *
	 * sort the inodes array by inode number in descending order
	 * to prevent deadlock when acquiring transaction lock
	 * of on-disk inodes on multiple on-disk inode pages by
	 * multiple concurrent transactions
	 */
	for (k = 0; k < cd.nip; k++) {
		top = (cd.iplist[k])->i_ino;
		for (n = k + 1; n < cd.nip; n++) {
			ip = cd.iplist[n];
			if (ip->i_ino > top) {
				top = ip->i_ino;
				cd.iplist[n] = cd.iplist[k];
				cd.iplist[k] = ip;
			}
		}

		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * BUGBUG - Should we call filemap_fdatasync here instead
		 * of fsync_inode_data?
		 * If we do, we have a deadlock condition since we may end
		 * up recursively calling jfs_get_block with the IWRITELOCK
		 * held.  We may be able to do away with IWRITELOCK while
		 * committing transactions and use i_sem instead.
		 */
		if ((!S_ISDIR(ip->i_mode))
		    && (tblk->flag & COMMIT_DELETE) == 0)
			fsync_inode_data_buffers(ip);

		/*
		 * Mark inode as not dirty.  It will still be on the dirty
		 * inode list, but we'll know not to commit it again unless
		 * it gets marked dirty again
		 */
		clear_cflag(COMMIT_Dirty, ip);

		/* inherit anonymous tlock(s) of inode */
		if (jfs_ip->atlhead) {
			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
			tblk->next = jfs_ip->atlhead;
			if (!tblk->last)
				tblk->last = jfs_ip->atltail;
			jfs_ip->atlhead = jfs_ip->atltail = 0;
			TXN_LOCK();
			list_del_init(&jfs_ip->anon_inode_list);
			TXN_UNLOCK();
		}

		/*
		 * acquire transaction lock on on-disk inode page
		 * (become first tlock of the tblk's tlock list)
		 */
		if (((rc = diWrite(tid, ip))))
			goto out;
	}
	/*
	 *	write log records from transaction locks
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if ((rc = txLog(log, tblk, &cd)))
		goto TheEnd;

	/*
	 * Ensure that inode isn't reused before
	 * lazy commit thread finishes processing
	 */
	if (tblk->xflag & (COMMIT_CREATE | COMMIT_DELETE))
		atomic_inc(&tblk->ip->i_count);

	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
	       ((tblk->ip->i_nlink == 0) &&
		!test_cflag(COMMIT_Nolink, tblk->ip)));

	/*
	 *	write COMMIT log record
	 */
	lrd->type = cpu_to_le16(LOG_COMMIT);
	lrd->length = 0;
	lsn = lmLog(log, tblk, lrd, NULL);

	lmGroupCommit(log, tblk);

	/*
	 *	- transaction is now committed -
	 */

	/*
	 * force pages in careful update
	 * (imap addressing structure update)
	 */
	if (flag & COMMIT_FORCE)
		txForce(tblk);

	/*
	 *	update allocation map.
	 *
	 * update inode allocation map and inode:
	 * free pager lock on memory object of inode if any.
	 * update block allocation map.
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if (tblk->xflag & COMMIT_FORCE)
		txUpdateMap(tblk);

	/*
	 *	free transaction locks and pageout/free pages
	 */
	txRelease(tblk);

	if ((tblk->flag & tblkGC_LAZY) == 0)
		txUnlock(tblk);

	/*
	 *	reset in-memory object state
	 */
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * reset in-memory inode state
		 */
		jfs_ip->bxflag = 0;
		jfs_ip->blid = 0;
	}

	goto TheEnd;

      out:
	txAbortCommit(&cd, rc);

      TheEnd:
	jFYI(1, ("txCommit: tid = %d, returning %d\n", tid, rc));
	return rc;
}
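/*
 * A hedged sketch of how callers choose the commit flag handled above:
 * with no flag the transaction may be group-committed lazily (COMMIT_LAZY
 * is set internally); COMMIT_SYNC forces the group commit to complete
 * before returning; COMMIT_FORCE additionally forces pages and map
 * updates in the caller's context:
 */
#if 0
	rc = txCommit(tid, 1, &ip, 0);			/* lazy commit ok */
	rc = txCommit(tid, 1, &ip, COMMIT_SYNC);	/* synchronous */
#endif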
/*
 *	txLog()
 *
 * FUNCTION:	Writes AFTER log records for all lines modified
 *		by tid for segments specified by inodes in comdata.
 *		Code assumes only WRITELOCKS are recorded in lockwords.
 *
 * PARAMETERS:
 *
 * RETURN :
 */
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
	int rc = 0;
	struct inode *ip;
	lid_t lid;
	struct tlock *tlck;
	struct lrd *lrd = &cd->lrd;

	/*
	 * write log record(s) for each tlock of transaction,
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		tlck->flag |= tlckLOG;

		/* initialize lrd common */
		ip = tlck->ip;
		lrd->aggregate = cpu_to_le32(kdev_t_to_nr(ip->i_dev));
		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);

		if (tlck->mp)
			hold_metapage(tlck->mp, 0);

		/* write log record of page from the tlock */
		switch (tlck->type & tlckTYPE) {
		case tlckXTREE:
			xtLog(log, tblk, lrd, tlck);
			break;

		case tlckDTREE:
			dtLog(log, tblk, lrd, tlck);
			break;

		case tlckINODE:
			diLog(log, tblk, lrd, tlck, cd);
			break;

		case tlckMAP:
			mapLog(log, tblk, lrd, tlck);
			break;

		case tlckDATA:
			dataLog(log, tblk, lrd, tlck);
			break;

		default:
			jERROR(1, ("UFO tlock:0x%p\n", tlck));
		}
		if (tlck->mp)
			release_metapage(tlck->mp);
	}

	return rc;
}
/*
 *	diLog()
 *
 * function: log inode tlock and format maplock to update bmap;
 */
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck, struct commit * cd)
{
	int rc = 0;
	struct metapage *mp;
	pxd_t *pxd;
	struct pxd_lock *pxdlock;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/*
	 *	inode after image
	 */
	if (tlck->type & tlckENTRY) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else if (tlck->type & tlckFREE) {
		/*
		 *	free inode extent
		 *
		 * (pages of the freed inode extent have been invalidated and
		 * a maplock for free of the extent has been formatted at
		 * txLock() time);
		 *
		 * the tlock had been acquired on the inode allocation map page
		 * (iag) that specifies the freed extent, even though the map
		 * page is not itself logged, to prevent pageout of the map
		 * page before the log;
		 */
		assert(tlck->type & tlckFREE);

		/* log LOG_NOREDOINOEXT of the freed inode extent for
		 * logredo() to start NoRedoPage filters, and to update
		 * imap and bmap for free of the extent;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
		/*
		 * For the LOG_NOREDOINOEXT record, we need
		 * to pass the IAG number and inode extent
		 * index (within that IAG) from which the
		 * extent is being released.  These have been
		 * passed to us in iplist[1] and iplist[2].
		 */
		lrd->log.noredoinoext.iagnum =
		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
		lrd->log.noredoinoext.inoext_idx =
		    cpu_to_le32((u32) (size_t) cd->iplist[2]);

		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else {
		jERROR(2, ("diLog: UFO type tlck:0x%p\n", tlck));
	}
#ifdef _JFS_WIP
	/*
	 *	alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif				/* _JFS_WIP */

	return rc;
}
/*
 *	dataLog()
 *
 * function: log data tlock
 */
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct metapage *mp;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/* log after-image for logredo(): */
	lrd->type = cpu_to_le16(LOG_REDOPAGE);

	if (JFS_IP(tlck->ip)->next_index < MAX_INLINE_DIRTABLE_ENTRY) {
		/*
		 * The table has been truncated, we must have deleted
		 * the last entry, so don't bother logging this
		 */
		mp->lid = 0;
		hold_metapage(mp, 0);
		atomic_dec(&mp->nohomeok);
		discard_metapage(mp);
		tlck->mp = 0;
		return 0;
	}

	PXDaddress(pxd, mp->index);
	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);

	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

	/* mark page as homeward bound */
	tlck->flag |= tlckWRITEPAGE;

	return 0;
}
/*
 *	dtLog()
 *
 * function: log dtree tlock and format maplock to update bmap;
 */
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT)
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);

	/*
	 *	page extension via relocation: entry insertion;
	 *	page extension in-place: entry insertion;
	 *	new right page from page split, reinitialized in-line
	 *	root from root page split: entry insertion;
	 */
	if (tlck->type & (tlckNEW | tlckEXTEND)) {
		/* log after-image of the new page for logredo():
		 * mark log (LOG_NEW) for logredo() to initialize
		 * freelist and update bmap for alloc of the new page;
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		if (tlck->type & tlckEXTEND)
			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
		else
			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP for
		 * alloc of the new page;
		 */
		if (tlck->type & tlckBTROOT)
			return;
		tlck->flag |= tlckUPDATEMAP;
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckALLOCPXD;
		pxdlock->pxd = *pxd;

		pxdlock->index = 1;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 *	entry insertion/deletion,
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckENTRY | tlckRELINK)) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 *	page deletion: page has been invalidated
	 *	page relocation: source extent
	 *
	 *	(page will be invalidated after log is written and bmap
	 *	is updated from the page);
	 */
	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
		/* log LOG_NOREDOPAGE of the deleted page for logredo()
		 * to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	return;
}
/*
 *	xtLog()
 *
 * function: log xtree tlock and format maplock to update bmap;
 */
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	xtpage_t *p;
	struct xtlock *xtlck;
	struct maplock *maplock;
	struct xdlistlock *xadlock;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;
	int next, lwm, hwm;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT) {
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
		p = &JFS_IP(ip)->i_xtroot;
		if (S_ISDIR(ip->i_mode))
			lrd->log.redopage.type |=
			    cpu_to_le16(LOG_DIR_XTREE);
	} else
		p = (xtpage_t *) mp->data;
	next = le16_to_cpu(p->header.nextindex);

	xtlck = (struct xtlock *) & tlck->lock;

	maplock = (struct maplock *) & tlck->lock;
	xadlock = (struct xdlistlock *) maplock;
	/*
	 *	entry insertion/extension;
	 *	sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
		/* log after-image for logredo():
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
//		*pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP
		 * for alloc of new/extended extents of XAD[lwm:next)
		 * from the page itself;
		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
		 */
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;

		if (lwm == next)
			goto out;
		assert(lwm < next);
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckALLOCXADLIST;
		xadlock->count = next - lwm;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[lwm],
			       sizeof(xad_t) * xadlock->count);

			for (i = 0; i < xadlock->count; i++)
				p->xad[lwm + i].flag &=
				    ~(XAD_NEW | XAD_EXTENDED);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[lwm];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jFYI(1,
		     ("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d\n",
		      tlck->ip, mp, tlck, lwm, xadlock->count));

		maplock->index = 1;

	      out:
		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;

		return;
	}
	/*
	 *	page deletion: file deletion/truncation (ref. xtTruncate())
	 *
	 * (page will be invalidated after log is written and bmap
	 * is updated from the page);
	 */
	if (tlck->type & tlckFREE) {
		/* LOG_NOREDOPAGE log for NoRedoPage filter:
		 * if page free from file delete, NoRedoFile filter from
		 * inode image of zero link count will subsume NoRedoPage
		 * filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
		 * if page free from deletion and truncation, LOG_UPDATEMAP
		 * log for the page itself is generated from processing
		 * its parent page xad entries;
		 */
		/* if page free from file truncation, log LOG_NOREDOPAGE
		 * of the deleted page for logredo() to start NoRedoPage
		 * filter for the page;
		 */
		if (tblk->xflag & COMMIT_TRUNCATE) {
			/* write NOREDOPAGE for the page */
			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));

			if (tlck->type & tlckBTROOT) {
				/* Empty xtree must be logged */
				lrd->type = cpu_to_le16(LOG_REDOPAGE);
				lrd->backchain =
				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
			}
		}

		/* init LOG_UPDATEMAP of the freed extents
		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
		 * for logredo() to update bmap;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
		xtlck = (struct xtlock *) & tlck->lock;
		hwm = xtlck->hwm.offset;
		lrd->log.updatemap.nxd =
		    cpu_to_le16(hwm - XTENTRYSTART + 1);
		/* reformat linelock for lmLog() */
		xtlck->header.offset = XTENTRYSTART;
		xtlck->header.length = hwm - XTENTRYSTART + 1;
		xtlck->index = 1;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bmap
		 * to free extents of XAD[XTENTRYSTART:hwm) from the
		 * deleted page itself;
		 */
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckFREEXADLIST;
		xadlock->count = hwm - XTENTRYSTART + 1;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
			       sizeof(xad_t) * xadlock->count);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[XTENTRYSTART];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jFYI(1,
		     ("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2\n",
		      tlck->ip, mp, xadlock->count));

		maplock->index = 1;

		/* mark page as invalid */
		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
		    && !(tlck->type & tlckBTROOT))
			tlck->flag |= tlckFREEPAGE;
		/*
		   else (tblk->xflag & COMMIT_PMAP)
		   ? release the page;
		 */
		return;
	}
	/*
	 *	page/entry truncation: file truncation (ref. xtTruncate())
	 *
	 *	|----------+------+------+---------------|
	 *	           |      |      |
	 *	           |      |     hwm - hwm before truncation
	 *	           |     next - truncation point
	 *	          lwm - lwm before truncation
	 *	header ?
	 */
	if (tlck->type & tlckTRUNCATE) {
		pxd_t tpxd;	/* truncated extent of xad */
		int twm;

		/*
		 * For truncation the entire linelock may be used, so it would
		 * be difficult to store the xad list in the linelock itself.
		 * Therefore, we'll just force the transaction to be committed
		 * synchronously, so that xtree pages won't be changed before
		 * txUpdateMap runs.
		 */
		tblk->xflag &= ~COMMIT_LAZY;
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;
		hwm = xtlck->hwm.offset;
		twm = xtlck->twm.offset;

		/*
		 *	write log records
		 */
		/*
		 * allocate entries XAD[lwm:next]:
		 */
		if (lwm < next) {
			/* log after-image for logredo():
			 * logredo() will update bmap for alloc of new/extended
			 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
			 * after-image of XADlist;
			 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
			 * applying the after-image to the meta-data page.
			 */
			lrd->type = cpu_to_le16(LOG_REDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			/* init LOG_UPDATEMAP for logredo() to update bmap for
			 * free of truncated delta extent of the truncated
			 * entry XAD[next - 1]:
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
			/* assert(pxdlock->type & tlckTRUNCATE); */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			tpxd = pxdlock->pxd;	/* save to format maplock */
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* init LOG_UPDATEMAP of the freed extents
			 * XAD[next:hwm] from the deleted page itself
			 * for logredo() to update bmap;
			 */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type =
			    cpu_to_le16(LOG_FREEXADLIST);
			xtlck = (struct xtlock *) & tlck->lock;
			hwm = xtlck->hwm.offset;
			lrd->log.updatemap.nxd =
			    cpu_to_le16(hwm - next + 1);
			/* reformat linelock for lmLog() */
			xtlck->header.offset = next;
			xtlck->header.length = hwm - next + 1;
			xtlck->index = 1;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}

		/*
		 *	format maplock(s) for txUpdateMap() to update bmap
		 */
		maplock->index = 0;

		/*
		 * allocate entries XAD[lwm:next):
		 */
		if (lwm < next) {
			/* format a maplock for txUpdateMap() to update bPMAP
			 * for alloc of new/extended extents of XAD[lwm:next)
			 * from the page itself;
			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->count = next - lwm;
			xadlock->xdlist = &p->xad[lwm];

			jFYI(1,
			     ("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d\n",
			      tlck->ip, mp, xadlock->count, lwm, next));
			maplock->index++;
			xadlock++;
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			struct pxd_lock *pxdlock;

			/* format a maplock for txUpdateMap() to update bmap
			 * to free truncated delta extent of the truncated
			 * entry XAD[next - 1];
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			tlck->flag |= tlckUPDATEMAP;
			pxdlock = (struct pxd_lock *) xadlock;
			pxdlock->flag = mlckFREEPXD;
			pxdlock->count = 1;
			pxdlock->pxd = tpxd;

			jFYI(1,
			     ("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d\n",
			      ip, mp, pxdlock->count, hwm));
			maplock->index++;
			xadlock++;
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* format a maplock for txUpdateMap() to update bmap
			 * to free extents of XAD[next:hwm] from the deleted
			 * page itself;
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckFREEXADLIST;
			xadlock->count = hwm - next + 1;
			xadlock->xdlist = &p->xad[next];

			jFYI(1,
			     ("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d\n",
			      tlck->ip, mp, xadlock->count, next, hwm));
			maplock->index++;
		}

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	}
	return;
}
/*
 *	mapLog()
 *
 * function: log from maplock of freed data extents;
 */
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
	pxd_t *pxd;

	/*
	 *	page relocation: free the source page extent
	 *
	 * a maplock for txUpdateMap() for free of the page
	 * has been formatted at txLock() time saving the src
	 * relocated page address;
	 */
	if (tlck->type & tlckRELOCATE) {
		/* log LOG_NOREDOPAGE of the old relocated page
		 * for logredo() to start NoRedoPage filter;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxd = &lrd->log.redopage.pxd;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* (N.B. currently, logredo() does NOT update bmap
		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
		 * if page free from relocation, LOG_UPDATEMAP log is
		 * specifically generated now for logredo()
		 * to update bmap for free of src relocated page;
		 * (new flag LOG_RELOCATE may be introduced which will
		 * inform logredo() to start NoRedoPage filter and also
		 * update block allocation map at the same time, thus
		 * avoiding an extra log write);
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
		lrd->log.updatemap.nxd = cpu_to_le16(1);
		lrd->log.updatemap.pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
		return;
	}
	/*
	 *	Otherwise it's not a relocate request
	 */
	else {
		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * free of truncated/relocated delta extent of the data;
		 * e.g.: external EA extent, relocated/truncated extent
		 * from xtTailgate();
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
			jFYI(1, ("mapLog: xaddr:0x%lx xlen:0x%x\n",
				 (ulong) addressPXD(&pxdlock->pxd),
				 lengthPXD(&pxdlock->pxd)));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
}
/*
 *	txEA()
 *
 * function: acquire maplock for EA/ACL extents or
 *	     set COMMIT_INLINE flag;
 */
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;

	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			/* alloc of new extent; */
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
			pxdlock++;
			maplock->index = 1;
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;

			set_cflag(COMMIT_Inlineea, ip);
		}
	}

	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {	/* no maplock for new ea */
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			maplock->index = 0;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
		maplock->index++;
	}
}
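/*
 * A hedged usage sketch (hypothetical caller; the JFS_IP(ip)->ea member
 * is assumed from jfs_incore.h, not shown here): an EA set operation
 * would commit the new descriptor and release the old extent in one
 * transaction:
 */
#if 0
	dxd_t newea;
	/* ... build newea (DXD_EXTENT or DXD_INLINE) ... */
	txEA(tid, ip, &JFS_IP(ip)->ea, &newea);
	JFS_IP(ip)->ea = newea;	/* assumed in-memory EA descriptor */
#endif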
/*
 *	txForce()
 *
 * function: synchronously write pages locked by transaction
 *	     after txLog() but before txUpdateMap();
 */
void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */
				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(atomic_read(&mp->nohomeok));
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
			}
		}
	}
}
/*
 *	txUpdateMap()
 *
 * function: update persistent allocation map (and working map
 *	     if appropriate);
 *
 * parameter:
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = 0;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;

	/*
	 *	update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlocks for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			hold_metapage(mp, 0);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;

		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */

				if (S_ISDIR(tlck->ip->i_mode))
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}
		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(atomic_read(&mp->nohomeok) == 1);
			atomic_dec(&mp->nohomeok);
			discard_metapage(mp);
			tlck->mp = 0;
		}
	}
	/*
	 *	update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		ip = tblk->ip;

		ASSERT(test_cflag(COMMIT_New, ip));
		clear_cflag(COMMIT_New, ip);

		diUpdatePMap(ipimap, ip->i_ino, FALSE, tblk);
		ipimap->i_state |= I_DIRTY;
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = JFS_IP(ip)->ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ip, (struct maplock *) & pxdlock, tblk);
		iput(ip);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->ip;
		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
		ipimap->i_state |= I_DIRTY;
		iput(ip);
	}
}
/*
 *	txAllocPMap()
 *
 * function: allocate from persistent map;
 *
 * parameter:
 *	ipbmap	-
 *	maplock	-
 *		xad list:
 *		pxd:
 *
 *	maptype -
 *		allocate from persistent map;
 *		free from persistent map;
 *		(e.g., tmp file - free from working map at release
 *		 of last reference);
 *		free from persistent and working map;
 *
 *	lsn	- log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, FALSE, xaddr,
					     (s64) xlen, tblk);
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jFYI(1,
				     ("allocPMap: xaddr:0x%lx xlen:%d\n",
				      (ulong) xaddr, xlen));
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
		jFYI(1,
		     ("allocPMap: xaddr:0x%lx xlen:%d\n", (ulong) xaddr,
		      xlen));
	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
				     tblk);
			jFYI(1,
			     ("allocPMap: xaddr:0x%lx xlen:%d\n",
			      (ulong) xaddr, xlen));
		}
	}
}
/*
 *	txFreeMap()
 *
 * function:	free from persistent and/or working map;
 *
 * todo: optimization
 */
void txFreeMap(struct inode *ip,
	       struct maplock * maplock, struct tblock * tblk, int maptype)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	jFYI(1,
	     ("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x\n",
	      tblk, maplock, maptype));

	/*
	 * free from persistent map;
	 */
	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				if (!(xad->flag & XAD_NEW)) {
					xaddr = addressXAD(xad);
					xlen = lengthXAD(xad);
					dbUpdatePMap(ipbmap, TRUE, xaddr,
						     (s64) xlen, tblk);
					jFYI(1,
					     ("freePMap: xaddr:0x%lx xlen:%d\n",
					      (ulong) xaddr, xlen));
				}
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
				     tblk);
			jFYI(1,
			     ("freePMap: xaddr:0x%lx xlen:%d\n",
			      (ulong) xaddr, xlen));
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbUpdatePMap(ipbmap, TRUE, xaddr,
					     (s64) xlen, tblk);
				jFYI(1,
				     ("freePMap: xaddr:0x%lx xlen:%d\n",
				      (ulong) xaddr, xlen));
			}
		}
	}

	/*
	 * free from working map;
	 */
	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbFree(ip, xaddr, (s64) xlen);
				xad->flag = 0;
				jFYI(1,
				     ("freeWMap: xaddr:0x%lx xlen:%d\n",
				      (ulong) xaddr, xlen));
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbFree(ip, xaddr, (s64) xlen);
			jFYI(1,
			     ("freeWMap: xaddr:0x%lx xlen:%d\n",
			      (ulong) xaddr, xlen));
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbFree(ip, xaddr, (s64) xlen);
				jFYI(1,
				     ("freeWMap: xaddr:0x%lx xlen:%d\n",
				      (ulong) xaddr, xlen));
			}
		}
	}
}
/*
 *	txFreelock()
 *
 * function:	remove tlock from inode anonymous locklist
 */
void txFreelock(struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct tlock *xtlck, *tlck;
	lid_t xlid = 0, lid;

	if (!jfs_ip->atlhead)
		return;

	xtlck = (struct tlock *) &jfs_ip->atlhead;

	while ((lid = xtlck->next)) {
		tlck = lid_to_tlock(lid);
		if (tlck->flag & tlckFREELOCK) {
			xtlck->next = tlck->next;
			txLockFree(lid);
		} else {
			xtlck = tlck;
			xlid = lid;
		}
	}

	if (jfs_ip->atlhead)
		jfs_ip->atltail = xlid;
	else {
		jfs_ip->atltail = 0;
		/*
		 * If inode was on anon_list, remove it
		 */
		TXN_LOCK();
		list_del_init(&jfs_ip->anon_inode_list);
		TXN_UNLOCK();
	}
}
/*
 *	txAbort()
 *
 * function: abort tx before commit;
 *
 *	frees line-locks and segment locks for all
 *	segments in comdata structure.
 *	Optionally sets state of file-system to FM_DIRTY in super-block.
 *	log ages of in-memory page frames held by the caller
 *	are reset to 0 (to avoid logwrap).
 */
void txAbort(tid_t tid, int dirty)
{
	lid_t lid, next;
	struct metapage *mp;
	struct tblock *tblk = tid_to_tblock(tid);

	jEVENT(1, ("txAbort: tid:%d dirty:0x%x\n", tid, dirty));

	/*
	 * free tlocks of the transaction
	 */
	for (lid = tblk->next; lid; lid = next) {
		next = lid_to_tlock(lid)->next;

		mp = lid_to_tlock(lid)->mp;
		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid logwrap:
			 *
			 * (page may have been previously committed by another
			 * transaction(s) but has not been paged, i.e.,
			 * it may be on logsync list even though it has not
			 * been logged for the current tx.)
			 */
			if (mp->xflag & COMMIT_PAGE && mp->lsn)
				LogSyncRelease(mp);
		}

		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	/* caller will free the transaction block */

	tblk->next = tblk->last = 0;

	/*
	 * mark filesystem dirty
	 */
	if (dirty)
		updateSuper(tblk->sb, FM_DIRTY);

	return;
}
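/*
 * Sketch of a typical call site (illustrative, with a hypothetical
 * update step): a multi-step update that fails midway aborts the
 * transaction and passes dirty=1 so the superblock is marked FM_DIRTY
 * and fsck is forced at the next mount.
 */
#if 0
	tid = txBegin(sb, 0);
	if ((rc = some_update_step(tid)))	/* hypothetical helper */
		txAbort(tid, 1);	/* 1 => mark filesystem dirty */
	else
		rc = txCommit(tid, 1, &ip, 0);
	txEnd(tid);
#endif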
/*
 *	txAbortCommit()
 *
 * function: abort commit.
 *
 *	frees tlocks of transaction; line-locks and segment locks for all
 *	segments in comdata structure; frees malloc storage;
 *	sets state of file-system to FM_DIRTY in super-block.
 *	log ages of in-memory page frames held by the caller
 *	are reset to 0 (to avoid logwrap).
 */
void txAbortCommit(struct commit * cd, int exval)
{
	struct tblock *tblk;
	tid_t tid;
	lid_t lid, next;
	struct metapage *mp;

	assert(exval == EIO || exval == ENOMEM);
	jEVENT(1, ("txAbortCommit: cd:0x%p\n", cd));

	/*
	 * free tlocks of the transaction
	 */
	tid = cd->tid;
	tblk = tid_to_tblock(tid);
	for (lid = tblk->next; lid; lid = next) {
		next = lid_to_tlock(lid)->next;

		mp = lid_to_tlock(lid)->mp;
		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid logwrap;
			 */
			if (mp->xflag & COMMIT_PAGE)
				LogSyncRelease(mp);
		}

		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	tblk->next = tblk->last = 0;

	/* free the transaction block */
	txEnd(tid);

	/*
	 * mark filesystem dirty
	 */
	updateSuper(cd->sb, FM_DIRTY);
}
/*
 *	txLazyCommit(void)
 *
 *	All transactions except those changing ipimap (COMMIT_FORCE) are
 *	processed by this routine.  This ensures that the inode and block
 *	allocation maps are updated in order.  For synchronous transactions,
 *	let the user thread finish processing after txUpdateMap() is called.
 */
void txLazyCommit(struct tblock * tblk)
{
	struct jfs_log *log;

	while (((tblk->flag & tblkGC_READY) == 0) &&
	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
		/* We must have gotten ahead of the user thread
		 */
		jFYI(1, ("txLazyCommit: tblk 0x%p not unlocked\n", tblk));
		schedule();
	}

	jFYI(1, ("txLazyCommit: processing tblk 0x%p\n", tblk));

	txUpdateMap(tblk);

	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;

	spin_lock_irq(&log->gclock);	// LOGGC_LOCK

	tblk->flag |= tblkGC_COMMITTED;

	if (tblk->flag & tblkGC_READY)
		log->gcrtc--;

	if (tblk->flag & tblkGC_READY)
		wake_up(&tblk->gcwait);	// LOGGC_WAKEUP

	/*
	 * Can't release log->gclock until we've tested tblk->flag
	 */
	if (tblk->flag & tblkGC_LAZY) {
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		txUnlock(tblk);
		tblk->flag &= ~tblkGC_LAZY;
		txEnd(tblk - TxBlock);	/* Convert back to tid */
	} else
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK

	jFYI(1, ("txLazyCommit: done: tblk = 0x%p\n", tblk));
}
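/*
 * The waiting side of the gcwait handshake lives in the log manager's
 * group commit path.  A sketch of what txLazyCommit()'s wake_up() pairs
 * with, under LOGGC_LOCK (an approximation, not the exact lmGroupCommit
 * code):
 */
#if 0
	DECLARE_WAITQUEUE(wait, current);

	tblk->flag |= tblkGC_READY;
	add_wait_queue(&tblk->gcwait, &wait);
	while (!(tblk->flag & tblkGC_COMMITTED)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		schedule();
		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&tblk->gcwait, &wait);
#endif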
/*
 *	jfs_lazycommit(void)
 *
 *	To be run as a kernel daemon.  If lbmIODone is called in an interrupt
 *	context, or where blocking is not wanted, this routine will process
 *	committed transactions from the unlock queue.
 */
int jfs_lazycommit(void *arg)
{
	int WorkDone;
	struct tblock *tblk;
	unsigned long flags;

	lock_kernel();

	daemonize();
	current->tty = NULL;
	strcpy(current->comm, "jfsCommit");

	unlock_kernel();

	jfsCommitTask = current;

	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	LAZY_LOCK_INIT();
	TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;

	complete(&jfsIOwait);

	do {
		DECLARE_WAITQUEUE(wq, current);

		LAZY_LOCK(flags);
restart:
		WorkDone = 0;
		while ((tblk = TxAnchor.unlock_queue)) {
			/*
			 * We can't get ahead of the user thread.  Spinning is
			 * simpler than blocking/waking.  We shouldn't spin
			 * very long, since the user thread shouldn't be
			 * blocking between lmGroupCommit & txEnd.
			 */
			WorkDone = 1;

			/*
			 * Remove first transaction from queue
			 */
			TxAnchor.unlock_queue = tblk->cqnext;
			tblk->cqnext = 0;
			if (TxAnchor.unlock_tail == tblk)
				TxAnchor.unlock_tail = 0;

			LAZY_UNLOCK(flags);
			txLazyCommit(tblk);

			/*
			 * We can be running indefinitely if other processors
			 * are adding transactions to this list
			 */
			yield();
			LAZY_LOCK(flags);
		}

		if (WorkDone)
			goto restart;

		add_wait_queue(&jfs_commit_thread_wait, &wq);
		set_current_state(TASK_INTERRUPTIBLE);
		LAZY_UNLOCK(flags);
		schedule();
		current->state = TASK_RUNNING;
		remove_wait_queue(&jfs_commit_thread_wait, &wq);
	} while (!jfs_stop_threads);

	if (TxAnchor.unlock_queue)
		jERROR(1, ("jfs_lazycommit being killed with pending transactions!\n"));
	else
		jFYI(1, ("jfs_lazycommit being killed\n"));
	complete(&jfsIOwait);
	return 0;
}
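/*
 * Startup sketch: the daemon is expected to be spawned at module init
 * roughly as below (an assumption about the surrounding code, which
 * lives outside this file).  The creator blocks on jfsIOwait until the
 * daemon's complete() above signals that the unlock queue is ready.
 */
#if 0
	init_completion(&jfsIOwait);
	kernel_thread(jfs_lazycommit, 0,
		      CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	wait_for_completion(&jfsIOwait);	/* daemon is initialized */
#endif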
/*
 * Append tblk to the tail of the unlock queue and wake the lazy commit
 * daemon (jfs_lazycommit) to process it.
 */
void txLazyUnlock(struct tblock * tblk)
{
	unsigned long flags;

	LAZY_LOCK(flags);

	if (TxAnchor.unlock_tail)
		TxAnchor.unlock_tail->cqnext = tblk;
	else
		TxAnchor.unlock_queue = tblk;
	TxAnchor.unlock_tail = tblk;
	tblk->cqnext = 0;
	LAZY_UNLOCK(flags);
	wake_up(&jfs_commit_thread_wait);
}
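/*
 * txLazyUnlock() is the producer half of the unlock queue and
 * jfs_lazycommit() is the consumer.  The queue is a singly-linked FIFO
 * threaded through tblock.cqnext and guarded by LazyLock:
 *
 *	unlock_queue --> tblk A --> tblk B --> 0
 *	unlock_tail  ----------------^
 */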
/*
 * Drop one nohomeok reference on mp; when the last reference goes away,
 * take the metapage off the log's synclist so it can be written home.
 */
static void LogSyncRelease(struct metapage * mp)
{
	struct jfs_log *log = mp->log;

	assert(atomic_read(&mp->nohomeok));
	assert(log);
	atomic_dec(&mp->nohomeok);

	if (atomic_read(&mp->nohomeok))
		return;

	hold_metapage(mp, 0);

	LOGSYNC_LOCK(log);
	mp->log = NULL;
	mp->lsn = 0;
	mp->clsn = 0;
	log->count--;
	list_del_init(&mp->synclist);
	LOGSYNC_UNLOCK(log);

	release_metapage(mp);
}
/*
 *	txQuiesce
 *
 *	Block all new transactions and push anonymous transactions to
 *	completion
 *
 *	This does almost the same thing as jfs_sync below.  We don't
 *	worry about deadlocking when TlocksLow is set, since we would
 *	expect jfs_sync to get us out of that jam.
 */
void txQuiesce(struct super_block *sb)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	struct jfs_log *log = JFS_SBI(sb)->log;
	int rc;
	tid_t tid;

	set_bit(log_QUIESCE, &log->flag);

	TXN_LOCK();
restart:
	while (!list_empty(&TxAnchor.anon_list)) {
		jfs_ip = list_entry(TxAnchor.anon_list.next,
				    struct jfs_inode_info,
				    anon_inode_list);
		ip = jfs_ip->inode;

		/*
		 * inode will be removed from anonymous list
		 * when it is committed
		 */
		TXN_UNLOCK();
		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
		down(&jfs_ip->commit_sem);
		rc = txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		up(&jfs_ip->commit_sem);
		/*
		 * Just to be safe.  I don't know how
		 * long we can run without blocking
		 */
		yield();
		TXN_LOCK();
	}

	/*
	 * If jfs_sync is running in parallel, there could be some inodes
	 * on anon_list2.  Let's check.
	 */
	if (!list_empty(&TxAnchor.anon_list2)) {
		list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
		INIT_LIST_HEAD(&TxAnchor.anon_list2);
		goto restart;
	}
	TXN_UNLOCK();

	/*
	 * We may need to kick off the group commit
	 */
	jfs_flush_journal(log, 0);
}
/*
 *	txResume()
 *
 *	Allows transactions to start again following txQuiesce
 */
void txResume(struct super_block *sb)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	clear_bit(log_QUIESCE, &log->flag);
	TXN_WAKEUP(&log->syncwait);
}
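/*
 * Quiesce/resume pairing, a sketch (the state-changing step is a
 * hypothetical placeholder; callers must guarantee that txResume()
 * follows txQuiesce() on every path):
 */
#if 0
	txQuiesce(sb);		/* block new txns, flush anonymous ones */
	rc = do_state_change(sb);	/* hypothetical critical section */
	txResume(sb);		/* clear log_QUIESCE, wake txBegin waiters */
#endif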
/*
 *	jfs_sync(void)
 *
 *	To be run as a kernel daemon.  This is awakened when tlocks run low.
 *	We write any inodes that have anonymous tlocks so they will become
 *	available.
 */
int jfs_sync(void *arg)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int rc;
	tid_t tid;

	lock_kernel();

	daemonize();
	current->tty = NULL;
	strcpy(current->comm, "jfsSync");

	unlock_kernel();

	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	complete(&jfsIOwait);

	do {
		DECLARE_WAITQUEUE(wq, current);
		/*
		 * write each inode on the anonymous inode list
		 */
		TXN_LOCK();
		while (TxAnchor.TlocksLow && !list_empty(&TxAnchor.anon_list)) {
			jfs_ip = list_entry(TxAnchor.anon_list.next,
					    struct jfs_inode_info,
					    anon_inode_list);
			ip = jfs_ip->inode;

			/*
			 * down_trylock returns 0 on success.  This is
			 * inconsistent with spin_trylock.
			 */
			if (! down_trylock(&jfs_ip->commit_sem)) {
				/*
				 * inode will be removed from anonymous list
				 * when it is committed
				 */
				TXN_UNLOCK();
				tid = txBegin(ip->i_sb,
					      COMMIT_INODE | COMMIT_FORCE);
				rc = txCommit(tid, 1, &ip, 0);
				txEnd(tid);
				up(&jfs_ip->commit_sem);
				/*
				 * Just to be safe.  I don't know how
				 * long we can run without blocking
				 */
				yield();
				TXN_LOCK();
			} else {
				/* We can't get the commit semaphore.  It may
				 * be held by a thread waiting for tlocks,
				 * so let's not block here.  Save it to
				 * put back on the anon_list.
				 */

				/* Take off anon_list */
				list_del(&jfs_ip->anon_inode_list);

				/* Put on anon_list2 */
				list_add(&jfs_ip->anon_inode_list,
					 &TxAnchor.anon_list2);
			}
		}
		/* Add anon_list2 back to anon_list */
		if (!list_empty(&TxAnchor.anon_list2)) {
			list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
			INIT_LIST_HEAD(&TxAnchor.anon_list2);
		}
		add_wait_queue(&jfs_sync_thread_wait, &wq);
		set_current_state(TASK_INTERRUPTIBLE);
		TXN_UNLOCK();
		schedule();
		current->state = TASK_RUNNING;
		remove_wait_queue(&jfs_sync_thread_wait, &wq);
	} while (!jfs_stop_threads);

	jFYI(1, ("jfs_sync being killed\n"));
	complete(&jfsIOwait);
	return 0;
}
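/*
 * On the down_trylock() polarity noted in jfs_sync(), a minimal
 * contrast of the two trylock conventions:
 *
 *	if (!down_trylock(&sem))	- zero means semaphore acquired
 *		...
 *	if (spin_trylock(&lock))	- nonzero means lock acquired
 *		...
 */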
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
		      int *eof, void *data)
{
	int len = 0;
	off_t begin;
	char *freewait;
	char *freelockwait;
	char *lowlockwait;

	freewait =
	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
	freelockwait =
	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
	lowlockwait =
	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";

	len += sprintf(buffer,
		       "JFS TxAnchor\n"
		       "============\n"
		       "freetid = %d\n"
		       "freewait = %s\n"
		       "freelock = %d\n"
		       "freelockwait = %s\n"
		       "lowlockwait = %s\n"
		       "tlocksInUse = %d\n"
		       "TlocksLow = %d\n"
		       "unlock_queue = 0x%p\n"
		       "unlock_tail = 0x%p\n",
		       TxAnchor.freetid,
		       freewait,
		       TxAnchor.freelock,
		       freelockwait,
		       lowlockwait,
		       TxAnchor.tlocksInUse,
		       TxAnchor.TlocksLow,
		       TxAnchor.unlock_queue,
		       TxAnchor.unlock_tail);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif
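/*
 * Registration sketch: jfs_debug.c is expected to hook this handler
 * into procfs along these lines (entry name and parent directory are
 * assumptions for illustration):
 */
#if 0
	struct proc_dir_entry *base = proc_mkdir("jfs", proc_root_fs);

	if (base)
		create_proc_read_entry("TxAnchor", 0, base,
				       jfs_txanchor_read, NULL);
#endif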
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
		     int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS TxStats\n"
		       "===========\n"
		       "calls to txBegin = %d\n"
		       "txBegin blocked by sync barrier = %d\n"
		       "txBegin blocked by tlocks low = %d\n"
		       "txBegin blocked by no free tid = %d\n"
		       "calls to txBeginAnon = %d\n"
		       "txBeginAnon blocked by sync barrier = %d\n"
		       "txBeginAnon blocked by tlocks low = %d\n"
		       "calls to txLockAlloc = %d\n"
		       "txLockAlloc blocked by no free lock = %d\n",
		       TxStat.txBegin,
		       TxStat.txBegin_barrier,
		       TxStat.txBegin_lockslow,
		       TxStat.txBegin_freetid,
		       TxStat.txBeginAnon,
		       TxStat.txBeginAnon_barrier,
		       TxStat.txBeginAnon_lockslow,
		       TxStat.txLockAlloc,
		       TxStat.txLockAlloc_freelock);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif
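/*
 * With CONFIG_JFS_STATISTICS enabled, the counters can then be read
 * from user space, e.g. (the proc path assumes the registration sketch
 * shown after jfs_txanchor_read above):
 *
 *	$ cat /proc/fs/jfs/TxStats
 *	JFS TxStats
 *	===========
 *	calls to txBegin = ...
 */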