1 /* 2 * Copyright (C) International Business Machines Corp., 2000-2004 3 * Portions Copyright (C) Christoph Hellwig, 2001-2002 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See 13 * the GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 /* 21 * jfs_logmgr.c: log manager 22 * 23 * for related information, see transaction manager (jfs_txnmgr.c), and 24 * recovery manager (jfs_logredo.c). 25 * 26 * note: for detail, RTFS. 27 * 28 * log buffer manager: 29 * special purpose buffer manager supporting log i/o requirements. 30 * per log serial pageout of logpage 31 * queuing i/o requests and redrive i/o at iodone 32 * maintain current logpage buffer 33 * no caching since append only 34 * appropriate jfs buffer cache buffers as needed 35 * 36 * group commit: 37 * transactions which wrote COMMIT records in the same in-memory 38 * log page during the pageout of previous/current log page(s) are 39 * committed together by the pageout of the page. 40 * 41 * TBD lazy commit: 42 * transactions are committed asynchronously when the log page 43 * containing it COMMIT is paged out when it becomes full; 44 * 45 * serialization: 46 * . a per log lock serialize log write. 47 * . a per log lock serialize group commit. 48 * . a per log lock serialize log open/close; 49 * 50 * TBD log integrity: 51 * careful-write (ping-pong) of last logpage to recover from crash 52 * in overwrite. 53 * detection of split (out-of-order) write of physical sectors 54 * of last logpage via timestamp at end of each sector 55 * with its mirror data array at trailer). 56 * 57 * alternatives: 58 * lsn - 64-bit monotonically increasing integer vs 59 * 32-bit lspn and page eor. 60 */ 61 62 #include <linux/fs.h> 63 #include <linux/blkdev.h> 64 #include <linux/interrupt.h> 65 #include <linux/completion.h> 66 #include <linux/kthread.h> 67 #include <linux/buffer_head.h> /* for sync_blockdev() */ 68 #include <linux/bio.h> 69 #include <linux/freezer.h> 70 #include <linux/delay.h> 71 #include <linux/mutex.h> 72 #include <linux/seq_file.h> 73 #include "jfs_incore.h" 74 #include "jfs_filsys.h" 75 #include "jfs_metapage.h" 76 #include "jfs_superblock.h" 77 #include "jfs_txnmgr.h" 78 #include "jfs_debug.h" 79 80 81 /* 82 * lbuf's ready to be redriven. Protected by log_redrive_lock (jfsIO thread) 83 */ 84 static struct lbuf *log_redrive_list; 85 static DEFINE_SPINLOCK(log_redrive_lock); 86 87 88 /* 89 * log read/write serialization (per log) 90 */ 91 #define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock) 92 #define LOG_LOCK(log) mutex_lock(&((log)->loglock)) 93 #define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock)) 94 95 96 /* 97 * log group commit serialization (per log) 98 */ 99 100 #define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock) 101 #define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock) 102 #define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock) 103 #define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait) 104 105 /* 106 * log sync serialization (per log) 107 */ 108 #define LOGSYNC_DELTA(logsize) min((logsize)/8, 128*LOGPSIZE) 109 #define LOGSYNC_BARRIER(logsize) ((logsize)/4) 110 /* 111 #define LOGSYNC_DELTA(logsize) min((logsize)/4, 256*LOGPSIZE) 112 #define LOGSYNC_BARRIER(logsize) ((logsize)/2) 113 */ 114 115 116 /* 117 * log buffer cache synchronization 118 */ 119 static DEFINE_SPINLOCK(jfsLCacheLock); 120 121 #define LCACHE_LOCK(flags) spin_lock_irqsave(&jfsLCacheLock, flags) 122 #define LCACHE_UNLOCK(flags) spin_unlock_irqrestore(&jfsLCacheLock, flags) 123 124 /* 125 * See __SLEEP_COND in jfs_locks.h 126 */ 127 #define LCACHE_SLEEP_COND(wq, cond, flags) \ 128 do { \ 129 if (cond) \ 130 break; \ 131 __SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \ 132 } while (0) 133 134 #define LCACHE_WAKEUP(event) wake_up(event) 135 136 137 /* 138 * lbuf buffer cache (lCache) control 139 */ 140 /* log buffer manager pageout control (cumulative, inclusive) */ 141 #define lbmREAD 0x0001 142 #define lbmWRITE 0x0002 /* enqueue at tail of write queue; 143 * init pageout if at head of queue; 144 */ 145 #define lbmRELEASE 0x0004 /* remove from write queue 146 * at completion of pageout; 147 * do not free/recycle it yet: 148 * caller will free it; 149 */ 150 #define lbmSYNC 0x0008 /* do not return to freelist 151 * when removed from write queue; 152 */ 153 #define lbmFREE 0x0010 /* return to freelist 154 * at completion of pageout; 155 * the buffer may be recycled; 156 */ 157 #define lbmDONE 0x0020 158 #define lbmERROR 0x0040 159 #define lbmGC 0x0080 /* lbmIODone to perform post-GC processing 160 * of log page 161 */ 162 #define lbmDIRECT 0x0100 163 164 /* 165 * Global list of active external journals 166 */ 167 static LIST_HEAD(jfs_external_logs); 168 static struct jfs_log *dummy_log = NULL; 169 static DEFINE_MUTEX(jfs_log_mutex); 170 171 /* 172 * forward references 173 */ 174 static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk, 175 struct lrd * lrd, struct tlock * tlck); 176 177 static int lmNextPage(struct jfs_log * log); 178 static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi, 179 int activate); 180 181 static int open_inline_log(struct super_block *sb); 182 static int open_dummy_log(struct super_block *sb); 183 static int lbmLogInit(struct jfs_log * log); 184 static void lbmLogShutdown(struct jfs_log * log); 185 static struct lbuf *lbmAllocate(struct jfs_log * log, int); 186 static void lbmFree(struct lbuf * bp); 187 static void lbmfree(struct lbuf * bp); 188 static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp); 189 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block); 190 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag); 191 static int lbmIOWait(struct lbuf * bp, int flag); 192 static bio_end_io_t lbmIODone; 193 static void lbmStartIO(struct lbuf * bp); 194 static void lmGCwrite(struct jfs_log * log, int cant_block); 195 static int lmLogSync(struct jfs_log * log, int hard_sync); 196 197 198 199 /* 200 * statistics 201 */ 202 #ifdef CONFIG_JFS_STATISTICS 203 static struct lmStat { 204 uint commit; /* # of commit */ 205 uint pagedone; /* # of page written */ 206 uint submitted; /* # of pages submitted */ 207 uint full_page; /* # of full pages submitted */ 208 uint partial_page; /* # of partial pages submitted */ 209 } lmStat; 210 #endif 211 212 static void write_special_inodes(struct jfs_log *log, 213 int (*writer)(struct address_space *)) 214 { 215 struct jfs_sb_info *sbi; 216 217 list_for_each_entry(sbi, &log->sb_list, log_list) { 218 writer(sbi->ipbmap->i_mapping); 219 writer(sbi->ipimap->i_mapping); 220 writer(sbi->direct_inode->i_mapping); 221 } 222 } 223 224 /* 225 * NAME: lmLog() 226 * 227 * FUNCTION: write a log record; 228 * 229 * PARAMETER: 230 * 231 * RETURN: lsn - offset to the next log record to write (end-of-log); 232 * -1 - error; 233 * 234 * note: todo: log error handler 235 */ 236 int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, 237 struct tlock * tlck) 238 { 239 int lsn; 240 int diffp, difft; 241 struct metapage *mp = NULL; 242 unsigned long flags; 243 244 jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p", 245 log, tblk, lrd, tlck); 246 247 LOG_LOCK(log); 248 249 /* log by (out-of-transaction) JFS ? */ 250 if (tblk == NULL) 251 goto writeRecord; 252 253 /* log from page ? */ 254 if (tlck == NULL || 255 tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL) 256 goto writeRecord; 257 258 /* 259 * initialize/update page/transaction recovery lsn 260 */ 261 lsn = log->lsn; 262 263 LOGSYNC_LOCK(log, flags); 264 265 /* 266 * initialize page lsn if first log write of the page 267 */ 268 if (mp->lsn == 0) { 269 mp->log = log; 270 mp->lsn = lsn; 271 log->count++; 272 273 /* insert page at tail of logsynclist */ 274 list_add_tail(&mp->synclist, &log->synclist); 275 } 276 277 /* 278 * initialize/update lsn of tblock of the page 279 * 280 * transaction inherits oldest lsn of pages associated 281 * with allocation/deallocation of resources (their 282 * log records are used to reconstruct allocation map 283 * at recovery time: inode for inode allocation map, 284 * B+-tree index of extent descriptors for block 285 * allocation map); 286 * allocation map pages inherit transaction lsn at 287 * commit time to allow forwarding log syncpt past log 288 * records associated with allocation/deallocation of 289 * resources only after persistent map of these map pages 290 * have been updated and propagated to home. 291 */ 292 /* 293 * initialize transaction lsn: 294 */ 295 if (tblk->lsn == 0) { 296 /* inherit lsn of its first page logged */ 297 tblk->lsn = mp->lsn; 298 log->count++; 299 300 /* insert tblock after the page on logsynclist */ 301 list_add(&tblk->synclist, &mp->synclist); 302 } 303 /* 304 * update transaction lsn: 305 */ 306 else { 307 /* inherit oldest/smallest lsn of page */ 308 logdiff(diffp, mp->lsn, log); 309 logdiff(difft, tblk->lsn, log); 310 if (diffp < difft) { 311 /* update tblock lsn with page lsn */ 312 tblk->lsn = mp->lsn; 313 314 /* move tblock after page on logsynclist */ 315 list_move(&tblk->synclist, &mp->synclist); 316 } 317 } 318 319 LOGSYNC_UNLOCK(log, flags); 320 321 /* 322 * write the log record 323 */ 324 writeRecord: 325 lsn = lmWriteRecord(log, tblk, lrd, tlck); 326 327 /* 328 * forward log syncpt if log reached next syncpt trigger 329 */ 330 logdiff(diffp, lsn, log); 331 if (diffp >= log->nextsync) 332 lsn = lmLogSync(log, 0); 333 334 /* update end-of-log lsn */ 335 log->lsn = lsn; 336 337 LOG_UNLOCK(log); 338 339 /* return end-of-log address */ 340 return lsn; 341 } 342 343 /* 344 * NAME: lmWriteRecord() 345 * 346 * FUNCTION: move the log record to current log page 347 * 348 * PARAMETER: cd - commit descriptor 349 * 350 * RETURN: end-of-log address 351 * 352 * serialization: LOG_LOCK() held on entry/exit 353 */ 354 static int 355 lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, 356 struct tlock * tlck) 357 { 358 int lsn = 0; /* end-of-log address */ 359 struct lbuf *bp; /* dst log page buffer */ 360 struct logpage *lp; /* dst log page */ 361 caddr_t dst; /* destination address in log page */ 362 int dstoffset; /* end-of-log offset in log page */ 363 int freespace; /* free space in log page */ 364 caddr_t p; /* src meta-data page */ 365 caddr_t src; 366 int srclen; 367 int nbytes; /* number of bytes to move */ 368 int i; 369 int len; 370 struct linelock *linelock; 371 struct lv *lv; 372 struct lvd *lvd; 373 int l2linesize; 374 375 len = 0; 376 377 /* retrieve destination log page to write */ 378 bp = (struct lbuf *) log->bp; 379 lp = (struct logpage *) bp->l_ldata; 380 dstoffset = log->eor; 381 382 /* any log data to write ? */ 383 if (tlck == NULL) 384 goto moveLrd; 385 386 /* 387 * move log record data 388 */ 389 /* retrieve source meta-data page to log */ 390 if (tlck->flag & tlckPAGELOCK) { 391 p = (caddr_t) (tlck->mp->data); 392 linelock = (struct linelock *) & tlck->lock; 393 } 394 /* retrieve source in-memory inode to log */ 395 else if (tlck->flag & tlckINODELOCK) { 396 if (tlck->type & tlckDTREE) 397 p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot; 398 else 399 p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot; 400 linelock = (struct linelock *) & tlck->lock; 401 } 402 #ifdef _JFS_WIP 403 else if (tlck->flag & tlckINLINELOCK) { 404 405 inlinelock = (struct inlinelock *) & tlck; 406 p = (caddr_t) & inlinelock->pxd; 407 linelock = (struct linelock *) & tlck; 408 } 409 #endif /* _JFS_WIP */ 410 else { 411 jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck); 412 return 0; /* Probably should trap */ 413 } 414 l2linesize = linelock->l2linesize; 415 416 moveData: 417 ASSERT(linelock->index <= linelock->maxcnt); 418 419 lv = linelock->lv; 420 for (i = 0; i < linelock->index; i++, lv++) { 421 if (lv->length == 0) 422 continue; 423 424 /* is page full ? */ 425 if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) { 426 /* page become full: move on to next page */ 427 lmNextPage(log); 428 429 bp = log->bp; 430 lp = (struct logpage *) bp->l_ldata; 431 dstoffset = LOGPHDRSIZE; 432 } 433 434 /* 435 * move log vector data 436 */ 437 src = (u8 *) p + (lv->offset << l2linesize); 438 srclen = lv->length << l2linesize; 439 len += srclen; 440 while (srclen > 0) { 441 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; 442 nbytes = min(freespace, srclen); 443 dst = (caddr_t) lp + dstoffset; 444 memcpy(dst, src, nbytes); 445 dstoffset += nbytes; 446 447 /* is page not full ? */ 448 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) 449 break; 450 451 /* page become full: move on to next page */ 452 lmNextPage(log); 453 454 bp = (struct lbuf *) log->bp; 455 lp = (struct logpage *) bp->l_ldata; 456 dstoffset = LOGPHDRSIZE; 457 458 srclen -= nbytes; 459 src += nbytes; 460 } 461 462 /* 463 * move log vector descriptor 464 */ 465 len += 4; 466 lvd = (struct lvd *) ((caddr_t) lp + dstoffset); 467 lvd->offset = cpu_to_le16(lv->offset); 468 lvd->length = cpu_to_le16(lv->length); 469 dstoffset += 4; 470 jfs_info("lmWriteRecord: lv offset:%d length:%d", 471 lv->offset, lv->length); 472 } 473 474 if ((i = linelock->next)) { 475 linelock = (struct linelock *) lid_to_tlock(i); 476 goto moveData; 477 } 478 479 /* 480 * move log record descriptor 481 */ 482 moveLrd: 483 lrd->length = cpu_to_le16(len); 484 485 src = (caddr_t) lrd; 486 srclen = LOGRDSIZE; 487 488 while (srclen > 0) { 489 freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset; 490 nbytes = min(freespace, srclen); 491 dst = (caddr_t) lp + dstoffset; 492 memcpy(dst, src, nbytes); 493 494 dstoffset += nbytes; 495 srclen -= nbytes; 496 497 /* are there more to move than freespace of page ? */ 498 if (srclen) 499 goto pageFull; 500 501 /* 502 * end of log record descriptor 503 */ 504 505 /* update last log record eor */ 506 log->eor = dstoffset; 507 bp->l_eor = dstoffset; 508 lsn = (log->page << L2LOGPSIZE) + dstoffset; 509 510 if (lrd->type & cpu_to_le16(LOG_COMMIT)) { 511 tblk->clsn = lsn; 512 jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn, 513 bp->l_eor); 514 515 INCREMENT(lmStat.commit); /* # of commit */ 516 517 /* 518 * enqueue tblock for group commit: 519 * 520 * enqueue tblock of non-trivial/synchronous COMMIT 521 * at tail of group commit queue 522 * (trivial/asynchronous COMMITs are ignored by 523 * group commit.) 524 */ 525 LOGGC_LOCK(log); 526 527 /* init tblock gc state */ 528 tblk->flag = tblkGC_QUEUE; 529 tblk->bp = log->bp; 530 tblk->pn = log->page; 531 tblk->eor = log->eor; 532 533 /* enqueue transaction to commit queue */ 534 list_add_tail(&tblk->cqueue, &log->cqueue); 535 536 LOGGC_UNLOCK(log); 537 } 538 539 jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x", 540 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); 541 542 /* page not full ? */ 543 if (dstoffset < LOGPSIZE - LOGPTLRSIZE) 544 return lsn; 545 546 pageFull: 547 /* page become full: move on to next page */ 548 lmNextPage(log); 549 550 bp = (struct lbuf *) log->bp; 551 lp = (struct logpage *) bp->l_ldata; 552 dstoffset = LOGPHDRSIZE; 553 src += nbytes; 554 } 555 556 return lsn; 557 } 558 559 560 /* 561 * NAME: lmNextPage() 562 * 563 * FUNCTION: write current page and allocate next page. 564 * 565 * PARAMETER: log 566 * 567 * RETURN: 0 568 * 569 * serialization: LOG_LOCK() held on entry/exit 570 */ 571 static int lmNextPage(struct jfs_log * log) 572 { 573 struct logpage *lp; 574 int lspn; /* log sequence page number */ 575 int pn; /* current page number */ 576 struct lbuf *bp; 577 struct lbuf *nextbp; 578 struct tblock *tblk; 579 580 /* get current log page number and log sequence page number */ 581 pn = log->page; 582 bp = log->bp; 583 lp = (struct logpage *) bp->l_ldata; 584 lspn = le32_to_cpu(lp->h.page); 585 586 LOGGC_LOCK(log); 587 588 /* 589 * write or queue the full page at the tail of write queue 590 */ 591 /* get the tail tblk on commit queue */ 592 if (list_empty(&log->cqueue)) 593 tblk = NULL; 594 else 595 tblk = list_entry(log->cqueue.prev, struct tblock, cqueue); 596 597 /* every tblk who has COMMIT record on the current page, 598 * and has not been committed, must be on commit queue 599 * since tblk is queued at commit queueu at the time 600 * of writing its COMMIT record on the page before 601 * page becomes full (even though the tblk thread 602 * who wrote COMMIT record may have been suspended 603 * currently); 604 */ 605 606 /* is page bound with outstanding tail tblk ? */ 607 if (tblk && tblk->pn == pn) { 608 /* mark tblk for end-of-page */ 609 tblk->flag |= tblkGC_EOP; 610 611 if (log->cflag & logGC_PAGEOUT) { 612 /* if page is not already on write queue, 613 * just enqueue (no lbmWRITE to prevent redrive) 614 * buffer to wqueue to ensure correct serial order 615 * of the pages since log pages will be added 616 * continuously 617 */ 618 if (bp->l_wqnext == NULL) 619 lbmWrite(log, bp, 0, 0); 620 } else { 621 /* 622 * No current GC leader, initiate group commit 623 */ 624 log->cflag |= logGC_PAGEOUT; 625 lmGCwrite(log, 0); 626 } 627 } 628 /* page is not bound with outstanding tblk: 629 * init write or mark it to be redriven (lbmWRITE) 630 */ 631 else { 632 /* finalize the page */ 633 bp->l_ceor = bp->l_eor; 634 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); 635 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0); 636 } 637 LOGGC_UNLOCK(log); 638 639 /* 640 * allocate/initialize next page 641 */ 642 /* if log wraps, the first data page of log is 2 643 * (0 never used, 1 is superblock). 644 */ 645 log->page = (pn == log->size - 1) ? 2 : pn + 1; 646 log->eor = LOGPHDRSIZE; /* ? valid page empty/full at logRedo() */ 647 648 /* allocate/initialize next log page buffer */ 649 nextbp = lbmAllocate(log, log->page); 650 nextbp->l_eor = log->eor; 651 log->bp = nextbp; 652 653 /* initialize next log page */ 654 lp = (struct logpage *) nextbp->l_ldata; 655 lp->h.page = lp->t.page = cpu_to_le32(lspn + 1); 656 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); 657 658 return 0; 659 } 660 661 662 /* 663 * NAME: lmGroupCommit() 664 * 665 * FUNCTION: group commit 666 * initiate pageout of the pages with COMMIT in the order of 667 * page number - redrive pageout of the page at the head of 668 * pageout queue until full page has been written. 669 * 670 * RETURN: 671 * 672 * NOTE: 673 * LOGGC_LOCK serializes log group commit queue, and 674 * transaction blocks on the commit queue. 675 * N.B. LOG_LOCK is NOT held during lmGroupCommit(). 676 */ 677 int lmGroupCommit(struct jfs_log * log, struct tblock * tblk) 678 { 679 int rc = 0; 680 681 LOGGC_LOCK(log); 682 683 /* group committed already ? */ 684 if (tblk->flag & tblkGC_COMMITTED) { 685 if (tblk->flag & tblkGC_ERROR) 686 rc = -EIO; 687 688 LOGGC_UNLOCK(log); 689 return rc; 690 } 691 jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc); 692 693 if (tblk->xflag & COMMIT_LAZY) 694 tblk->flag |= tblkGC_LAZY; 695 696 if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) && 697 (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag) 698 || jfs_tlocks_low)) { 699 /* 700 * No pageout in progress 701 * 702 * start group commit as its group leader. 703 */ 704 log->cflag |= logGC_PAGEOUT; 705 706 lmGCwrite(log, 0); 707 } 708 709 if (tblk->xflag & COMMIT_LAZY) { 710 /* 711 * Lazy transactions can leave now 712 */ 713 LOGGC_UNLOCK(log); 714 return 0; 715 } 716 717 /* lmGCwrite gives up LOGGC_LOCK, check again */ 718 719 if (tblk->flag & tblkGC_COMMITTED) { 720 if (tblk->flag & tblkGC_ERROR) 721 rc = -EIO; 722 723 LOGGC_UNLOCK(log); 724 return rc; 725 } 726 727 /* upcount transaction waiting for completion 728 */ 729 log->gcrtc++; 730 tblk->flag |= tblkGC_READY; 731 732 __SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED), 733 LOGGC_LOCK(log), LOGGC_UNLOCK(log)); 734 735 /* removed from commit queue */ 736 if (tblk->flag & tblkGC_ERROR) 737 rc = -EIO; 738 739 LOGGC_UNLOCK(log); 740 return rc; 741 } 742 743 /* 744 * NAME: lmGCwrite() 745 * 746 * FUNCTION: group commit write 747 * initiate write of log page, building a group of all transactions 748 * with commit records on that page. 749 * 750 * RETURN: None 751 * 752 * NOTE: 753 * LOGGC_LOCK must be held by caller. 754 * N.B. LOG_LOCK is NOT held during lmGroupCommit(). 755 */ 756 static void lmGCwrite(struct jfs_log * log, int cant_write) 757 { 758 struct lbuf *bp; 759 struct logpage *lp; 760 int gcpn; /* group commit page number */ 761 struct tblock *tblk; 762 struct tblock *xtblk = NULL; 763 764 /* 765 * build the commit group of a log page 766 * 767 * scan commit queue and make a commit group of all 768 * transactions with COMMIT records on the same log page. 769 */ 770 /* get the head tblk on the commit queue */ 771 gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn; 772 773 list_for_each_entry(tblk, &log->cqueue, cqueue) { 774 if (tblk->pn != gcpn) 775 break; 776 777 xtblk = tblk; 778 779 /* state transition: (QUEUE, READY) -> COMMIT */ 780 tblk->flag |= tblkGC_COMMIT; 781 } 782 tblk = xtblk; /* last tblk of the page */ 783 784 /* 785 * pageout to commit transactions on the log page. 786 */ 787 bp = (struct lbuf *) tblk->bp; 788 lp = (struct logpage *) bp->l_ldata; 789 /* is page already full ? */ 790 if (tblk->flag & tblkGC_EOP) { 791 /* mark page to free at end of group commit of the page */ 792 tblk->flag &= ~tblkGC_EOP; 793 tblk->flag |= tblkGC_FREE; 794 bp->l_ceor = bp->l_eor; 795 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); 796 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC, 797 cant_write); 798 INCREMENT(lmStat.full_page); 799 } 800 /* page is not yet full */ 801 else { 802 bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ 803 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); 804 lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write); 805 INCREMENT(lmStat.partial_page); 806 } 807 } 808 809 /* 810 * NAME: lmPostGC() 811 * 812 * FUNCTION: group commit post-processing 813 * Processes transactions after their commit records have been written 814 * to disk, redriving log I/O if necessary. 815 * 816 * RETURN: None 817 * 818 * NOTE: 819 * This routine is called a interrupt time by lbmIODone 820 */ 821 static void lmPostGC(struct lbuf * bp) 822 { 823 unsigned long flags; 824 struct jfs_log *log = bp->l_log; 825 struct logpage *lp; 826 struct tblock *tblk, *temp; 827 828 //LOGGC_LOCK(log); 829 spin_lock_irqsave(&log->gclock, flags); 830 /* 831 * current pageout of group commit completed. 832 * 833 * remove/wakeup transactions from commit queue who were 834 * group committed with the current log page 835 */ 836 list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) { 837 if (!(tblk->flag & tblkGC_COMMIT)) 838 break; 839 /* if transaction was marked GC_COMMIT then 840 * it has been shipped in the current pageout 841 * and made it to disk - it is committed. 842 */ 843 844 if (bp->l_flag & lbmERROR) 845 tblk->flag |= tblkGC_ERROR; 846 847 /* remove it from the commit queue */ 848 list_del(&tblk->cqueue); 849 tblk->flag &= ~tblkGC_QUEUE; 850 851 if (tblk == log->flush_tblk) { 852 /* we can stop flushing the log now */ 853 clear_bit(log_FLUSH, &log->flag); 854 log->flush_tblk = NULL; 855 } 856 857 jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk, 858 tblk->flag); 859 860 if (!(tblk->xflag & COMMIT_FORCE)) 861 /* 862 * Hand tblk over to lazy commit thread 863 */ 864 txLazyUnlock(tblk); 865 else { 866 /* state transition: COMMIT -> COMMITTED */ 867 tblk->flag |= tblkGC_COMMITTED; 868 869 if (tblk->flag & tblkGC_READY) 870 log->gcrtc--; 871 872 LOGGC_WAKEUP(tblk); 873 } 874 875 /* was page full before pageout ? 876 * (and this is the last tblk bound with the page) 877 */ 878 if (tblk->flag & tblkGC_FREE) 879 lbmFree(bp); 880 /* did page become full after pageout ? 881 * (and this is the last tblk bound with the page) 882 */ 883 else if (tblk->flag & tblkGC_EOP) { 884 /* finalize the page */ 885 lp = (struct logpage *) bp->l_ldata; 886 bp->l_ceor = bp->l_eor; 887 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); 888 jfs_info("lmPostGC: calling lbmWrite"); 889 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 890 1); 891 } 892 893 } 894 895 /* are there any transactions who have entered lnGroupCommit() 896 * (whose COMMITs are after that of the last log page written. 897 * They are waiting for new group commit (above at (SLEEP 1)) 898 * or lazy transactions are on a full (queued) log page, 899 * select the latest ready transaction as new group leader and 900 * wake her up to lead her group. 901 */ 902 if ((!list_empty(&log->cqueue)) && 903 ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || 904 test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low)) 905 /* 906 * Call lmGCwrite with new group leader 907 */ 908 lmGCwrite(log, 1); 909 910 /* no transaction are ready yet (transactions are only just 911 * queued (GC_QUEUE) and not entered for group commit yet). 912 * the first transaction entering group commit 913 * will elect herself as new group leader. 914 */ 915 else 916 log->cflag &= ~logGC_PAGEOUT; 917 918 //LOGGC_UNLOCK(log); 919 spin_unlock_irqrestore(&log->gclock, flags); 920 return; 921 } 922 923 /* 924 * NAME: lmLogSync() 925 * 926 * FUNCTION: write log SYNCPT record for specified log 927 * if new sync address is available 928 * (normally the case if sync() is executed by back-ground 929 * process). 930 * calculate new value of i_nextsync which determines when 931 * this code is called again. 932 * 933 * PARAMETERS: log - log structure 934 * hard_sync - 1 to force all metadata to be written 935 * 936 * RETURN: 0 937 * 938 * serialization: LOG_LOCK() held on entry/exit 939 */ 940 static int lmLogSync(struct jfs_log * log, int hard_sync) 941 { 942 int logsize; 943 int written; /* written since last syncpt */ 944 int free; /* free space left available */ 945 int delta; /* additional delta to write normally */ 946 int more; /* additional write granted */ 947 struct lrd lrd; 948 int lsn; 949 struct logsyncblk *lp; 950 unsigned long flags; 951 952 /* push dirty metapages out to disk */ 953 if (hard_sync) 954 write_special_inodes(log, filemap_fdatawrite); 955 else 956 write_special_inodes(log, filemap_flush); 957 958 /* 959 * forward syncpt 960 */ 961 /* if last sync is same as last syncpt, 962 * invoke sync point forward processing to update sync. 963 */ 964 965 if (log->sync == log->syncpt) { 966 LOGSYNC_LOCK(log, flags); 967 if (list_empty(&log->synclist)) 968 log->sync = log->lsn; 969 else { 970 lp = list_entry(log->synclist.next, 971 struct logsyncblk, synclist); 972 log->sync = lp->lsn; 973 } 974 LOGSYNC_UNLOCK(log, flags); 975 976 } 977 978 /* if sync is different from last syncpt, 979 * write a SYNCPT record with syncpt = sync. 980 * reset syncpt = sync 981 */ 982 if (log->sync != log->syncpt) { 983 lrd.logtid = 0; 984 lrd.backchain = 0; 985 lrd.type = cpu_to_le16(LOG_SYNCPT); 986 lrd.length = 0; 987 lrd.log.syncpt.sync = cpu_to_le32(log->sync); 988 lsn = lmWriteRecord(log, NULL, &lrd, NULL); 989 990 log->syncpt = log->sync; 991 } else 992 lsn = log->lsn; 993 994 /* 995 * setup next syncpt trigger (SWAG) 996 */ 997 logsize = log->logsize; 998 999 logdiff(written, lsn, log); 1000 free = logsize - written; 1001 delta = LOGSYNC_DELTA(logsize); 1002 more = min(free / 2, delta); 1003 if (more < 2 * LOGPSIZE) { 1004 jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n"); 1005 /* 1006 * log wrapping 1007 * 1008 * option 1 - panic ? No.! 1009 * option 2 - shutdown file systems 1010 * associated with log ? 1011 * option 3 - extend log ? 1012 */ 1013 /* 1014 * option 4 - second chance 1015 * 1016 * mark log wrapped, and continue. 1017 * when all active transactions are completed, 1018 * mark log vaild for recovery. 1019 * if crashed during invalid state, log state 1020 * implies invald log, forcing fsck(). 1021 */ 1022 /* mark log state log wrap in log superblock */ 1023 /* log->state = LOGWRAP; */ 1024 1025 /* reset sync point computation */ 1026 log->syncpt = log->sync = lsn; 1027 log->nextsync = delta; 1028 } else 1029 /* next syncpt trigger = written + more */ 1030 log->nextsync = written + more; 1031 1032 /* if number of bytes written from last sync point is more 1033 * than 1/4 of the log size, stop new transactions from 1034 * starting until all current transactions are completed 1035 * by setting syncbarrier flag. 1036 */ 1037 if (!test_bit(log_SYNCBARRIER, &log->flag) && 1038 (written > LOGSYNC_BARRIER(logsize)) && log->active) { 1039 set_bit(log_SYNCBARRIER, &log->flag); 1040 jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn, 1041 log->syncpt); 1042 /* 1043 * We may have to initiate group commit 1044 */ 1045 jfs_flush_journal(log, 0); 1046 } 1047 1048 return lsn; 1049 } 1050 1051 /* 1052 * NAME: jfs_syncpt 1053 * 1054 * FUNCTION: write log SYNCPT record for specified log 1055 * 1056 * PARAMETERS: log - log structure 1057 * hard_sync - set to 1 to force metadata to be written 1058 */ 1059 void jfs_syncpt(struct jfs_log *log, int hard_sync) 1060 { LOG_LOCK(log); 1061 lmLogSync(log, hard_sync); 1062 LOG_UNLOCK(log); 1063 } 1064 1065 /* 1066 * NAME: lmLogOpen() 1067 * 1068 * FUNCTION: open the log on first open; 1069 * insert filesystem in the active list of the log. 1070 * 1071 * PARAMETER: ipmnt - file system mount inode 1072 * iplog - log inode (out) 1073 * 1074 * RETURN: 1075 * 1076 * serialization: 1077 */ 1078 int lmLogOpen(struct super_block *sb) 1079 { 1080 int rc; 1081 struct block_device *bdev; 1082 struct jfs_log *log; 1083 struct jfs_sb_info *sbi = JFS_SBI(sb); 1084 1085 if (sbi->flag & JFS_NOINTEGRITY) 1086 return open_dummy_log(sb); 1087 1088 if (sbi->mntflag & JFS_INLINELOG) 1089 return open_inline_log(sb); 1090 1091 mutex_lock(&jfs_log_mutex); 1092 list_for_each_entry(log, &jfs_external_logs, journal_list) { 1093 if (log->bdev->bd_dev == sbi->logdev) { 1094 if (memcmp(log->uuid, sbi->loguuid, 1095 sizeof(log->uuid))) { 1096 jfs_warn("wrong uuid on JFS journal\n"); 1097 mutex_unlock(&jfs_log_mutex); 1098 return -EINVAL; 1099 } 1100 /* 1101 * add file system to log active file system list 1102 */ 1103 if ((rc = lmLogFileSystem(log, sbi, 1))) { 1104 mutex_unlock(&jfs_log_mutex); 1105 return rc; 1106 } 1107 goto journal_found; 1108 } 1109 } 1110 1111 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { 1112 mutex_unlock(&jfs_log_mutex); 1113 return -ENOMEM; 1114 } 1115 INIT_LIST_HEAD(&log->sb_list); 1116 init_waitqueue_head(&log->syncwait); 1117 1118 /* 1119 * external log as separate logical volume 1120 * 1121 * file systems to log may have n-to-1 relationship; 1122 */ 1123 1124 bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE); 1125 if (IS_ERR(bdev)) { 1126 rc = -PTR_ERR(bdev); 1127 goto free; 1128 } 1129 1130 if ((rc = bd_claim(bdev, log))) { 1131 goto close; 1132 } 1133 1134 log->bdev = bdev; 1135 memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); 1136 1137 /* 1138 * initialize log: 1139 */ 1140 if ((rc = lmLogInit(log))) 1141 goto unclaim; 1142 1143 list_add(&log->journal_list, &jfs_external_logs); 1144 1145 /* 1146 * add file system to log active file system list 1147 */ 1148 if ((rc = lmLogFileSystem(log, sbi, 1))) 1149 goto shutdown; 1150 1151 journal_found: 1152 LOG_LOCK(log); 1153 list_add(&sbi->log_list, &log->sb_list); 1154 sbi->log = log; 1155 LOG_UNLOCK(log); 1156 1157 mutex_unlock(&jfs_log_mutex); 1158 return 0; 1159 1160 /* 1161 * unwind on error 1162 */ 1163 shutdown: /* unwind lbmLogInit() */ 1164 list_del(&log->journal_list); 1165 lbmLogShutdown(log); 1166 1167 unclaim: 1168 bd_release(bdev); 1169 1170 close: /* close external log device */ 1171 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1172 1173 free: /* free log descriptor */ 1174 mutex_unlock(&jfs_log_mutex); 1175 kfree(log); 1176 1177 jfs_warn("lmLogOpen: exit(%d)", rc); 1178 return rc; 1179 } 1180 1181 static int open_inline_log(struct super_block *sb) 1182 { 1183 struct jfs_log *log; 1184 int rc; 1185 1186 if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) 1187 return -ENOMEM; 1188 INIT_LIST_HEAD(&log->sb_list); 1189 init_waitqueue_head(&log->syncwait); 1190 1191 set_bit(log_INLINELOG, &log->flag); 1192 log->bdev = sb->s_bdev; 1193 log->base = addressPXD(&JFS_SBI(sb)->logpxd); 1194 log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >> 1195 (L2LOGPSIZE - sb->s_blocksize_bits); 1196 log->l2bsize = sb->s_blocksize_bits; 1197 ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits); 1198 1199 /* 1200 * initialize log. 1201 */ 1202 if ((rc = lmLogInit(log))) { 1203 kfree(log); 1204 jfs_warn("lmLogOpen: exit(%d)", rc); 1205 return rc; 1206 } 1207 1208 list_add(&JFS_SBI(sb)->log_list, &log->sb_list); 1209 JFS_SBI(sb)->log = log; 1210 1211 return rc; 1212 } 1213 1214 static int open_dummy_log(struct super_block *sb) 1215 { 1216 int rc; 1217 1218 mutex_lock(&jfs_log_mutex); 1219 if (!dummy_log) { 1220 dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); 1221 if (!dummy_log) { 1222 mutex_unlock(&jfs_log_mutex); 1223 return -ENOMEM; 1224 } 1225 INIT_LIST_HEAD(&dummy_log->sb_list); 1226 init_waitqueue_head(&dummy_log->syncwait); 1227 dummy_log->no_integrity = 1; 1228 /* Make up some stuff */ 1229 dummy_log->base = 0; 1230 dummy_log->size = 1024; 1231 rc = lmLogInit(dummy_log); 1232 if (rc) { 1233 kfree(dummy_log); 1234 dummy_log = NULL; 1235 mutex_unlock(&jfs_log_mutex); 1236 return rc; 1237 } 1238 } 1239 1240 LOG_LOCK(dummy_log); 1241 list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); 1242 JFS_SBI(sb)->log = dummy_log; 1243 LOG_UNLOCK(dummy_log); 1244 mutex_unlock(&jfs_log_mutex); 1245 1246 return 0; 1247 } 1248 1249 /* 1250 * NAME: lmLogInit() 1251 * 1252 * FUNCTION: log initialization at first log open. 1253 * 1254 * logredo() (or logformat()) should have been run previously. 1255 * initialize the log from log superblock. 1256 * set the log state in the superblock to LOGMOUNT and 1257 * write SYNCPT log record. 1258 * 1259 * PARAMETER: log - log structure 1260 * 1261 * RETURN: 0 - if ok 1262 * -EINVAL - bad log magic number or superblock dirty 1263 * error returned from logwait() 1264 * 1265 * serialization: single first open thread 1266 */ 1267 int lmLogInit(struct jfs_log * log) 1268 { 1269 int rc = 0; 1270 struct lrd lrd; 1271 struct logsuper *logsuper; 1272 struct lbuf *bpsuper; 1273 struct lbuf *bp; 1274 struct logpage *lp; 1275 int lsn = 0; 1276 1277 jfs_info("lmLogInit: log:0x%p", log); 1278 1279 /* initialize the group commit serialization lock */ 1280 LOGGC_LOCK_INIT(log); 1281 1282 /* allocate/initialize the log write serialization lock */ 1283 LOG_LOCK_INIT(log); 1284 1285 LOGSYNC_LOCK_INIT(log); 1286 1287 INIT_LIST_HEAD(&log->synclist); 1288 1289 INIT_LIST_HEAD(&log->cqueue); 1290 log->flush_tblk = NULL; 1291 1292 log->count = 0; 1293 1294 /* 1295 * initialize log i/o 1296 */ 1297 if ((rc = lbmLogInit(log))) 1298 return rc; 1299 1300 if (!test_bit(log_INLINELOG, &log->flag)) 1301 log->l2bsize = L2LOGPSIZE; 1302 1303 /* check for disabled journaling to disk */ 1304 if (log->no_integrity) { 1305 /* 1306 * Journal pages will still be filled. When the time comes 1307 * to actually do the I/O, the write is not done, and the 1308 * endio routine is called directly. 1309 */ 1310 bp = lbmAllocate(log , 0); 1311 log->bp = bp; 1312 bp->l_pn = bp->l_eor = 0; 1313 } else { 1314 /* 1315 * validate log superblock 1316 */ 1317 if ((rc = lbmRead(log, 1, &bpsuper))) 1318 goto errout10; 1319 1320 logsuper = (struct logsuper *) bpsuper->l_ldata; 1321 1322 if (logsuper->magic != cpu_to_le32(LOGMAGIC)) { 1323 jfs_warn("*** Log Format Error ! ***"); 1324 rc = -EINVAL; 1325 goto errout20; 1326 } 1327 1328 /* logredo() should have been run successfully. */ 1329 if (logsuper->state != cpu_to_le32(LOGREDONE)) { 1330 jfs_warn("*** Log Is Dirty ! ***"); 1331 rc = -EINVAL; 1332 goto errout20; 1333 } 1334 1335 /* initialize log from log superblock */ 1336 if (test_bit(log_INLINELOG,&log->flag)) { 1337 if (log->size != le32_to_cpu(logsuper->size)) { 1338 rc = -EINVAL; 1339 goto errout20; 1340 } 1341 jfs_info("lmLogInit: inline log:0x%p base:0x%Lx " 1342 "size:0x%x", log, 1343 (unsigned long long) log->base, log->size); 1344 } else { 1345 if (memcmp(logsuper->uuid, log->uuid, 16)) { 1346 jfs_warn("wrong uuid on JFS log device"); 1347 goto errout20; 1348 } 1349 log->size = le32_to_cpu(logsuper->size); 1350 log->l2bsize = le32_to_cpu(logsuper->l2bsize); 1351 jfs_info("lmLogInit: external log:0x%p base:0x%Lx " 1352 "size:0x%x", log, 1353 (unsigned long long) log->base, log->size); 1354 } 1355 1356 log->page = le32_to_cpu(logsuper->end) / LOGPSIZE; 1357 log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page); 1358 1359 /* 1360 * initialize for log append write mode 1361 */ 1362 /* establish current/end-of-log page/buffer */ 1363 if ((rc = lbmRead(log, log->page, &bp))) 1364 goto errout20; 1365 1366 lp = (struct logpage *) bp->l_ldata; 1367 1368 jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d", 1369 le32_to_cpu(logsuper->end), log->page, log->eor, 1370 le16_to_cpu(lp->h.eor)); 1371 1372 log->bp = bp; 1373 bp->l_pn = log->page; 1374 bp->l_eor = log->eor; 1375 1376 /* if current page is full, move on to next page */ 1377 if (log->eor >= LOGPSIZE - LOGPTLRSIZE) 1378 lmNextPage(log); 1379 1380 /* 1381 * initialize log syncpoint 1382 */ 1383 /* 1384 * write the first SYNCPT record with syncpoint = 0 1385 * (i.e., log redo up to HERE !); 1386 * remove current page from lbm write queue at end of pageout 1387 * (to write log superblock update), but do not release to 1388 * freelist; 1389 */ 1390 lrd.logtid = 0; 1391 lrd.backchain = 0; 1392 lrd.type = cpu_to_le16(LOG_SYNCPT); 1393 lrd.length = 0; 1394 lrd.log.syncpt.sync = 0; 1395 lsn = lmWriteRecord(log, NULL, &lrd, NULL); 1396 bp = log->bp; 1397 bp->l_ceor = bp->l_eor; 1398 lp = (struct logpage *) bp->l_ldata; 1399 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); 1400 lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0); 1401 if ((rc = lbmIOWait(bp, 0))) 1402 goto errout30; 1403 1404 /* 1405 * update/write superblock 1406 */ 1407 logsuper->state = cpu_to_le32(LOGMOUNT); 1408 log->serial = le32_to_cpu(logsuper->serial) + 1; 1409 logsuper->serial = cpu_to_le32(log->serial); 1410 lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); 1411 if ((rc = lbmIOWait(bpsuper, lbmFREE))) 1412 goto errout30; 1413 } 1414 1415 /* initialize logsync parameters */ 1416 log->logsize = (log->size - 2) << L2LOGPSIZE; 1417 log->lsn = lsn; 1418 log->syncpt = lsn; 1419 log->sync = log->syncpt; 1420 log->nextsync = LOGSYNC_DELTA(log->logsize); 1421 1422 jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x", 1423 log->lsn, log->syncpt, log->sync); 1424 1425 /* 1426 * initialize for lazy/group commit 1427 */ 1428 log->clsn = lsn; 1429 1430 return 0; 1431 1432 /* 1433 * unwind on error 1434 */ 1435 errout30: /* release log page */ 1436 log->wqueue = NULL; 1437 bp->l_wqnext = NULL; 1438 lbmFree(bp); 1439 1440 errout20: /* release log superblock */ 1441 lbmFree(bpsuper); 1442 1443 errout10: /* unwind lbmLogInit() */ 1444 lbmLogShutdown(log); 1445 1446 jfs_warn("lmLogInit: exit(%d)", rc); 1447 return rc; 1448 } 1449 1450 1451 /* 1452 * NAME: lmLogClose() 1453 * 1454 * FUNCTION: remove file system <ipmnt> from active list of log <iplog> 1455 * and close it on last close. 1456 * 1457 * PARAMETER: sb - superblock 1458 * 1459 * RETURN: errors from subroutines 1460 * 1461 * serialization: 1462 */ 1463 int lmLogClose(struct super_block *sb) 1464 { 1465 struct jfs_sb_info *sbi = JFS_SBI(sb); 1466 struct jfs_log *log = sbi->log; 1467 struct block_device *bdev; 1468 int rc = 0; 1469 1470 jfs_info("lmLogClose: log:0x%p", log); 1471 1472 mutex_lock(&jfs_log_mutex); 1473 LOG_LOCK(log); 1474 list_del(&sbi->log_list); 1475 LOG_UNLOCK(log); 1476 sbi->log = NULL; 1477 1478 /* 1479 * We need to make sure all of the "written" metapages 1480 * actually make it to disk 1481 */ 1482 sync_blockdev(sb->s_bdev); 1483 1484 if (test_bit(log_INLINELOG, &log->flag)) { 1485 /* 1486 * in-line log in host file system 1487 */ 1488 rc = lmLogShutdown(log); 1489 kfree(log); 1490 goto out; 1491 } 1492 1493 if (!log->no_integrity) 1494 lmLogFileSystem(log, sbi, 0); 1495 1496 if (!list_empty(&log->sb_list)) 1497 goto out; 1498 1499 /* 1500 * TODO: ensure that the dummy_log is in a state to allow 1501 * lbmLogShutdown to deallocate all the buffers and call 1502 * kfree against dummy_log. For now, leave dummy_log & its 1503 * buffers in memory, and resuse if another no-integrity mount 1504 * is requested. 1505 */ 1506 if (log->no_integrity) 1507 goto out; 1508 1509 /* 1510 * external log as separate logical volume 1511 */ 1512 list_del(&log->journal_list); 1513 bdev = log->bdev; 1514 rc = lmLogShutdown(log); 1515 1516 bd_release(bdev); 1517 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1518 1519 kfree(log); 1520 1521 out: 1522 mutex_unlock(&jfs_log_mutex); 1523 jfs_info("lmLogClose: exit(%d)", rc); 1524 return rc; 1525 } 1526 1527 1528 /* 1529 * NAME: jfs_flush_journal() 1530 * 1531 * FUNCTION: initiate write of any outstanding transactions to the journal 1532 * and optionally wait until they are all written to disk 1533 * 1534 * wait == 0 flush until latest txn is committed, don't wait 1535 * wait == 1 flush until latest txn is committed, wait 1536 * wait > 1 flush until all txn's are complete, wait 1537 */ 1538 void jfs_flush_journal(struct jfs_log *log, int wait) 1539 { 1540 int i; 1541 struct tblock *target = NULL; 1542 1543 /* jfs_write_inode may call us during read-only mount */ 1544 if (!log) 1545 return; 1546 1547 jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait); 1548 1549 LOGGC_LOCK(log); 1550 1551 if (!list_empty(&log->cqueue)) { 1552 /* 1553 * This ensures that we will keep writing to the journal as long 1554 * as there are unwritten commit records 1555 */ 1556 target = list_entry(log->cqueue.prev, struct tblock, cqueue); 1557 1558 if (test_bit(log_FLUSH, &log->flag)) { 1559 /* 1560 * We're already flushing. 1561 * if flush_tblk is NULL, we are flushing everything, 1562 * so leave it that way. Otherwise, update it to the 1563 * latest transaction 1564 */ 1565 if (log->flush_tblk) 1566 log->flush_tblk = target; 1567 } else { 1568 /* Only flush until latest transaction is committed */ 1569 log->flush_tblk = target; 1570 set_bit(log_FLUSH, &log->flag); 1571 1572 /* 1573 * Initiate I/O on outstanding transactions 1574 */ 1575 if (!(log->cflag & logGC_PAGEOUT)) { 1576 log->cflag |= logGC_PAGEOUT; 1577 lmGCwrite(log, 0); 1578 } 1579 } 1580 } 1581 if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) { 1582 /* Flush until all activity complete */ 1583 set_bit(log_FLUSH, &log->flag); 1584 log->flush_tblk = NULL; 1585 } 1586 1587 if (wait && target && !(target->flag & tblkGC_COMMITTED)) { 1588 DECLARE_WAITQUEUE(__wait, current); 1589 1590 add_wait_queue(&target->gcwait, &__wait); 1591 set_current_state(TASK_UNINTERRUPTIBLE); 1592 LOGGC_UNLOCK(log); 1593 schedule(); 1594 __set_current_state(TASK_RUNNING); 1595 LOGGC_LOCK(log); 1596 remove_wait_queue(&target->gcwait, &__wait); 1597 } 1598 LOGGC_UNLOCK(log); 1599 1600 if (wait < 2) 1601 return; 1602 1603 write_special_inodes(log, filemap_fdatawrite); 1604 1605 /* 1606 * If there was recent activity, we may need to wait 1607 * for the lazycommit thread to catch up 1608 */ 1609 if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) { 1610 for (i = 0; i < 200; i++) { /* Too much? */ 1611 msleep(250); 1612 write_special_inodes(log, filemap_fdatawrite); 1613 if (list_empty(&log->cqueue) && 1614 list_empty(&log->synclist)) 1615 break; 1616 } 1617 } 1618 assert(list_empty(&log->cqueue)); 1619 1620 #ifdef CONFIG_JFS_DEBUG 1621 if (!list_empty(&log->synclist)) { 1622 struct logsyncblk *lp; 1623 1624 printk(KERN_ERR "jfs_flush_journal: synclist not empty\n"); 1625 list_for_each_entry(lp, &log->synclist, synclist) { 1626 if (lp->xflag & COMMIT_PAGE) { 1627 struct metapage *mp = (struct metapage *)lp; 1628 print_hex_dump(KERN_ERR, "metapage: ", 1629 DUMP_PREFIX_ADDRESS, 16, 4, 1630 mp, sizeof(struct metapage), 0); 1631 print_hex_dump(KERN_ERR, "page: ", 1632 DUMP_PREFIX_ADDRESS, 16, 1633 sizeof(long), mp->page, 1634 sizeof(struct page), 0); 1635 } else 1636 print_hex_dump(KERN_ERR, "tblock:", 1637 DUMP_PREFIX_ADDRESS, 16, 4, 1638 lp, sizeof(struct tblock), 0); 1639 } 1640 } 1641 #else 1642 WARN_ON(!list_empty(&log->synclist)); 1643 #endif 1644 clear_bit(log_FLUSH, &log->flag); 1645 } 1646 1647 /* 1648 * NAME: lmLogShutdown() 1649 * 1650 * FUNCTION: log shutdown at last LogClose(). 1651 * 1652 * write log syncpt record. 1653 * update super block to set redone flag to 0. 1654 * 1655 * PARAMETER: log - log inode 1656 * 1657 * RETURN: 0 - success 1658 * 1659 * serialization: single last close thread 1660 */ 1661 int lmLogShutdown(struct jfs_log * log) 1662 { 1663 int rc; 1664 struct lrd lrd; 1665 int lsn; 1666 struct logsuper *logsuper; 1667 struct lbuf *bpsuper; 1668 struct lbuf *bp; 1669 struct logpage *lp; 1670 1671 jfs_info("lmLogShutdown: log:0x%p", log); 1672 1673 jfs_flush_journal(log, 2); 1674 1675 /* 1676 * write the last SYNCPT record with syncpoint = 0 1677 * (i.e., log redo up to HERE !) 1678 */ 1679 lrd.logtid = 0; 1680 lrd.backchain = 0; 1681 lrd.type = cpu_to_le16(LOG_SYNCPT); 1682 lrd.length = 0; 1683 lrd.log.syncpt.sync = 0; 1684 1685 lsn = lmWriteRecord(log, NULL, &lrd, NULL); 1686 bp = log->bp; 1687 lp = (struct logpage *) bp->l_ldata; 1688 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); 1689 lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); 1690 lbmIOWait(log->bp, lbmFREE); 1691 log->bp = NULL; 1692 1693 /* 1694 * synchronous update log superblock 1695 * mark log state as shutdown cleanly 1696 * (i.e., Log does not need to be replayed). 1697 */ 1698 if ((rc = lbmRead(log, 1, &bpsuper))) 1699 goto out; 1700 1701 logsuper = (struct logsuper *) bpsuper->l_ldata; 1702 logsuper->state = cpu_to_le32(LOGREDONE); 1703 logsuper->end = cpu_to_le32(lsn); 1704 lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); 1705 rc = lbmIOWait(bpsuper, lbmFREE); 1706 1707 jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d", 1708 lsn, log->page, log->eor); 1709 1710 out: 1711 /* 1712 * shutdown per log i/o 1713 */ 1714 lbmLogShutdown(log); 1715 1716 if (rc) { 1717 jfs_warn("lmLogShutdown: exit(%d)", rc); 1718 } 1719 return rc; 1720 } 1721 1722 1723 /* 1724 * NAME: lmLogFileSystem() 1725 * 1726 * FUNCTION: insert (<activate> = true)/remove (<activate> = false) 1727 * file system into/from log active file system list. 1728 * 1729 * PARAMETE: log - pointer to logs inode. 1730 * fsdev - kdev_t of filesystem. 1731 * serial - pointer to returned log serial number 1732 * activate - insert/remove device from active list. 1733 * 1734 * RETURN: 0 - success 1735 * errors returned by vms_iowait(). 1736 */ 1737 static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi, 1738 int activate) 1739 { 1740 int rc = 0; 1741 int i; 1742 struct logsuper *logsuper; 1743 struct lbuf *bpsuper; 1744 char *uuid = sbi->uuid; 1745 1746 /* 1747 * insert/remove file system device to log active file system list. 1748 */ 1749 if ((rc = lbmRead(log, 1, &bpsuper))) 1750 return rc; 1751 1752 logsuper = (struct logsuper *) bpsuper->l_ldata; 1753 if (activate) { 1754 for (i = 0; i < MAX_ACTIVE; i++) 1755 if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) { 1756 memcpy(logsuper->active[i].uuid, uuid, 16); 1757 sbi->aggregate = i; 1758 break; 1759 } 1760 if (i == MAX_ACTIVE) { 1761 jfs_warn("Too many file systems sharing journal!"); 1762 lbmFree(bpsuper); 1763 return -EMFILE; /* Is there a better rc? */ 1764 } 1765 } else { 1766 for (i = 0; i < MAX_ACTIVE; i++) 1767 if (!memcmp(logsuper->active[i].uuid, uuid, 16)) { 1768 memcpy(logsuper->active[i].uuid, NULL_UUID, 16); 1769 break; 1770 } 1771 if (i == MAX_ACTIVE) { 1772 jfs_warn("Somebody stomped on the journal!"); 1773 lbmFree(bpsuper); 1774 return -EIO; 1775 } 1776 1777 } 1778 1779 /* 1780 * synchronous write log superblock: 1781 * 1782 * write sidestream bypassing write queue: 1783 * at file system mount, log super block is updated for 1784 * activation of the file system before any log record 1785 * (MOUNT record) of the file system, and at file system 1786 * unmount, all meta data for the file system has been 1787 * flushed before log super block is updated for deactivation 1788 * of the file system. 1789 */ 1790 lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC); 1791 rc = lbmIOWait(bpsuper, lbmFREE); 1792 1793 return rc; 1794 } 1795 1796 /* 1797 * log buffer manager (lbm) 1798 * ------------------------ 1799 * 1800 * special purpose buffer manager supporting log i/o requirements. 1801 * 1802 * per log write queue: 1803 * log pageout occurs in serial order by fifo write queue and 1804 * restricting to a single i/o in pregress at any one time. 1805 * a circular singly-linked list 1806 * (log->wrqueue points to the tail, and buffers are linked via 1807 * bp->wrqueue field), and 1808 * maintains log page in pageout ot waiting for pageout in serial pageout. 1809 */ 1810 1811 /* 1812 * lbmLogInit() 1813 * 1814 * initialize per log I/O setup at lmLogInit() 1815 */ 1816 static int lbmLogInit(struct jfs_log * log) 1817 { /* log inode */ 1818 int i; 1819 struct lbuf *lbuf; 1820 1821 jfs_info("lbmLogInit: log:0x%p", log); 1822 1823 /* initialize current buffer cursor */ 1824 log->bp = NULL; 1825 1826 /* initialize log device write queue */ 1827 log->wqueue = NULL; 1828 1829 /* 1830 * Each log has its own buffer pages allocated to it. These are 1831 * not managed by the page cache. This ensures that a transaction 1832 * writing to the log does not block trying to allocate a page from 1833 * the page cache (for the log). This would be bad, since page 1834 * allocation waits on the kswapd thread that may be committing inodes 1835 * which would cause log activity. Was that clear? I'm trying to 1836 * avoid deadlock here. 1837 */ 1838 init_waitqueue_head(&log->free_wait); 1839 1840 log->lbuf_free = NULL; 1841 1842 for (i = 0; i < LOGPAGES;) { 1843 char *buffer; 1844 uint offset; 1845 struct page *page; 1846 1847 buffer = (char *) get_zeroed_page(GFP_KERNEL); 1848 if (buffer == NULL) 1849 goto error; 1850 page = virt_to_page(buffer); 1851 for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) { 1852 lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL); 1853 if (lbuf == NULL) { 1854 if (offset == 0) 1855 free_page((unsigned long) buffer); 1856 goto error; 1857 } 1858 if (offset) /* we already have one reference */ 1859 get_page(page); 1860 lbuf->l_offset = offset; 1861 lbuf->l_ldata = buffer + offset; 1862 lbuf->l_page = page; 1863 lbuf->l_log = log; 1864 init_waitqueue_head(&lbuf->l_ioevent); 1865 1866 lbuf->l_freelist = log->lbuf_free; 1867 log->lbuf_free = lbuf; 1868 i++; 1869 } 1870 } 1871 1872 return (0); 1873 1874 error: 1875 lbmLogShutdown(log); 1876 return -ENOMEM; 1877 } 1878 1879 1880 /* 1881 * lbmLogShutdown() 1882 * 1883 * finalize per log I/O setup at lmLogShutdown() 1884 */ 1885 static void lbmLogShutdown(struct jfs_log * log) 1886 { 1887 struct lbuf *lbuf; 1888 1889 jfs_info("lbmLogShutdown: log:0x%p", log); 1890 1891 lbuf = log->lbuf_free; 1892 while (lbuf) { 1893 struct lbuf *next = lbuf->l_freelist; 1894 __free_page(lbuf->l_page); 1895 kfree(lbuf); 1896 lbuf = next; 1897 } 1898 } 1899 1900 1901 /* 1902 * lbmAllocate() 1903 * 1904 * allocate an empty log buffer 1905 */ 1906 static struct lbuf *lbmAllocate(struct jfs_log * log, int pn) 1907 { 1908 struct lbuf *bp; 1909 unsigned long flags; 1910 1911 /* 1912 * recycle from log buffer freelist if any 1913 */ 1914 LCACHE_LOCK(flags); 1915 LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); 1916 log->lbuf_free = bp->l_freelist; 1917 LCACHE_UNLOCK(flags); 1918 1919 bp->l_flag = 0; 1920 1921 bp->l_wqnext = NULL; 1922 bp->l_freelist = NULL; 1923 1924 bp->l_pn = pn; 1925 bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); 1926 bp->l_ceor = 0; 1927 1928 return bp; 1929 } 1930 1931 1932 /* 1933 * lbmFree() 1934 * 1935 * release a log buffer to freelist 1936 */ 1937 static void lbmFree(struct lbuf * bp) 1938 { 1939 unsigned long flags; 1940 1941 LCACHE_LOCK(flags); 1942 1943 lbmfree(bp); 1944 1945 LCACHE_UNLOCK(flags); 1946 } 1947 1948 static void lbmfree(struct lbuf * bp) 1949 { 1950 struct jfs_log *log = bp->l_log; 1951 1952 assert(bp->l_wqnext == NULL); 1953 1954 /* 1955 * return the buffer to head of freelist 1956 */ 1957 bp->l_freelist = log->lbuf_free; 1958 log->lbuf_free = bp; 1959 1960 wake_up(&log->free_wait); 1961 return; 1962 } 1963 1964 1965 /* 1966 * NAME: lbmRedrive 1967 * 1968 * FUNCTION: add a log buffer to the log redrive list 1969 * 1970 * PARAMETER: 1971 * bp - log buffer 1972 * 1973 * NOTES: 1974 * Takes log_redrive_lock. 1975 */ 1976 static inline void lbmRedrive(struct lbuf *bp) 1977 { 1978 unsigned long flags; 1979 1980 spin_lock_irqsave(&log_redrive_lock, flags); 1981 bp->l_redrive_next = log_redrive_list; 1982 log_redrive_list = bp; 1983 spin_unlock_irqrestore(&log_redrive_lock, flags); 1984 1985 wake_up_process(jfsIOthread); 1986 } 1987 1988 1989 /* 1990 * lbmRead() 1991 */ 1992 static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) 1993 { 1994 struct bio *bio; 1995 struct lbuf *bp; 1996 1997 /* 1998 * allocate a log buffer 1999 */ 2000 *bpp = bp = lbmAllocate(log, pn); 2001 jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn); 2002 2003 bp->l_flag |= lbmREAD; 2004 2005 bio = bio_alloc(GFP_NOFS, 1); 2006 2007 bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); 2008 bio->bi_bdev = log->bdev; 2009 bio->bi_io_vec[0].bv_page = bp->l_page; 2010 bio->bi_io_vec[0].bv_len = LOGPSIZE; 2011 bio->bi_io_vec[0].bv_offset = bp->l_offset; 2012 2013 bio->bi_vcnt = 1; 2014 bio->bi_idx = 0; 2015 bio->bi_size = LOGPSIZE; 2016 2017 bio->bi_end_io = lbmIODone; 2018 bio->bi_private = bp; 2019 submit_bio(READ_SYNC, bio); 2020 2021 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); 2022 2023 return 0; 2024 } 2025 2026 2027 /* 2028 * lbmWrite() 2029 * 2030 * buffer at head of pageout queue stays after completion of 2031 * partial-page pageout and redriven by explicit initiation of 2032 * pageout by caller until full-page pageout is completed and 2033 * released. 2034 * 2035 * device driver i/o done redrives pageout of new buffer at 2036 * head of pageout queue when current buffer at head of pageout 2037 * queue is released at the completion of its full-page pageout. 2038 * 2039 * LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit(). 2040 * LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone() 2041 */ 2042 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, 2043 int cant_block) 2044 { 2045 struct lbuf *tail; 2046 unsigned long flags; 2047 2048 jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); 2049 2050 /* map the logical block address to physical block address */ 2051 bp->l_blkno = 2052 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); 2053 2054 LCACHE_LOCK(flags); /* disable+lock */ 2055 2056 /* 2057 * initialize buffer for device driver 2058 */ 2059 bp->l_flag = flag; 2060 2061 /* 2062 * insert bp at tail of write queue associated with log 2063 * 2064 * (request is either for bp already/currently at head of queue 2065 * or new bp to be inserted at tail) 2066 */ 2067 tail = log->wqueue; 2068 2069 /* is buffer not already on write queue ? */ 2070 if (bp->l_wqnext == NULL) { 2071 /* insert at tail of wqueue */ 2072 if (tail == NULL) { 2073 log->wqueue = bp; 2074 bp->l_wqnext = bp; 2075 } else { 2076 log->wqueue = bp; 2077 bp->l_wqnext = tail->l_wqnext; 2078 tail->l_wqnext = bp; 2079 } 2080 2081 tail = bp; 2082 } 2083 2084 /* is buffer at head of wqueue and for write ? */ 2085 if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { 2086 LCACHE_UNLOCK(flags); /* unlock+enable */ 2087 return; 2088 } 2089 2090 LCACHE_UNLOCK(flags); /* unlock+enable */ 2091 2092 if (cant_block) 2093 lbmRedrive(bp); 2094 else if (flag & lbmSYNC) 2095 lbmStartIO(bp); 2096 else { 2097 LOGGC_UNLOCK(log); 2098 lbmStartIO(bp); 2099 LOGGC_LOCK(log); 2100 } 2101 } 2102 2103 2104 /* 2105 * lbmDirectWrite() 2106 * 2107 * initiate pageout bypassing write queue for sidestream 2108 * (e.g., log superblock) write; 2109 */ 2110 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag) 2111 { 2112 jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x", 2113 bp, flag, bp->l_pn); 2114 2115 /* 2116 * initialize buffer for device driver 2117 */ 2118 bp->l_flag = flag | lbmDIRECT; 2119 2120 /* map the logical block address to physical block address */ 2121 bp->l_blkno = 2122 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); 2123 2124 /* 2125 * initiate pageout of the page 2126 */ 2127 lbmStartIO(bp); 2128 } 2129 2130 2131 /* 2132 * NAME: lbmStartIO() 2133 * 2134 * FUNCTION: Interface to DD strategy routine 2135 * 2136 * RETURN: none 2137 * 2138 * serialization: LCACHE_LOCK() is NOT held during log i/o; 2139 */ 2140 static void lbmStartIO(struct lbuf * bp) 2141 { 2142 struct bio *bio; 2143 struct jfs_log *log = bp->l_log; 2144 2145 jfs_info("lbmStartIO\n"); 2146 2147 bio = bio_alloc(GFP_NOFS, 1); 2148 bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); 2149 bio->bi_bdev = log->bdev; 2150 bio->bi_io_vec[0].bv_page = bp->l_page; 2151 bio->bi_io_vec[0].bv_len = LOGPSIZE; 2152 bio->bi_io_vec[0].bv_offset = bp->l_offset; 2153 2154 bio->bi_vcnt = 1; 2155 bio->bi_idx = 0; 2156 bio->bi_size = LOGPSIZE; 2157 2158 bio->bi_end_io = lbmIODone; 2159 bio->bi_private = bp; 2160 2161 /* check if journaling to disk has been disabled */ 2162 if (log->no_integrity) { 2163 bio->bi_size = 0; 2164 lbmIODone(bio, 0); 2165 } else { 2166 submit_bio(WRITE_SYNC, bio); 2167 INCREMENT(lmStat.submitted); 2168 } 2169 } 2170 2171 2172 /* 2173 * lbmIOWait() 2174 */ 2175 static int lbmIOWait(struct lbuf * bp, int flag) 2176 { 2177 unsigned long flags; 2178 int rc = 0; 2179 2180 jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); 2181 2182 LCACHE_LOCK(flags); /* disable+lock */ 2183 2184 LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); 2185 2186 rc = (bp->l_flag & lbmERROR) ? -EIO : 0; 2187 2188 if (flag & lbmFREE) 2189 lbmfree(bp); 2190 2191 LCACHE_UNLOCK(flags); /* unlock+enable */ 2192 2193 jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); 2194 return rc; 2195 } 2196 2197 /* 2198 * lbmIODone() 2199 * 2200 * executed at INTIODONE level 2201 */ 2202 static void lbmIODone(struct bio *bio, int error) 2203 { 2204 struct lbuf *bp = bio->bi_private; 2205 struct lbuf *nextbp, *tail; 2206 struct jfs_log *log; 2207 unsigned long flags; 2208 2209 /* 2210 * get back jfs buffer bound to the i/o buffer 2211 */ 2212 jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); 2213 2214 LCACHE_LOCK(flags); /* disable+lock */ 2215 2216 bp->l_flag |= lbmDONE; 2217 2218 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 2219 bp->l_flag |= lbmERROR; 2220 2221 jfs_err("lbmIODone: I/O error in JFS log"); 2222 } 2223 2224 bio_put(bio); 2225 2226 /* 2227 * pagein completion 2228 */ 2229 if (bp->l_flag & lbmREAD) { 2230 bp->l_flag &= ~lbmREAD; 2231 2232 LCACHE_UNLOCK(flags); /* unlock+enable */ 2233 2234 /* wakeup I/O initiator */ 2235 LCACHE_WAKEUP(&bp->l_ioevent); 2236 2237 return; 2238 } 2239 2240 /* 2241 * pageout completion 2242 * 2243 * the bp at the head of write queue has completed pageout. 2244 * 2245 * if single-commit/full-page pageout, remove the current buffer 2246 * from head of pageout queue, and redrive pageout with 2247 * the new buffer at head of pageout queue; 2248 * otherwise, the partial-page pageout buffer stays at 2249 * the head of pageout queue to be redriven for pageout 2250 * by lmGroupCommit() until full-page pageout is completed. 2251 */ 2252 bp->l_flag &= ~lbmWRITE; 2253 INCREMENT(lmStat.pagedone); 2254 2255 /* update committed lsn */ 2256 log = bp->l_log; 2257 log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; 2258 2259 if (bp->l_flag & lbmDIRECT) { 2260 LCACHE_WAKEUP(&bp->l_ioevent); 2261 LCACHE_UNLOCK(flags); 2262 return; 2263 } 2264 2265 tail = log->wqueue; 2266 2267 /* single element queue */ 2268 if (bp == tail) { 2269 /* remove head buffer of full-page pageout 2270 * from log device write queue 2271 */ 2272 if (bp->l_flag & lbmRELEASE) { 2273 log->wqueue = NULL; 2274 bp->l_wqnext = NULL; 2275 } 2276 } 2277 /* multi element queue */ 2278 else { 2279 /* remove head buffer of full-page pageout 2280 * from log device write queue 2281 */ 2282 if (bp->l_flag & lbmRELEASE) { 2283 nextbp = tail->l_wqnext = bp->l_wqnext; 2284 bp->l_wqnext = NULL; 2285 2286 /* 2287 * redrive pageout of next page at head of write queue: 2288 * redrive next page without any bound tblk 2289 * (i.e., page w/o any COMMIT records), or 2290 * first page of new group commit which has been 2291 * queued after current page (subsequent pageout 2292 * is performed synchronously, except page without 2293 * any COMMITs) by lmGroupCommit() as indicated 2294 * by lbmWRITE flag; 2295 */ 2296 if (nextbp->l_flag & lbmWRITE) { 2297 /* 2298 * We can't do the I/O at interrupt time. 2299 * The jfsIO thread can do it 2300 */ 2301 lbmRedrive(nextbp); 2302 } 2303 } 2304 } 2305 2306 /* 2307 * synchronous pageout: 2308 * 2309 * buffer has not necessarily been removed from write queue 2310 * (e.g., synchronous write of partial-page with COMMIT): 2311 * leave buffer for i/o initiator to dispose 2312 */ 2313 if (bp->l_flag & lbmSYNC) { 2314 LCACHE_UNLOCK(flags); /* unlock+enable */ 2315 2316 /* wakeup I/O initiator */ 2317 LCACHE_WAKEUP(&bp->l_ioevent); 2318 } 2319 2320 /* 2321 * Group Commit pageout: 2322 */ 2323 else if (bp->l_flag & lbmGC) { 2324 LCACHE_UNLOCK(flags); 2325 lmPostGC(bp); 2326 } 2327 2328 /* 2329 * asynchronous pageout: 2330 * 2331 * buffer must have been removed from write queue: 2332 * insert buffer at head of freelist where it can be recycled 2333 */ 2334 else { 2335 assert(bp->l_flag & lbmRELEASE); 2336 assert(bp->l_flag & lbmFREE); 2337 lbmfree(bp); 2338 2339 LCACHE_UNLOCK(flags); /* unlock+enable */ 2340 } 2341 } 2342 2343 int jfsIOWait(void *arg) 2344 { 2345 struct lbuf *bp; 2346 2347 do { 2348 spin_lock_irq(&log_redrive_lock); 2349 while ((bp = log_redrive_list)) { 2350 log_redrive_list = bp->l_redrive_next; 2351 bp->l_redrive_next = NULL; 2352 spin_unlock_irq(&log_redrive_lock); 2353 lbmStartIO(bp); 2354 spin_lock_irq(&log_redrive_lock); 2355 } 2356 2357 if (freezing(current)) { 2358 spin_unlock_irq(&log_redrive_lock); 2359 refrigerator(); 2360 } else { 2361 set_current_state(TASK_INTERRUPTIBLE); 2362 spin_unlock_irq(&log_redrive_lock); 2363 schedule(); 2364 __set_current_state(TASK_RUNNING); 2365 } 2366 } while (!kthread_should_stop()); 2367 2368 jfs_info("jfsIOWait being killed!"); 2369 return 0; 2370 } 2371 2372 /* 2373 * NAME: lmLogFormat()/jfs_logform() 2374 * 2375 * FUNCTION: format file system log 2376 * 2377 * PARAMETERS: 2378 * log - volume log 2379 * logAddress - start address of log space in FS block 2380 * logSize - length of log space in FS block; 2381 * 2382 * RETURN: 0 - success 2383 * -EIO - i/o error 2384 * 2385 * XXX: We're synchronously writing one page at a time. This needs to 2386 * be improved by writing multiple pages at once. 2387 */ 2388 int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize) 2389 { 2390 int rc = -EIO; 2391 struct jfs_sb_info *sbi; 2392 struct logsuper *logsuper; 2393 struct logpage *lp; 2394 int lspn; /* log sequence page number */ 2395 struct lrd *lrd_ptr; 2396 int npages = 0; 2397 struct lbuf *bp; 2398 2399 jfs_info("lmLogFormat: logAddress:%Ld logSize:%d", 2400 (long long)logAddress, logSize); 2401 2402 sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list); 2403 2404 /* allocate a log buffer */ 2405 bp = lbmAllocate(log, 1); 2406 2407 npages = logSize >> sbi->l2nbperpage; 2408 2409 /* 2410 * log space: 2411 * 2412 * page 0 - reserved; 2413 * page 1 - log superblock; 2414 * page 2 - log data page: A SYNC log record is written 2415 * into this page at logform time; 2416 * pages 3-N - log data page: set to empty log data pages; 2417 */ 2418 /* 2419 * init log superblock: log page 1 2420 */ 2421 logsuper = (struct logsuper *) bp->l_ldata; 2422 2423 logsuper->magic = cpu_to_le32(LOGMAGIC); 2424 logsuper->version = cpu_to_le32(LOGVERSION); 2425 logsuper->state = cpu_to_le32(LOGREDONE); 2426 logsuper->flag = cpu_to_le32(sbi->mntflag); /* ? */ 2427 logsuper->size = cpu_to_le32(npages); 2428 logsuper->bsize = cpu_to_le32(sbi->bsize); 2429 logsuper->l2bsize = cpu_to_le32(sbi->l2bsize); 2430 logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE); 2431 2432 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; 2433 bp->l_blkno = logAddress + sbi->nbperpage; 2434 lbmStartIO(bp); 2435 if ((rc = lbmIOWait(bp, 0))) 2436 goto exit; 2437 2438 /* 2439 * init pages 2 to npages-1 as log data pages: 2440 * 2441 * log page sequence number (lpsn) initialization: 2442 * 2443 * pn: 0 1 2 3 n-1 2444 * +-----+-----+=====+=====+===.....===+=====+ 2445 * lspn: N-1 0 1 N-2 2446 * <--- N page circular file ----> 2447 * 2448 * the N (= npages-2) data pages of the log is maintained as 2449 * a circular file for the log records; 2450 * lpsn grows by 1 monotonically as each log page is written 2451 * to the circular file of the log; 2452 * and setLogpage() will not reset the page number even if 2453 * the eor is equal to LOGPHDRSIZE. In order for binary search 2454 * still work in find log end process, we have to simulate the 2455 * log wrap situation at the log format time. 2456 * The 1st log page written will have the highest lpsn. Then 2457 * the succeeding log pages will have ascending order of 2458 * the lspn starting from 0, ... (N-2) 2459 */ 2460 lp = (struct logpage *) bp->l_ldata; 2461 /* 2462 * initialize 1st log page to be written: lpsn = N - 1, 2463 * write a SYNCPT log record is written to this page 2464 */ 2465 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); 2466 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE); 2467 2468 lrd_ptr = (struct lrd *) &lp->data; 2469 lrd_ptr->logtid = 0; 2470 lrd_ptr->backchain = 0; 2471 lrd_ptr->type = cpu_to_le16(LOG_SYNCPT); 2472 lrd_ptr->length = 0; 2473 lrd_ptr->log.syncpt.sync = 0; 2474 2475 bp->l_blkno += sbi->nbperpage; 2476 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; 2477 lbmStartIO(bp); 2478 if ((rc = lbmIOWait(bp, 0))) 2479 goto exit; 2480 2481 /* 2482 * initialize succeeding log pages: lpsn = 0, 1, ..., (N-2) 2483 */ 2484 for (lspn = 0; lspn < npages - 3; lspn++) { 2485 lp->h.page = lp->t.page = cpu_to_le32(lspn); 2486 lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE); 2487 2488 bp->l_blkno += sbi->nbperpage; 2489 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; 2490 lbmStartIO(bp); 2491 if ((rc = lbmIOWait(bp, 0))) 2492 goto exit; 2493 } 2494 2495 rc = 0; 2496 exit: 2497 /* 2498 * finalize log 2499 */ 2500 /* release the buffer */ 2501 lbmFree(bp); 2502 2503 return rc; 2504 } 2505 2506 #ifdef CONFIG_JFS_STATISTICS 2507 static int jfs_lmstats_proc_show(struct seq_file *m, void *v) 2508 { 2509 seq_printf(m, 2510 "JFS Logmgr stats\n" 2511 "================\n" 2512 "commits = %d\n" 2513 "writes submitted = %d\n" 2514 "writes completed = %d\n" 2515 "full pages submitted = %d\n" 2516 "partial pages submitted = %d\n", 2517 lmStat.commit, 2518 lmStat.submitted, 2519 lmStat.pagedone, 2520 lmStat.full_page, 2521 lmStat.partial_page); 2522 return 0; 2523 } 2524 2525 static int jfs_lmstats_proc_open(struct inode *inode, struct file *file) 2526 { 2527 return single_open(file, jfs_lmstats_proc_show, NULL); 2528 } 2529 2530 const struct file_operations jfs_lmstats_proc_fops = { 2531 .owner = THIS_MODULE, 2532 .open = jfs_lmstats_proc_open, 2533 .read = seq_read, 2534 .llseek = seq_lseek, 2535 .release = single_release, 2536 }; 2537 #endif /* CONFIG_JFS_STATISTICS */ 2538