/*
 *	Copyright (C) International Business Machines Corp., 2000-2004
 *	Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *	This program is free software;  you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *	the GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program;  if not, write to the Free Software
 *	Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 *	jfs_logmgr.c: log manager
 *
 * for related information, see transaction manager (jfs_txnmgr.c), and
 * recovery manager (jfs_logredo.c).
 *
 * note: for detail, RTFS.
 *
 * log buffer manager:
 * special purpose buffer manager supporting log i/o requirements.
 * per log serial pageout of logpage
 * queuing i/o requests and redrive i/o at iodone
 * maintain current logpage buffer
 * no caching since append only
 * appropriate jfs buffer cache buffers as needed
 *
 * group commit:
 * transactions which wrote COMMIT records in the same in-memory
 * log page during the pageout of previous/current log page(s) are
 * committed together by the pageout of the page.
 *
 * TBD lazy commit:
 * transactions are committed asynchronously when the log page
 * containing its COMMIT record is paged out when it becomes full;
 *
 * serialization:
 * . a per log lock serializes log write.
 * . a per log lock serializes group commit.
 * . a per log lock serializes log open/close;
 *
 * TBD log integrity:
 * careful-write (ping-pong) of last logpage to recover from crash
 * in overwrite.
 * detection of split (out-of-order) write of physical sectors
 * of last logpage via timestamp at end of each sector
 * with its mirror data array at trailer.
 *
 * alternatives:
 * lsn - 64-bit monotonically increasing integer vs
 * 32-bit lspn and page eor.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>		/* for sync_blockdev() */
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

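/*
 * Editor's sketch of the write path through this file (derived from the
 * routines below, for orientation only): lmLog()/lmWriteRecord() append a
 * record to the current in-memory log page; lmNextPage() retires a full
 * page to the write queue; lbmWrite()/lbmStartIO() build and submit the
 * bio; lbmIODone() runs at I/O completion and either wakes a synchronous
 * waiter, hands the page to lmPostGC() for group-commit post-processing,
 * or recycles the buffer; pages that cannot be submitted at interrupt
 * time are redriven by the jfsIO thread (jfsIOWait()).
 */
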
/*
 * lbuf's ready to be redriven.  Protected by log_redrive_lock (jfsIO thread)
 */
static struct lbuf *log_redrive_list;
static DEFINE_SPINLOCK(log_redrive_lock);
DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);


/*
 *	log read/write serialization (per log)
 */
#define LOG_LOCK_INIT(log)	init_MUTEX(&(log)->loglock)
#define LOG_LOCK(log)		down(&((log)->loglock))
#define LOG_UNLOCK(log)		up(&((log)->loglock))


/*
 *	log group commit serialization (per log)
 */

#define LOGGC_LOCK_INIT(log)	spin_lock_init(&(log)->gclock)
#define LOGGC_LOCK(log)		spin_lock_irq(&(log)->gclock)
#define LOGGC_UNLOCK(log)	spin_unlock_irq(&(log)->gclock)
#define LOGGC_WAKEUP(tblk)	wake_up_all(&(tblk)->gcwait)

/*
 *	log sync serialization (per log)
 */
#define	LOGSYNC_DELTA(logsize)		min((logsize)/8, 128*LOGPSIZE)
#define	LOGSYNC_BARRIER(logsize)	((logsize)/4)
/*
#define	LOGSYNC_DELTA(logsize)		min((logsize)/4, 256*LOGPSIZE)
#define	LOGSYNC_BARRIER(logsize)	((logsize)/2)
*/


/*
 *	log buffer cache synchronization
 */
static DEFINE_SPINLOCK(jfsLCacheLock);

#define	LCACHE_LOCK(flags)	spin_lock_irqsave(&jfsLCacheLock, flags)
#define	LCACHE_UNLOCK(flags)	spin_unlock_irqrestore(&jfsLCacheLock, flags)

/*
 * See __SLEEP_COND in jfs_locks.h
 */
#define LCACHE_SLEEP_COND(wq, cond, flags)	\
do {						\
	if (cond)				\
		break;				\
	__SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \
} while (0)

#define	LCACHE_WAKEUP(event)	wake_up(event)


/*
 *	lbuf buffer cache (lCache) control
 */
/* log buffer manager pageout control (cumulative, inclusive) */
#define	lbmREAD		0x0001
#define	lbmWRITE	0x0002	/* enqueue at tail of write queue;
				 * init pageout if at head of queue;
				 */
#define	lbmRELEASE	0x0004	/* remove from write queue
				 * at completion of pageout;
				 * do not free/recycle it yet:
				 * caller will free it;
				 */
#define	lbmSYNC		0x0008	/* do not return to freelist
				 * when removed from write queue;
				 */
#define lbmFREE		0x0010	/* return to freelist
				 * at completion of pageout;
				 * the buffer may be recycled;
				 */
#define	lbmDONE		0x0020
#define	lbmERROR	0x0040
#define lbmGC		0x0080	/* lbmIODone to perform post-GC processing
				 * of log page
				 */
#define lbmDIRECT	0x0100
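
/*
 * Editor's note on flag combinations actually used below (not an
 * exhaustive contract): full pages retired by lmNextPage() go out as
 * lbmWRITE | lbmRELEASE | lbmFREE; group-commit pageout uses
 * lbmWRITE | lbmGC (plus lbmRELEASE when the page is already full);
 * sidestream superblock writes use lbmWRITE | lbmRELEASE | lbmSYNC via
 * lbmDirectWrite(), with the caller reaping the buffer in lbmIOWait().
 */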

/*
 * Global list of active external journals
 */
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log = NULL;
static DECLARE_MUTEX(jfs_log_sem);

/*
 * forward references
 */
static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk,
			 struct lrd * lrd, struct tlock * tlck);

static int lmNextPage(struct jfs_log * log);
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
			   int activate);

static int open_inline_log(struct super_block *sb);
static int open_dummy_log(struct super_block *sb);
static int lbmLogInit(struct jfs_log * log);
static void lbmLogShutdown(struct jfs_log * log);
static struct lbuf *lbmAllocate(struct jfs_log * log, int);
static void lbmFree(struct lbuf * bp);
static void lbmfree(struct lbuf * bp);
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp);
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
		     int cant_block);
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
static int lbmIOWait(struct lbuf * bp, int flag);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf * bp);
static void lmGCwrite(struct jfs_log * log, int cant_block);
static int lmLogSync(struct jfs_log * log, int hard_sync);



/*
 *	statistics
 */
#ifdef CONFIG_JFS_STATISTICS
static struct lmStat {
	uint commit;		/* # of commit */
	uint pagedone;		/* # of pages written */
	uint submitted;		/* # of pages submitted */
	uint full_page;		/* # of full pages submitted */
	uint partial_page;	/* # of partial pages submitted */
} lmStat;
#endif


/*
 * NAME:	lmLog()
 *
 * FUNCTION:	write a log record;
 *
 * PARAMETER:	log	- log descriptor
 *		tblk	- transaction block (NULL for out-of-transaction log)
 *		lrd	- log record descriptor
 *		tlck	- tlock of the logged object (may be NULL)
 *
 * RETURN:	lsn - offset to the next log record to write (end-of-log);
 *		-1  - error;
 *
 * note: todo: log error handler
 */
int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck)
{
	int lsn;
	int diffp, difft;
	struct metapage *mp = NULL;
	unsigned long flags;

	jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
		 log, tblk, lrd, tlck);

	LOG_LOCK(log);

	/* log by (out-of-transaction) JFS ? */
	if (tblk == NULL)
		goto writeRecord;

	/* log from page ? */
	if (tlck == NULL ||
	    tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL)
		goto writeRecord;

	/*
	 *	initialize/update page/transaction recovery lsn
	 */
	lsn = log->lsn;

	LOGSYNC_LOCK(log, flags);

	/*
	 * initialize page lsn if first log write of the page
	 */
	if (mp->lsn == 0) {
		mp->log = log;
		mp->lsn = lsn;
		log->count++;

		/* insert page at tail of logsynclist */
		list_add_tail(&mp->synclist, &log->synclist);
	}

	/*
	 * initialize/update lsn of tblock of the page
	 *
	 * transaction inherits oldest lsn of pages associated
	 * with allocation/deallocation of resources (their
	 * log records are used to reconstruct allocation map
	 * at recovery time: inode for inode allocation map,
	 * B+-tree index of extent descriptors for block
	 * allocation map);
	 * allocation map pages inherit transaction lsn at
	 * commit time to allow forwarding log syncpt past log
	 * records associated with allocation/deallocation of
	 * resources only after persistent map of these map pages
	 * has been updated and propagated to home.
	 */
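	/*
	 * Illustrative example (editor's note): if a transaction first
	 * logs a page at lsn 200 and later logs a page whose first write
	 * was at lsn 120, the tblock is moved behind the older page on
	 * the logsynclist, so the sync point can never be forwarded past
	 * a resource whose home copy is not yet up to date.
	 */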
	/*
	 * initialize transaction lsn:
	 */
	if (tblk->lsn == 0) {
		/* inherit lsn of its first page logged */
		tblk->lsn = mp->lsn;
		log->count++;

		/* insert tblock after the page on logsynclist */
		list_add(&tblk->synclist, &mp->synclist);
	}
	/*
	 * update transaction lsn:
	 */
	else {
		/* inherit oldest/smallest lsn of page */
		logdiff(diffp, mp->lsn, log);
		logdiff(difft, tblk->lsn, log);
		if (diffp < difft) {
			/* update tblock lsn with page lsn */
			tblk->lsn = mp->lsn;

			/* move tblock after page on logsynclist */
			list_move(&tblk->synclist, &mp->synclist);
		}
	}

	LOGSYNC_UNLOCK(log, flags);

	/*
	 *	write the log record
	 */
      writeRecord:
	lsn = lmWriteRecord(log, tblk, lrd, tlck);

	/*
	 * forward log syncpt if log reached next syncpt trigger
	 */
	logdiff(diffp, lsn, log);
	if (diffp >= log->nextsync)
		lsn = lmLogSync(log, 0);

	/* update end-of-log lsn */
	log->lsn = lsn;

	LOG_UNLOCK(log);

	/* return end-of-log address */
	return lsn;
}

/*
 * NAME:	lmWriteRecord()
 *
 * FUNCTION:	move the log record to current log page
 *
 * PARAMETER:	cd	- commit descriptor
 *
 * RETURN:	end-of-log address
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int
lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	      struct tlock * tlck)
{
	int lsn = 0;		/* end-of-log address */
	struct lbuf *bp;	/* dst log page buffer */
	struct logpage *lp;	/* dst log page */
	caddr_t dst;		/* destination address in log page */
	int dstoffset;		/* end-of-log offset in log page */
	int freespace;		/* free space in log page */
	caddr_t p;		/* src meta-data page */
	caddr_t src;
	int srclen;
	int nbytes;		/* number of bytes to move */
	int i;
	int len;
	struct linelock *linelock;
	struct lv *lv;
	struct lvd *lvd;
	int l2linesize;

	len = 0;

	/* retrieve destination log page to write */
	bp = (struct lbuf *) log->bp;
	lp = (struct logpage *) bp->l_ldata;
	dstoffset = log->eor;

	/* any log data to write ? */
	if (tlck == NULL)
		goto moveLrd;

	/*
	 *	move log record data
	 */
	/* retrieve source meta-data page to log */
	if (tlck->flag & tlckPAGELOCK) {
		p = (caddr_t) (tlck->mp->data);
		linelock = (struct linelock *) & tlck->lock;
	}
	/* retrieve source in-memory inode to log */
	else if (tlck->flag & tlckINODELOCK) {
		if (tlck->type & tlckDTREE)
			p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
		else
			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
		linelock = (struct linelock *) & tlck->lock;
	}
#ifdef	_JFS_WIP
	else if (tlck->flag & tlckINLINELOCK) {

		inlinelock = (struct inlinelock *) & tlck;
		p = (caddr_t) & inlinelock->pxd;
		linelock = (struct linelock *) & tlck;
	}
#endif	/* _JFS_WIP */
	else {
		jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
		return 0;	/* Probably should trap */
	}
	l2linesize = linelock->l2linesize;

      moveData:
	ASSERT(linelock->index <= linelock->maxcnt);

	lv = linelock->lv;
	for (i = 0; i < linelock->index; i++, lv++) {
		if (lv->length == 0)
			continue;

		/* is page full ? */
		if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) {
			/* page becomes full: move on to next page */
			lmNextPage(log);

			bp = log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;
		}

		/*
		 * move log vector data
		 */
		src = (u8 *) p + (lv->offset << l2linesize);
		srclen = lv->length << l2linesize;
		len += srclen;
		while (srclen > 0) {
			freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
			nbytes = min(freespace, srclen);
			dst = (caddr_t) lp + dstoffset;
			memcpy(dst, src, nbytes);
			dstoffset += nbytes;

			/* is page not full ? */
			if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
				break;

			/* page becomes full: move on to next page */
			lmNextPage(log);

			bp = (struct lbuf *) log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;

			srclen -= nbytes;
			src += nbytes;
		}

		/*
		 * move log vector descriptor
		 */
		len += 4;
		lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
		lvd->offset = cpu_to_le16(lv->offset);
		lvd->length = cpu_to_le16(lv->length);
		dstoffset += 4;
		jfs_info("lmWriteRecord: lv offset:%d length:%d",
			 lv->offset, lv->length);
	}

	if ((i = linelock->next)) {
		linelock = (struct linelock *) lid_to_tlock(i);
		goto moveData;
	}

	/*
	 *	move log record descriptor
	 */
      moveLrd:
	lrd->length = cpu_to_le16(len);

	src = (caddr_t) lrd;
	srclen = LOGRDSIZE;

	while (srclen > 0) {
		freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
		nbytes = min(freespace, srclen);
		dst = (caddr_t) lp + dstoffset;
		memcpy(dst, src, nbytes);

		dstoffset += nbytes;
		srclen -= nbytes;

		/* are there more to move than freespace of page ? */
		if (srclen)
			goto pageFull;

		/*
		 * end of log record descriptor
		 */

		/* update last log record eor */
		log->eor = dstoffset;
		bp->l_eor = dstoffset;
		lsn = (log->page << L2LOGPSIZE) + dstoffset;

		if (lrd->type & cpu_to_le16(LOG_COMMIT)) {
			tblk->clsn = lsn;
			jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn,
				 bp->l_eor);

			INCREMENT(lmStat.commit);	/* # of commit */

			/*
			 * enqueue tblock for group commit:
			 *
			 * enqueue tblock of non-trivial/synchronous COMMIT
			 * at tail of group commit queue
			 * (trivial/asynchronous COMMITs are ignored by
			 * group commit.)
			 */
			LOGGC_LOCK(log);

			/* init tblock gc state */
			tblk->flag = tblkGC_QUEUE;
			tblk->bp = log->bp;
			tblk->pn = log->page;
			tblk->eor = log->eor;

			/* enqueue transaction to commit queue */
			list_add_tail(&tblk->cqueue, &log->cqueue);

			LOGGC_UNLOCK(log);
		}

		jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x",
			 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);

		/* page not full ? */
		if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
			return lsn;

	      pageFull:
		/* page becomes full: move on to next page */
		lmNextPage(log);

		bp = (struct lbuf *) log->bp;
		lp = (struct logpage *) bp->l_ldata;
		dstoffset = LOGPHDRSIZE;
		src += nbytes;
	}

	return lsn;
}

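/*
 * Editor's sketch of the on-page record layout implied by the code above
 * (symbolic sizes; see jfs_logmgr.h for the actual values): each log page
 * is LOGPSIZE bytes with a header at [0, LOGPHDRSIZE) and a trailer in the
 * last LOGPTLRSIZE bytes; records fill [LOGPHDRSIZE, LOGPSIZE-LOGPTLRSIZE)
 * with vector data, a 4-byte lvd descriptor per vector, and finally the
 * LOGRDSIZE-byte lrd, so a record may straddle page boundaries while eor
 * always names the byte just past the last complete log record.
 */
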
/*
 * NAME:	lmNextPage()
 *
 * FUNCTION:	write current page and allocate next page.
 *
 * PARAMETER:	log
 *
 * RETURN:	0
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmNextPage(struct jfs_log * log)
{
	struct logpage *lp;
	int lspn;		/* log sequence page number */
	int pn;			/* current page number */
	struct lbuf *bp;
	struct lbuf *nextbp;
	struct tblock *tblk;

	/* get current log page number and log sequence page number */
	pn = log->page;
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lspn = le32_to_cpu(lp->h.page);

	LOGGC_LOCK(log);

	/*
	 *	write or queue the full page at the tail of write queue
	 */
	/* get the tail tblk on commit queue */
	if (list_empty(&log->cqueue))
		tblk = NULL;
	else
		tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);

	/* every tblk that has its COMMIT record on the current page,
	 * and has not been committed, must be on the commit queue,
	 * since a tblk is queued at the commit queue at the time
	 * of writing its COMMIT record on the page, before the
	 * page becomes full (even though the tblk thread
	 * that wrote the COMMIT record may have been suspended
	 * currently);
	 */

	/* is page bound with outstanding tail tblk ? */
	if (tblk && tblk->pn == pn) {
		/* mark tblk for end-of-page */
		tblk->flag |= tblkGC_EOP;

		if (log->cflag & logGC_PAGEOUT) {
			/* if page is not already on write queue,
			 * just enqueue (no lbmWRITE to prevent redrive)
			 * buffer to wqueue to ensure correct serial order
			 * of the pages since log pages will be added
			 * continuously
			 */
			if (bp->l_wqnext == NULL)
				lbmWrite(log, bp, 0, 0);
		} else {
			/*
			 * No current GC leader, initiate group commit
			 */
			log->cflag |= logGC_PAGEOUT;
			lmGCwrite(log, 0);
		}
	}
	/* page is not bound with outstanding tblk:
	 * init write or mark it to be redriven (lbmWRITE)
	 */
	else {
		/* finalize the page */
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
	}
	LOGGC_UNLOCK(log);

	/*
	 *	allocate/initialize next page
	 */
	/* if log wraps, the first data page of log is 2
	 * (0 never used, 1 is superblock).
	 */
	log->page = (pn == log->size - 1) ? 2 : pn + 1;
	log->eor = LOGPHDRSIZE;	/* ? valid page empty/full at logRedo() */

	/* allocate/initialize next log page buffer */
	nextbp = lbmAllocate(log, log->page);
	nextbp->l_eor = log->eor;
	log->bp = nextbp;

	/* initialize next log page */
	lp = (struct logpage *) nextbp->l_ldata;
	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

	return 0;
}

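/*
 * Editor's summary of the group-commit state transitions driven by the
 * routines below (flag names as used in this file): a committing tblk is
 * enqueued as tblkGC_QUEUE by lmWriteRecord(); a synchronous waiter adds
 * tblkGC_READY in lmGroupCommit(); the GC leader marks every tblk on the
 * page tblkGC_COMMIT in lmGCwrite(); and lmPostGC() finishes with
 * tblkGC_COMMITTED (or hands lazy transactions to txLazyUnlock()).
 * tblkGC_EOP/tblkGC_FREE track whether the page filled before pageout.
 */
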
/*
 * NAME:	lmGroupCommit()
 *
 * FUNCTION:	group commit
 *	initiate pageout of the pages with COMMIT in the order of
 *	page number - redrive pageout of the page at the head of
 *	pageout queue until full page has been written.
 *
 * RETURN:
 *
 * NOTE:
 *	LOGGC_LOCK serializes log group commit queue, and
 *	transaction blocks on the commit queue.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
{
	int rc = 0;

	LOGGC_LOCK(log);

	/* group committed already ? */
	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}
	jfs_info("lmGroupCommit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);

	if (tblk->xflag & COMMIT_LAZY)
		tblk->flag |= tblkGC_LAZY;

	if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
	    (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
	     || jfs_tlocks_low)) {
		/*
		 * No pageout in progress
		 *
		 * start group commit as its group leader.
		 */
		log->cflag |= logGC_PAGEOUT;

		lmGCwrite(log, 0);
	}

	if (tblk->xflag & COMMIT_LAZY) {
		/*
		 * Lazy transactions can leave now
		 */
		LOGGC_UNLOCK(log);
		return 0;
	}

	/* lmGCwrite gives up LOGGC_LOCK, check again */

	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}

	/* upcount transaction waiting for completion
	 */
	log->gcrtc++;
	tblk->flag |= tblkGC_READY;

	__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
		     LOGGC_LOCK(log), LOGGC_UNLOCK(log));

	/* removed from commit queue */
	if (tblk->flag & tblkGC_ERROR)
		rc = -EIO;

	LOGGC_UNLOCK(log);
	return rc;
}

/*
 * NAME:	lmGCwrite()
 *
 * FUNCTION:	group commit write
 *	initiate write of log page, building a group of all transactions
 *	with commit records on that page.
 *
 * RETURN:	None
 *
 * NOTE:
 *	LOGGC_LOCK must be held by caller.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
static void lmGCwrite(struct jfs_log * log, int cant_write)
{
	struct lbuf *bp;
	struct logpage *lp;
	int gcpn;		/* group commit page number */
	struct tblock *tblk;
	struct tblock *xtblk = NULL;

	/*
	 * build the commit group of a log page
	 *
	 * scan commit queue and make a commit group of all
	 * transactions with COMMIT records on the same log page.
	 */
	/* get the head tblk on the commit queue */
	gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;

	list_for_each_entry(tblk, &log->cqueue, cqueue) {
		if (tblk->pn != gcpn)
			break;

		xtblk = tblk;

		/* state transition: (QUEUE, READY) -> COMMIT */
		tblk->flag |= tblkGC_COMMIT;
	}
	tblk = xtblk;		/* last tblk of the page */

	/*
	 * pageout to commit transactions on the log page.
	 */
	bp = (struct lbuf *) tblk->bp;
	lp = (struct logpage *) bp->l_ldata;
	/* is page already full ? */
	if (tblk->flag & tblkGC_EOP) {
		/* mark page to free at end of group commit of the page */
		tblk->flag &= ~tblkGC_EOP;
		tblk->flag |= tblkGC_FREE;
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
			 cant_write);
		INCREMENT(lmStat.full_page);
	}
	/* page is not yet full */
	else {
		bp->l_ceor = tblk->eor;	/* ? bp->l_ceor = bp->l_eor; */
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
		INCREMENT(lmStat.partial_page);
	}
}

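/*
 * Illustrative example (editor's note): if the commit queue holds tblks
 * whose COMMIT records landed on pages 7, 7, 7, 8, the leader groups the
 * first three (gcpn = 7) and pages out page 7 once; the tblk on page 8
 * waits for the next round, preserving page-number order of pageouts.
 */
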
/*
 * NAME:	lmPostGC()
 *
 * FUNCTION:	group commit post-processing
 *	Processes transactions after their commit records have been written
 *	to disk, redriving log I/O if necessary.
 *
 * RETURN:	None
 *
 * NOTE:
 *	This routine is called at interrupt time by lbmIODone
 */
static void lmPostGC(struct lbuf * bp)
{
	unsigned long flags;
	struct jfs_log *log = bp->l_log;
	struct logpage *lp;
	struct tblock *tblk, *temp;

	//LOGGC_LOCK(log);
	spin_lock_irqsave(&log->gclock, flags);
	/*
	 * current pageout of group commit completed.
	 *
	 * remove/wakeup transactions from commit queue who were
	 * group committed with the current log page
	 */
	list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
		if (!(tblk->flag & tblkGC_COMMIT))
			break;
		/* if transaction was marked GC_COMMIT then
		 * it has been shipped in the current pageout
		 * and made it to disk - it is committed.
		 */

		if (bp->l_flag & lbmERROR)
			tblk->flag |= tblkGC_ERROR;

		/* remove it from the commit queue */
		list_del(&tblk->cqueue);
		tblk->flag &= ~tblkGC_QUEUE;

		if (tblk == log->flush_tblk) {
			/* we can stop flushing the log now */
			clear_bit(log_FLUSH, &log->flag);
			log->flush_tblk = NULL;
		}

		jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
			 tblk->flag);

		if (!(tblk->xflag & COMMIT_FORCE))
			/*
			 * Hand tblk over to lazy commit thread
			 */
			txLazyUnlock(tblk);
		else {
			/* state transition: COMMIT -> COMMITTED */
			tblk->flag |= tblkGC_COMMITTED;

			if (tblk->flag & tblkGC_READY)
				log->gcrtc--;

			LOGGC_WAKEUP(tblk);
		}

		/* was page full before pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		if (tblk->flag & tblkGC_FREE)
			lbmFree(bp);
		/* did page become full after pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		else if (tblk->flag & tblkGC_EOP) {
			/* finalize the page */
			lp = (struct logpage *) bp->l_ldata;
			bp->l_ceor = bp->l_eor;
			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
			jfs_info("lmPostGC: calling lbmWrite");
			lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
				 1);
		}

	}

	/* are there any transactions who have entered lmGroupCommit()
	 * (whose COMMITs are after that of the last log page written),
	 * who are waiting for new group commit (above at (SLEEP 1)),
	 * or lazy transactions on a full (queued) log page?
	 * select the latest ready transaction as new group leader and
	 * wake her up to lead her group.
	 */
	if ((!list_empty(&log->cqueue)) &&
	    ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
	     test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
		/*
		 * Call lmGCwrite with new group leader
		 */
		lmGCwrite(log, 1);

	/* no transactions are ready yet (transactions are only just
	 * queued (GC_QUEUE) and not entered for group commit yet).
	 * the first transaction entering group commit
	 * will elect herself as new group leader.
	 */
	else
		log->cflag &= ~logGC_PAGEOUT;

	//LOGGC_UNLOCK(log);
	spin_unlock_irqrestore(&log->gclock, flags);
	return;
}

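/*
 * Editor's note: because lmPostGC() runs from bio completion (interrupt)
 * context, any further pageout it triggers passes cant_write = 1 down
 * through lmGCwrite() to lbmWrite(), which defers the actual submit_bio()
 * to the jfsIO thread via lbmRedrive() instead of issuing I/O here.
 */
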
/*
 * NAME:	lmLogSync()
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *	if new sync address is available
 *	(normally the case if sync() is executed by background
 *	process).
 *	calculate new value of log->nextsync which determines when
 *	this code is called again.
 *
 * PARAMETERS:	log	- log structure
 *		hard_sync - 1 to force all metadata to be written
 *
 * RETURN:	0
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmLogSync(struct jfs_log * log, int hard_sync)
{
	int logsize;
	int written;		/* written since last syncpt */
	int free;		/* free space left available */
	int delta;		/* additional delta to write normally */
	int more;		/* additional write granted */
	struct lrd lrd;
	int lsn;
	struct logsyncblk *lp;
	struct jfs_sb_info *sbi;
	unsigned long flags;

	/* push dirty metapages out to disk */
	if (hard_sync)
		list_for_each_entry(sbi, &log->sb_list, log_list) {
			filemap_fdatawrite(sbi->ipbmap->i_mapping);
			filemap_fdatawrite(sbi->ipimap->i_mapping);
			filemap_fdatawrite(sbi->direct_inode->i_mapping);
		}
	else
		list_for_each_entry(sbi, &log->sb_list, log_list) {
			filemap_flush(sbi->ipbmap->i_mapping);
			filemap_flush(sbi->ipimap->i_mapping);
			filemap_flush(sbi->direct_inode->i_mapping);
		}

	/*
	 *	forward syncpt
	 */
	/* if last sync is same as last syncpt,
	 * invoke sync point forward processing to update sync.
	 */

	if (log->sync == log->syncpt) {
		LOGSYNC_LOCK(log, flags);
		if (list_empty(&log->synclist))
			log->sync = log->lsn;
		else {
			lp = list_entry(log->synclist.next,
					struct logsyncblk, synclist);
			log->sync = lp->lsn;
		}
		LOGSYNC_UNLOCK(log, flags);

	}

	/* if sync is different from last syncpt,
	 * write a SYNCPT record with syncpt = sync.
	 * reset syncpt = sync
	 */
	if (log->sync != log->syncpt) {
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = cpu_to_le32(log->sync);
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);

		log->syncpt = log->sync;
	} else
		lsn = log->lsn;

	/*
	 *	setup next syncpt trigger (SWAG)
	 */
	logsize = log->logsize;

	logdiff(written, lsn, log);
	free = logsize - written;
	delta = LOGSYNC_DELTA(logsize);
	more = min(free / 2, delta);
	if (more < 2 * LOGPSIZE) {
		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
		/*
		 *	log wrapping
		 *
		 * option 1 - panic ? No!
		 * option 2 - shutdown file systems
		 *	      associated with log ?
		 * option 3 - extend log ?
		 */
		/*
		 * option 4 - second chance
		 *
		 * mark log wrapped, and continue.
		 * when all active transactions are completed,
		 * mark log valid for recovery.
		 * if crashed during invalid state, log state
		 * implies invalid log, forcing fsck().
		 */
		/* mark log state log wrap in log superblock */
		/* log->state = LOGWRAP; */

		/* reset sync point computation */
		log->syncpt = log->sync = lsn;
		log->nextsync = delta;
	} else
		/* next syncpt trigger = written + more */
		log->nextsync = written + more;

	/* if number of bytes written from last sync point is more
	 * than 1/4 of the log size, stop new transactions from
	 * starting until all current transactions are completed
	 * by setting syncbarrier flag.
	 */
	if (!test_bit(log_SYNCBARRIER, &log->flag) &&
	    (written > LOGSYNC_BARRIER(logsize)) && log->active) {
		set_bit(log_SYNCBARRIER, &log->flag);
		jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
			 log->syncpt);
		/*
		 * We may have to initiate group commit
		 */
		jfs_flush_journal(log, 0);
	}

	return lsn;
}

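/*
 * Worked example of the syncpt trigger arithmetic above (editor's note,
 * assuming LOGPSIZE = 4096 as defined in jfs_filsys.h): for a 32 MB log,
 * LOGSYNC_DELTA = min(32MB/8, 128*4KB) = 512 KB.  With written = 30 MB,
 * free = 2 MB, so more = min(1MB, 512KB) = 512 KB and the next sync point
 * fires at written + 512 KB; once free drops below 4 * LOGPSIZE, more
 * falls under 2 * LOGPSIZE and the log-wrap path resets the computation.
 */
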
/*
 * NAME:	jfs_syncpt
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *
 * PARAMETERS:	log	  - log structure
 *		hard_sync - set to 1 to force metadata to be written
 */
void jfs_syncpt(struct jfs_log *log, int hard_sync)
{
	LOG_LOCK(log);
	lmLogSync(log, hard_sync);
	LOG_UNLOCK(log);
}

/*
 * NAME:	lmLogOpen()
 *
 * FUNCTION:	open the log on first open;
 *	insert filesystem in the active list of the log.
 *
 * PARAMETER:	sb	- superblock of the file system joining the log
 *
 * RETURN:
 *
 * serialization:
 */
int lmLogOpen(struct super_block *sb)
{
	int rc;
	struct block_device *bdev;
	struct jfs_log *log;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sbi->flag & JFS_NOINTEGRITY)
		return open_dummy_log(sb);

	if (sbi->mntflag & JFS_INLINELOG)
		return open_inline_log(sb);

	down(&jfs_log_sem);
	list_for_each_entry(log, &jfs_external_logs, journal_list) {
		if (log->bdev->bd_dev == sbi->logdev) {
			if (memcmp(log->uuid, sbi->loguuid,
				   sizeof(log->uuid))) {
				jfs_warn("wrong uuid on JFS journal");
				up(&jfs_log_sem);
				return -EINVAL;
			}
			/*
			 * add file system to log active file system list
			 */
			if ((rc = lmLogFileSystem(log, sbi, 1))) {
				up(&jfs_log_sem);
				return rc;
			}
			goto journal_found;
		}
	}

	if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
		up(&jfs_log_sem);
		return -ENOMEM;
	}
	memset(log, 0, sizeof(struct jfs_log));
	INIT_LIST_HEAD(&log->sb_list);
	init_waitqueue_head(&log->syncwait);

	/*
	 *	external log as separate logical volume
	 *
	 * file systems to log may have n-to-1 relationship;
	 */

	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		rc = PTR_ERR(bdev);
		goto free;
	}

	if ((rc = bd_claim(bdev, log))) {
		goto close;
	}

	log->bdev = bdev;
	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));

	/*
	 * initialize log:
	 */
	if ((rc = lmLogInit(log)))
		goto unclaim;

	list_add(&log->journal_list, &jfs_external_logs);

	/*
	 * add file system to log active file system list
	 */
	if ((rc = lmLogFileSystem(log, sbi, 1)))
		goto shutdown;

      journal_found:
	LOG_LOCK(log);
	list_add(&sbi->log_list, &log->sb_list);
	sbi->log = log;
	LOG_UNLOCK(log);

	up(&jfs_log_sem);
	return 0;

	/*
	 *	unwind on error
	 */
      shutdown:		/* unwind lbmLogInit() */
	list_del(&log->journal_list);
	lbmLogShutdown(log);

      unclaim:
	bd_release(bdev);

      close:		/* close external log device */
	blkdev_put(bdev);

      free:		/* free log descriptor */
	up(&jfs_log_sem);
	kfree(log);

	jfs_warn("lmLogOpen: exit(%d)", rc);
	return rc;
}

static int open_inline_log(struct super_block *sb)
{
	struct jfs_log *log;
	int rc;

	if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL)))
		return -ENOMEM;
	memset(log, 0, sizeof(struct jfs_log));
	INIT_LIST_HEAD(&log->sb_list);
	init_waitqueue_head(&log->syncwait);

	set_bit(log_INLINELOG, &log->flag);
	log->bdev = sb->s_bdev;
	log->base = addressPXD(&JFS_SBI(sb)->logpxd);
	log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
	    (L2LOGPSIZE - sb->s_blocksize_bits);
	log->l2bsize = sb->s_blocksize_bits;
	ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);

	/*
	 * initialize log.
	 */
	if ((rc = lmLogInit(log))) {
		kfree(log);
		jfs_warn("lmLogOpen: exit(%d)", rc);
		return rc;
	}

	list_add(&JFS_SBI(sb)->log_list, &log->sb_list);
	JFS_SBI(sb)->log = log;

	return rc;
}

static int open_dummy_log(struct super_block *sb)
{
	int rc;

	down(&jfs_log_sem);
	if (!dummy_log) {
		dummy_log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL);
		if (!dummy_log) {
			up(&jfs_log_sem);
			return -ENOMEM;
		}
		memset(dummy_log, 0, sizeof(struct jfs_log));
		INIT_LIST_HEAD(&dummy_log->sb_list);
		init_waitqueue_head(&dummy_log->syncwait);
		dummy_log->no_integrity = 1;
		/* Make up some stuff */
		dummy_log->base = 0;
		dummy_log->size = 1024;
		rc = lmLogInit(dummy_log);
		if (rc) {
			kfree(dummy_log);
			dummy_log = NULL;
			up(&jfs_log_sem);
			return rc;
		}
	}

	LOG_LOCK(dummy_log);
	list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
	JFS_SBI(sb)->log = dummy_log;
	LOG_UNLOCK(dummy_log);
	up(&jfs_log_sem);

	return 0;
}

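/*
 * Editor's note: three log flavors converge on lmLogInit() - an inline
 * log carved out of the filesystem's own device (open_inline_log), a
 * shared external journal device (lmLogOpen proper, n filesystems to 1
 * log), and the no-integrity dummy log above, whose pages are filled but
 * never written because lbmStartIO() short-circuits to lbmIODone().
 */
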
/*
 * NAME:	lmLogInit()
 *
 * FUNCTION:	log initialization at first log open.
 *
 *	logredo() (or logformat()) should have been run previously.
 *	initialize the log from log superblock.
 *	set the log state in the superblock to LOGMOUNT and
 *	write SYNCPT log record.
 *
 * PARAMETER:	log	- log structure
 *
 * RETURN:	0	- if ok
 *		-EINVAL	- bad log magic number or superblock dirty
 *		error returned from logwait()
 *
 * serialization: single first open thread
 */
int lmLogInit(struct jfs_log * log)
{
	int rc = 0;
	struct lrd lrd;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;
	int lsn = 0;

	jfs_info("lmLogInit: log:0x%p", log);

	/* initialize the group commit serialization lock */
	LOGGC_LOCK_INIT(log);

	/* allocate/initialize the log write serialization lock */
	LOG_LOCK_INIT(log);

	LOGSYNC_LOCK_INIT(log);

	INIT_LIST_HEAD(&log->synclist);

	INIT_LIST_HEAD(&log->cqueue);
	log->flush_tblk = NULL;

	log->count = 0;

	/*
	 * initialize log i/o
	 */
	if ((rc = lbmLogInit(log)))
		return rc;

	if (!test_bit(log_INLINELOG, &log->flag))
		log->l2bsize = L2LOGPSIZE;

	/* check for disabled journaling to disk */
	if (log->no_integrity) {
		/*
		 * Journal pages will still be filled.  When the time comes
		 * to actually do the I/O, the write is not done, and the
		 * endio routine is called directly.
		 */
		bp = lbmAllocate(log, 0);
		log->bp = bp;
		bp->l_pn = bp->l_eor = 0;
	} else {
		/*
		 * validate log superblock
		 */
		if ((rc = lbmRead(log, 1, &bpsuper)))
			goto errout10;

		logsuper = (struct logsuper *) bpsuper->l_ldata;

		if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
			jfs_warn("*** Log Format Error ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* logredo() should have been run successfully. */
		if (logsuper->state != cpu_to_le32(LOGREDONE)) {
			jfs_warn("*** Log Is Dirty ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* initialize log from log superblock */
		if (test_bit(log_INLINELOG,&log->flag)) {
			if (log->size != le32_to_cpu(logsuper->size)) {
				rc = -EINVAL;
				goto errout20;
			}
			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		} else {
			if (memcmp(logsuper->uuid, log->uuid, 16)) {
				jfs_warn("wrong uuid on JFS log device");
				rc = -EINVAL;
				goto errout20;
			}
			log->size = le32_to_cpu(logsuper->size);
			log->l2bsize = le32_to_cpu(logsuper->l2bsize);
			jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		}

		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);

		/*
		 * initialize for log append write mode
		 */
		/* establish current/end-of-log page/buffer */
		if ((rc = lbmRead(log, log->page, &bp)))
			goto errout20;

		lp = (struct logpage *) bp->l_ldata;

		jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
			 le32_to_cpu(logsuper->end), log->page, log->eor,
			 le16_to_cpu(lp->h.eor));

		log->bp = bp;
		bp->l_pn = log->page;
		bp->l_eor = log->eor;

		/* if current page is full, move on to next page */
		if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
			lmNextPage(log);

		/*
		 * initialize log syncpoint
		 */
		/*
		 * write the first SYNCPT record with syncpoint = 0
		 * (i.e., log redo up to HERE !);
		 * remove current page from lbm write queue at end of pageout
		 * (to write log superblock update), but do not release to
		 * freelist;
		 */
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = 0;
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);
		bp = log->bp;
		bp->l_ceor = bp->l_eor;
		lp = (struct logpage *) bp->l_ldata;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
		lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
		if ((rc = lbmIOWait(bp, 0)))
			goto errout30;

		/*
		 * update/write superblock
		 */
		logsuper->state = cpu_to_le32(LOGMOUNT);
		log->serial = le32_to_cpu(logsuper->serial) + 1;
		logsuper->serial = cpu_to_le32(log->serial);
		lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
		if ((rc = lbmIOWait(bpsuper, lbmFREE)))
			goto errout30;
	}

	/* initialize logsync parameters */
	log->logsize = (log->size - 2) << L2LOGPSIZE;
	log->lsn = lsn;
	log->syncpt = lsn;
	log->sync = log->syncpt;
	log->nextsync = LOGSYNC_DELTA(log->logsize);

	jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
		 log->lsn, log->syncpt, log->sync);

	/*
	 * initialize for lazy/group commit
	 */
	log->clsn = lsn;

	return 0;

	/*
	 *	unwind on error
	 */
      errout30:		/* release log page */
	log->wqueue = NULL;
	bp->l_wqnext = NULL;
	lbmFree(bp);

      errout20:		/* release log superblock */
	lbmFree(bpsuper);

      errout10:		/* unwind lbmLogInit() */
	lbmLogShutdown(log);

	jfs_warn("lmLogInit: exit(%d)", rc);
	return rc;
}


/*
 * NAME:	lmLogClose()
 *
 * FUNCTION:	remove file system <ipmnt> from active list of log <iplog>
 *		and close it on last close.
 *
 * PARAMETER:	sb	- superblock
 *
 * RETURN:	errors from subroutines
 *
 * serialization:
 */
int lmLogClose(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	struct block_device *bdev;
	int rc = 0;

	jfs_info("lmLogClose: log:0x%p", log);

	down(&jfs_log_sem);
	LOG_LOCK(log);
	list_del(&sbi->log_list);
	LOG_UNLOCK(log);
	sbi->log = NULL;

	/*
	 * We need to make sure all of the "written" metapages
	 * actually make it to disk
	 */
	sync_blockdev(sb->s_bdev);

	if (test_bit(log_INLINELOG, &log->flag)) {
		/*
		 *	in-line log in host file system
		 */
		rc = lmLogShutdown(log);
		kfree(log);
		goto out;
	}

	if (!log->no_integrity)
		lmLogFileSystem(log, sbi, 0);

	if (!list_empty(&log->sb_list))
		goto out;

	/*
	 * TODO: ensure that the dummy_log is in a state to allow
	 * lbmLogShutdown to deallocate all the buffers and call
	 * kfree against dummy_log.  For now, leave dummy_log & its
	 * buffers in memory, and reuse if another no-integrity mount
	 * is requested.
	 */
	if (log->no_integrity)
		goto out;

	/*
	 *	external log as separate logical volume
	 */
	list_del(&log->journal_list);
	bdev = log->bdev;
	rc = lmLogShutdown(log);

	bd_release(bdev);
	blkdev_put(bdev);

	kfree(log);

      out:
	up(&jfs_log_sem);
	jfs_info("lmLogClose: exit(%d)", rc);
	return rc;
}

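/*
 * Editor's note: the external-journal device lifecycle pairs across
 * lmLogOpen() and lmLogClose() above - the open path takes the device
 * with open_by_devnum() + bd_claim(), and the close path releases it
 * with bd_release() + blkdev_put(), but only once the last sharing
 * filesystem has detached (log->sb_list empty).
 */
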
/*
 * NAME:	jfs_flush_journal()
 *
 * FUNCTION:	initiate write of any outstanding transactions to the
 *		journal and optionally wait until they are all written
 *		to disk
 *
 *	wait == 0  flush until latest txn is committed, don't wait
 *	wait == 1  flush until latest txn is committed, wait
 *	wait > 1   flush until all txn's are complete, wait
 */
void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target = NULL;
	struct jfs_sb_info *sbi;

	/* jfs_write_inode may call us during read-only mount */
	if (!log)
		return;

	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);

	LOGGC_LOCK(log);

	if (!list_empty(&log->cqueue)) {
		/*
		 * This ensures that we will keep writing to the journal as long
		 * as there are unwritten commit records
		 */
		target = list_entry(log->cqueue.prev, struct tblock, cqueue);

		if (test_bit(log_FLUSH, &log->flag)) {
			/*
			 * We're already flushing.
			 * if flush_tblk is NULL, we are flushing everything,
			 * so leave it that way.  Otherwise, update it to the
			 * latest transaction
			 */
			if (log->flush_tblk)
				log->flush_tblk = target;
		} else {
			/* Only flush until latest transaction is committed */
			log->flush_tblk = target;
			set_bit(log_FLUSH, &log->flag);

			/*
			 * Initiate I/O on outstanding transactions
			 */
			if (!(log->cflag & logGC_PAGEOUT)) {
				log->cflag |= logGC_PAGEOUT;
				lmGCwrite(log, 0);
			}
		}
	}
	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
		/* Flush until all activity complete */
		set_bit(log_FLUSH, &log->flag);
		log->flush_tblk = NULL;
	}

	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(__wait, current);

		add_wait_queue(&target->gcwait, &__wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		LOGGC_UNLOCK(log);
		schedule();
		current->state = TASK_RUNNING;
		LOGGC_LOCK(log);
		remove_wait_queue(&target->gcwait, &__wait);
	}
	LOGGC_UNLOCK(log);

	if (wait < 2)
		return;

	list_for_each_entry(sbi, &log->sb_list, log_list) {
		filemap_fdatawrite(sbi->ipbmap->i_mapping);
		filemap_fdatawrite(sbi->ipimap->i_mapping);
		filemap_fdatawrite(sbi->direct_inode->i_mapping);
	}

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 */
	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
		for (i = 0; i < 200; i++) {	/* Too much? */
			msleep(250);
			if (list_empty(&log->cqueue) &&
			    list_empty(&log->synclist))
				break;
		}
	}
	assert(list_empty(&log->cqueue));

#ifdef CONFIG_JFS_DEBUG
	if (!list_empty(&log->synclist)) {
		struct logsyncblk *lp;

		list_for_each_entry(lp, &log->synclist, synclist) {
			if (lp->xflag & COMMIT_PAGE) {
				struct metapage *mp = (struct metapage *)lp;
				dump_mem("orphan metapage", lp,
					 sizeof(struct metapage));
				dump_mem("page", mp->page, sizeof(struct page));
			}
			else
				dump_mem("orphan tblock", lp,
					 sizeof(struct tblock));
		}
	}
#endif
	//assert(list_empty(&log->synclist));
	clear_bit(log_FLUSH, &log->flag);
}

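/*
 * Editor's note on shutdown ordering, as implemented below: lmLogShutdown()
 * first drains everything with jfs_flush_journal(log, 2), then writes a
 * final SYNCPT record, and only then rewrites the log superblock with
 * state LOGREDONE and end = lsn, so a clean shutdown is distinguishable
 * from a crash (which would leave state LOGMOUNT and force log replay).
 */
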
/*
 * NAME:	lmLogShutdown()
 *
 * FUNCTION:	log shutdown at last LogClose().
 *
 *		write log syncpt record.
 *		update super block to set redone flag to 0.
 *
 * PARAMETER:	log	- log inode
 *
 * RETURN:	0	- success
 *
 * serialization: single last close thread
 */
int lmLogShutdown(struct jfs_log * log)
{
	int rc;
	struct lrd lrd;
	int lsn;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;

	jfs_info("lmLogShutdown: log:0x%p", log);

	jfs_flush_journal(log, 2);

	/*
	 * write the last SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !)
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;

	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
	lbmIOWait(log->bp, lbmFREE);
	log->bp = NULL;

	/*
	 * synchronous update log superblock
	 * mark log state as shutdown cleanly
	 * (i.e., Log does not need to be replayed).
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto out;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->end = cpu_to_le32(lsn);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
		 lsn, log->page, log->eor);

      out:
	/*
	 * shutdown per log i/o
	 */
	lbmLogShutdown(log);

	if (rc) {
		jfs_warn("lmLogShutdown: exit(%d)", rc);
	}
	return rc;
}


/*
 * NAME:	lmLogFileSystem()
 *
 * FUNCTION:	insert (<activate> = true)/remove (<activate> = false)
 *	file system into/from log active file system list.
 *
 * PARAMETER:	log	- pointer to log structure
 *		sbi	- jfs_sb_info of the file system
 *		activate - insert/remove device from active list.
 *
 * RETURN:	0	- success
 *		errors returned by lbmIOWait().
 */
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
			   int activate)
{
	int rc = 0;
	int i;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	char *uuid = sbi->uuid;

	/*
	 * insert/remove file system device to log active file system list.
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		return rc;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	if (activate) {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
				memcpy(logsuper->active[i].uuid, uuid, 16);
				sbi->aggregate = i;
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Too many file systems sharing journal!");
			lbmFree(bpsuper);
			return -EMFILE;	/* Is there a better rc? */
		}
	} else {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, uuid, 16)) {
				memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Somebody stomped on the journal!");
			lbmFree(bpsuper);
			return -EIO;
		}

	}

	/*
	 * synchronous write log superblock:
	 *
	 * write sidestream bypassing write queue:
	 * at file system mount, log super block is updated for
	 * activation of the file system before any log record
	 * (MOUNT record) of the file system, and at file system
	 * unmount, all meta data for the file system has been
	 * flushed before log super block is updated for deactivation
	 * of the file system.
	 */
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	return rc;
}

/*
 *		log buffer manager (lbm)
 *		------------------------
 *
 * special purpose buffer manager supporting log i/o requirements.
 *
 * per log write queue:
 * log pageout occurs in serial order by fifo write queue and
 * restricting to a single i/o in progress at any one time.
 * a circular singly-linked list
 * (log->wqueue points to the tail, and buffers are linked via
 * bp->l_wqnext field), and
 * maintains log page in pageout or waiting for pageout in serial pageout.
 */
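
/*
 * Editor's sketch of the circular write queue (from lbmWrite/lbmIODone
 * below): with buffers A, B, C queued, log->wqueue points at the tail C
 * and C->l_wqnext wraps to the head A, i.e. A -> B -> C -> A.  Enqueue
 * links the new buffer after the tail and retargets log->wqueue; I/O is
 * only in flight for the head, and completion of the head redrives the
 * next buffer via the jfsIO thread.
 */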

/*
 *	lbmLogInit()
 *
 * initialize per log I/O setup at lmLogInit()
 */
static int lbmLogInit(struct jfs_log * log)
{				/* log inode */
	int i;
	struct lbuf *lbuf;

	jfs_info("lbmLogInit: log:0x%p", log);

	/* initialize current buffer cursor */
	log->bp = NULL;

	/* initialize log device write queue */
	log->wqueue = NULL;

	/*
	 * Each log has its own buffer pages allocated to it.  These are
	 * not managed by the page cache.  This ensures that a transaction
	 * writing to the log does not block trying to allocate a page from
	 * the page cache (for the log).  This would be bad, since page
	 * allocation waits on the kswapd thread that may be committing inodes
	 * which would cause log activity.  Was that clear?  I'm trying to
	 * avoid deadlock here.
	 */
	init_waitqueue_head(&log->free_wait);

	log->lbuf_free = NULL;

	for (i = 0; i < LOGPAGES;) {
		char *buffer;
		uint offset;
		struct page *page;

		buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (buffer == NULL)
			goto error;
		page = virt_to_page(buffer);
		for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
			lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
			if (lbuf == NULL) {
				if (offset == 0)
					free_page((unsigned long) buffer);
				goto error;
			}
			if (offset)	/* we already have one reference */
				get_page(page);
			lbuf->l_offset = offset;
			lbuf->l_ldata = buffer + offset;
			lbuf->l_page = page;
			lbuf->l_log = log;
			init_waitqueue_head(&lbuf->l_ioevent);

			lbuf->l_freelist = log->lbuf_free;
			log->lbuf_free = lbuf;
			i++;
		}
	}

	return (0);

      error:
	lbmLogShutdown(log);
	return -ENOMEM;
}


/*
 *	lbmLogShutdown()
 *
 * finalize per log I/O setup at lmLogShutdown()
 */
static void lbmLogShutdown(struct jfs_log * log)
{
	struct lbuf *lbuf;

	jfs_info("lbmLogShutdown: log:0x%p", log);

	lbuf = log->lbuf_free;
	while (lbuf) {
		struct lbuf *next = lbuf->l_freelist;
		__free_page(lbuf->l_page);
		kfree(lbuf);
		lbuf = next;
	}
}


/*
 *	lbmAllocate()
 *
 * allocate an empty log buffer
 */
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{
	struct lbuf *bp;
	unsigned long flags;

	/*
	 * recycle from log buffer freelist if any
	 */
	LCACHE_LOCK(flags);
	LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
	log->lbuf_free = bp->l_freelist;
	LCACHE_UNLOCK(flags);

	bp->l_flag = 0;

	bp->l_wqnext = NULL;
	bp->l_freelist = NULL;

	bp->l_pn = pn;
	bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));
	bp->l_ceor = 0;

	return bp;
}


/*
 *	lbmFree()
 *
 * release a log buffer to freelist
 */
static void lbmFree(struct lbuf * bp)
{
	unsigned long flags;

	LCACHE_LOCK(flags);

	lbmfree(bp);

	LCACHE_UNLOCK(flags);
}

static void lbmfree(struct lbuf * bp)
{
	struct jfs_log *log = bp->l_log;

	assert(bp->l_wqnext == NULL);

	/*
	 * return the buffer to head of freelist
	 */
	bp->l_freelist = log->lbuf_free;
	log->lbuf_free = bp;

	wake_up(&log->free_wait);
	return;
}

/*
 * NAME:	lbmRedrive
 *
 * FUNCTION:	add a log buffer to the log redrive list
 *
 * PARAMETER:
 *	bp	- log buffer
 *
 * NOTES:
 *	Takes log_redrive_lock.
 */
static inline void lbmRedrive(struct lbuf *bp)
{
	unsigned long flags;

	spin_lock_irqsave(&log_redrive_lock, flags);
	bp->l_redrive_next = log_redrive_list;
	log_redrive_list = bp;
	spin_unlock_irqrestore(&log_redrive_lock, flags);

	wake_up(&jfs_IO_thread_wait);
}


/*
 *	lbmRead()
 */
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
{
	struct bio *bio;
	struct lbuf *bp;

	/*
	 * allocate a log buffer
	 */
	*bpp = bp = lbmAllocate(log, pn);
	jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn);

	bp->l_flag |= lbmREAD;

	bio = bio_alloc(GFP_NOFS, 1);

	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;
	submit_bio(READ_SYNC, bio);

	wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));

	return 0;
}

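/*
 * Editor's note on the sector arithmetic used above and in lbmStartIO():
 * l_blkno counts log-device blocks of 2^l2bsize bytes, while bi_sector
 * counts 512-byte sectors, hence the shift by (l2bsize - 9); e.g. with
 * 4 KB log blocks (l2bsize = 12), block 5 becomes sector 5 << 3 = 40.
 */
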
/*
 *	lbmWrite()
 *
 * buffer at head of pageout queue stays after completion of
 * partial-page pageout and redriven by explicit initiation of
 * pageout by caller until full-page pageout is completed and
 * released.
 *
 * device driver i/o done redrives pageout of new buffer at
 * head of pageout queue when current buffer at head of pageout
 * queue is released at the completion of its full-page pageout.
 *
 * LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
 * LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
 */
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
		     int cant_block)
{
	struct lbuf *tail;
	unsigned long flags;

	jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn);

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	LCACHE_LOCK(flags);		/* disable+lock */

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag;

	/*
	 *	insert bp at tail of write queue associated with log
	 *
	 * (request is either for bp already/currently at head of queue
	 * or new bp to be inserted at tail)
	 */
	tail = log->wqueue;

	/* is buffer not already on write queue ? */
	if (bp->l_wqnext == NULL) {
		/* insert at tail of wqueue */
		if (tail == NULL) {
			log->wqueue = bp;
			bp->l_wqnext = bp;
		} else {
			log->wqueue = bp;
			bp->l_wqnext = tail->l_wqnext;
			tail->l_wqnext = bp;
		}

		tail = bp;
	}

	/* is buffer at head of wqueue and for write ? */
	if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) {
		LCACHE_UNLOCK(flags);	/* unlock+enable */
		return;
	}

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	if (cant_block)
		lbmRedrive(bp);
	else if (flag & lbmSYNC)
		lbmStartIO(bp);
	else {
		LOGGC_UNLOCK(log);
		lbmStartIO(bp);
		LOGGC_LOCK(log);
	}
}


/*
 *	lbmDirectWrite()
 *
 * initiate pageout bypassing write queue for sidestream
 * (e.g., log superblock) write;
 */
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
{
	jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
		 bp, flag, bp->l_pn);

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag | lbmDIRECT;

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	/*
	 *	initiate pageout of the page
	 */
	lbmStartIO(bp);
}


/*
 * NAME:	lbmStartIO()
 *
 * FUNCTION:	Interface to DD strategy routine
 *
 * RETURN:	none
 *
 * serialization: LCACHE_LOCK() is NOT held during log i/o;
 */
static void lbmStartIO(struct lbuf * bp)
{
	struct bio *bio;
	struct jfs_log *log = bp->l_log;

	jfs_info("lbmStartIO");

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;

	/* check if journaling to disk has been disabled */
	if (log->no_integrity) {
		bio->bi_size = 0;
		lbmIODone(bio, 0, 0);
	} else {
		submit_bio(WRITE_SYNC, bio);
		INCREMENT(lmStat.submitted);
	}
}

/*
 *	lbmIOWait()
 */
static int lbmIOWait(struct lbuf * bp, int flag)
{
	unsigned long flags;
	int rc = 0;

	jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags);

	rc = (bp->l_flag & lbmERROR) ? -EIO : 0;

	if (flag & lbmFREE)
		lbmfree(bp);

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
	return rc;
}

/*
 *	lbmIODone()
 *
 * executed at INTIODONE level
 */
static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
{
	struct lbuf *bp = bio->bi_private;
	struct lbuf *nextbp, *tail;
	struct jfs_log *log;
	unsigned long flags;

	if (bio->bi_size)
		return 1;

	/*
	 * get back jfs buffer bound to the i/o buffer
	 */
	jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	bp->l_flag |= lbmDONE;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		bp->l_flag |= lbmERROR;

		jfs_err("lbmIODone: I/O error in JFS log");
	}

	bio_put(bio);

	/*
	 *	pagein completion
	 */
	if (bp->l_flag & lbmREAD) {
		bp->l_flag &= ~lbmREAD;

		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator */
		LCACHE_WAKEUP(&bp->l_ioevent);

		return 0;
	}

	/*
	 *	pageout completion
	 *
	 * the bp at the head of write queue has completed pageout.
	 *
	 * if single-commit/full-page pageout, remove the current buffer
	 * from head of pageout queue, and redrive pageout with
	 * the new buffer at head of pageout queue;
	 * otherwise, the partial-page pageout buffer stays at
	 * the head of pageout queue to be redriven for pageout
	 * by lmGroupCommit() until full-page pageout is completed.
	 */
	bp->l_flag &= ~lbmWRITE;
	INCREMENT(lmStat.pagedone);

	/* update committed lsn */
	log = bp->l_log;
	log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor;

	if (bp->l_flag & lbmDIRECT) {
		LCACHE_WAKEUP(&bp->l_ioevent);
		LCACHE_UNLOCK(flags);
		return 0;
	}

	tail = log->wqueue;

	/* single element queue */
	if (bp == tail) {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			log->wqueue = NULL;
			bp->l_wqnext = NULL;
		}
	}
	/* multi element queue */
	else {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			nextbp = tail->l_wqnext = bp->l_wqnext;
			bp->l_wqnext = NULL;

			/*
			 * redrive pageout of next page at head of write queue:
			 * redrive next page without any bound tblk
			 * (i.e., page w/o any COMMIT records), or
			 * first page of new group commit which has been
			 * queued after current page (subsequent pageout
			 * is performed synchronously, except page without
			 * any COMMITs) by lmGroupCommit() as indicated
			 * by lbmWRITE flag;
			 */
			if (nextbp->l_flag & lbmWRITE) {
				/*
				 * We can't do the I/O at interrupt time.
				 * The jfsIO thread can do it
				 */
				lbmRedrive(nextbp);
			}
		}
	}

	/*
	 *	synchronous pageout:
	 *
	 * buffer has not necessarily been removed from write queue
	 * (e.g., synchronous write of partial-page with COMMIT):
	 * leave buffer for i/o initiator to dispose
	 */
	if (bp->l_flag & lbmSYNC) {
		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator */
		LCACHE_WAKEUP(&bp->l_ioevent);
	}

	/*
	 *	Group Commit pageout:
	 */
	else if (bp->l_flag & lbmGC) {
		LCACHE_UNLOCK(flags);
		lmPostGC(bp);
	}

	/*
	 *	asynchronous pageout:
	 *
	 * buffer must have been removed from write queue:
	 * insert buffer at head of freelist where it can be recycled
	 */
	else {
		assert(bp->l_flag & lbmRELEASE);
		assert(bp->l_flag & lbmFREE);
		lbmfree(bp);

		LCACHE_UNLOCK(flags);	/* unlock+enable */
	}

	return 0;
}
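/*
 * lbmIODone() above runs in interrupt context, where bio_alloc() must
 * not block, so it never submits the next pageout itself.  Instead
 * lbmRedrive() pushes the buffer onto log_redrive_list (a spinlock-
 * protected LIFO) and wakes the jfsIO thread, which re-issues the i/o
 * from process context in jfsIOWait() below.  The hand-off amounts to:
 *
 *	interrupt side:	bp->l_redrive_next = log_redrive_list;
 *			log_redrive_list = bp;
 *			wake_up(&jfs_IO_thread_wait);
 *
 *	thread side:	bp = log_redrive_list;
 *			log_redrive_list = bp->l_redrive_next;
 *			lbmStartIO(bp);
 */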
int jfsIOWait(void *arg)
{
	struct lbuf *bp;

	daemonize("jfsIO");

	complete(&jfsIOwait);

	do {
		DECLARE_WAITQUEUE(wq, current);

		spin_lock_irq(&log_redrive_lock);
		while ((bp = log_redrive_list) != NULL) {
			log_redrive_list = bp->l_redrive_next;
			bp->l_redrive_next = NULL;
			spin_unlock_irq(&log_redrive_lock);
			lbmStartIO(bp);
			spin_lock_irq(&log_redrive_lock);
		}
		if (freezing(current)) {
			spin_unlock_irq(&log_redrive_lock);
			refrigerator();
		} else {
			add_wait_queue(&jfs_IO_thread_wait, &wq);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&log_redrive_lock);
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&jfs_IO_thread_wait, &wq);
		}
	} while (!jfs_stop_threads);

	jfs_info("jfsIOWait being killed!");
	complete_and_exit(&jfsIOwait, 0);
}
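/*
 * Worked example of the lpsn seeding performed by lmLogFormat() below
 * (illustrative numbers only): for a log of npages = 256 pages, the
 * N = npages - 2 = 254 data pages live at pn 2..255.  Page pn = 2 is
 * written first with the highest lpsn, N - 1 = npages - 3 = 253; pages
 * pn = 3..255 then get lpsn 0..252.  Recovery's binary search for the
 * log end therefore always sees exactly one wrap point, as if the
 * circular file had already been written around once.
 */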
/*
 * NAME:	lmLogFormat()/jfs_logform()
 *
 * FUNCTION:	format file system log
 *
 * PARAMETERS:
 *	log	- volume log
 *	logAddress - start address of log space in FS block
 *	logSize	- length of log space in FS block;
 *
 * RETURN:	0	- success
 *		-EIO	- i/o error
 *
 * XXX: We're synchronously writing one page at a time.  This needs to
 *	be improved by writing multiple pages at once.
 */
int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
{
	int rc = -EIO;
	struct jfs_sb_info *sbi;
	struct logsuper *logsuper;
	struct logpage *lp;
	int lspn;		/* log sequence page number */
	struct lrd *lrd_ptr;
	int npages = 0;
	struct lbuf *bp;

	jfs_info("lmLogFormat: logAddress:%Ld logSize:%d",
		 (long long)logAddress, logSize);

	sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list);

	/* allocate a log buffer */
	bp = lbmAllocate(log, 1);

	npages = logSize >> sbi->l2nbperpage;

	/*
	 *	log space:
	 *
	 * page 0 - reserved;
	 * page 1 - log superblock;
	 * page 2 - log data page: A SYNC log record is written
	 *	    into this page at logform time;
	 * pages 3-N - log data page: set to empty log data pages;
	 */
	/*
	 *	init log superblock: log page 1
	 */
	logsuper = (struct logsuper *) bp->l_ldata;

	logsuper->magic = cpu_to_le32(LOGMAGIC);
	logsuper->version = cpu_to_le32(LOGVERSION);
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->flag = cpu_to_le32(sbi->mntflag);	/* ? */
	logsuper->size = cpu_to_le32(npages);
	logsuper->bsize = cpu_to_le32(sbi->bsize);
	logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
	logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);

	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	bp->l_blkno = logAddress + sbi->nbperpage;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 *	init pages 2 to npages-1 as log data pages:
	 *
	 * log page sequence number (lpsn) initialization:
	 *
	 * pn:    0     1     2     3                 n-1
	 *        +-----+-----+=====+=====+===.....===+=====+
	 * lspn:            N-1   0     1             N-2
	 *                  <--- N page circular file ---->
	 *
	 * the N (= npages-2) data pages of the log are maintained as
	 * a circular file for the log records;
	 * lpsn grows by 1 monotonically as each log page is written
	 * to the circular file of the log;
	 * and setLogpage() will not reset the page number even if
	 * the eor is equal to LOGPHDRSIZE.  In order for the binary
	 * search to still work in the find-log-end process, we have to
	 * simulate the log wrap situation at log format time.
	 * The 1st log page written will have the highest lpsn; the
	 * succeeding log pages will have lspn in ascending order
	 * starting from 0, ..., (N-2).
	 */
	lp = (struct logpage *) bp->l_ldata;
	/*
	 * initialize 1st log page to be written: lpsn = N - 1;
	 * a SYNCPT log record is written into this page
	 */
	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);

	lrd_ptr = (struct lrd *) &lp->data;
	lrd_ptr->logtid = 0;
	lrd_ptr->backchain = 0;
	lrd_ptr->type = cpu_to_le16(LOG_SYNCPT);
	lrd_ptr->length = 0;
	lrd_ptr->log.syncpt.sync = 0;

	bp->l_blkno += sbi->nbperpage;
	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 *	initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
	 */
	for (lspn = 0; lspn < npages - 3; lspn++) {
		lp->h.page = lp->t.page = cpu_to_le32(lspn);
		lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

		bp->l_blkno += sbi->nbperpage;
		bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
		lbmStartIO(bp);
		if ((rc = lbmIOWait(bp, 0)))
			goto exit;
	}

	rc = 0;
exit:
	/*
	 *	finalize log
	 */
	/* release the buffer */
	lbmFree(bp);

	return rc;
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
		     int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS Logmgr stats\n"
		       "================\n"
		       "commits = %d\n"
		       "writes submitted = %d\n"
		       "writes completed = %d\n"
		       "full pages submitted = %d\n"
		       "partial pages submitted = %d\n",
		       lmStat.commit,
		       lmStat.submitted,
		       lmStat.pagedone,
		       lmStat.full_page,
		       lmStat.partial_page);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif				/* CONFIG_JFS_STATISTICS */
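/*
 * jfs_lmstats_read() above follows the classic read_proc contract:
 * fill 'buffer', point *start past the bytes already consumed
 * ('offset'), clamp to 'length', and set *eof once the whole report
 * fits.  The actual procfs registration lives outside this file; a
 * minimal sketch of such a hookup, assuming a hypothetical "jfs"
 * directory entry named jfs_proc_root (illustrative only, compiled
 * out):
 */
#if 0
static void jfs_register_lmstats(struct proc_dir_entry *jfs_proc_root)
{
	create_proc_read_entry("lmstats", 0, jfs_proc_root,
			       jfs_lmstats_read, NULL);
}
#endif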