/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * jfs_logmgr.c: log manager
 *
 * for related information, see transaction manager (jfs_txnmgr.c), and
 * recovery manager (jfs_logredo.c).
 *
 * note: for detail, RTFS.
 *
 * log buffer manager:
 * special purpose buffer manager supporting log i/o requirements.
 * per log serial pageout of logpage
 * queuing i/o requests and redriving i/o at iodone
 * maintain current logpage buffer
 * no caching since append only
 * appropriate jfs buffer cache buffers as needed
 *
 * group commit:
 * transactions which wrote COMMIT records in the same in-memory
 * log page during the pageout of previous/current log page(s) are
 * committed together by the pageout of the page.
 *
 * TBD lazy commit:
 * transactions are committed asynchronously when the log page
 * containing its COMMIT record is paged out when it becomes full;
 *
 * serialization:
 * . a per-log lock serializes log writes.
 * . a per-log lock serializes group commit.
 * . a per-log lock serializes log open/close;
 *
 * TBD log integrity:
 * careful-write (ping-pong) of last logpage to recover from crash
 * in overwrite.
 * detection of split (out-of-order) write of physical sectors
 * of last logpage via timestamp at end of each sector
 * (with its mirror data array at trailer).
 *
 * alternatives:
 * lsn - 64-bit monotonically increasing integer vs
 * 32-bit lspn and page eor.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/buffer_head.h>		/* for sync_blockdev() */
#include <linux/bio.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

/*
 * lbuf's ready to be redriven.  Protected by log_redrive_lock (jfsIO thread)
 */
static struct lbuf *log_redrive_list;
static DEFINE_SPINLOCK(log_redrive_lock);


/*
 * log read/write serialization (per log)
 */
#define LOG_LOCK_INIT(log)	mutex_init(&(log)->loglock)
#define LOG_LOCK(log)		mutex_lock(&((log)->loglock))
#define LOG_UNLOCK(log)		mutex_unlock(&((log)->loglock))


/*
 * log group commit serialization (per log)
 */
#define LOGGC_LOCK_INIT(log)	spin_lock_init(&(log)->gclock)
#define LOGGC_LOCK(log)		spin_lock_irq(&(log)->gclock)
#define LOGGC_UNLOCK(log)	spin_unlock_irq(&(log)->gclock)
#define LOGGC_WAKEUP(tblk)	wake_up_all(&(tblk)->gcwait)

/*
 * log sync serialization (per log)
 */
#define	LOGSYNC_DELTA(logsize)		min((logsize)/8, 128*LOGPSIZE)
#define	LOGSYNC_BARRIER(logsize)	((logsize)/4)
/*
#define	LOGSYNC_DELTA(logsize)		min((logsize)/4, 256*LOGPSIZE)
#define	LOGSYNC_BARRIER(logsize)	((logsize)/2)
*/


/*
 * log buffer cache synchronization
 */
static DEFINE_SPINLOCK(jfsLCacheLock);

#define	LCACHE_LOCK(flags)	spin_lock_irqsave(&jfsLCacheLock, flags)
#define	LCACHE_UNLOCK(flags)	spin_unlock_irqrestore(&jfsLCacheLock, flags)

/*
 * See __SLEEP_COND in jfs_locks.h
 */
#define LCACHE_SLEEP_COND(wq, cond, flags)	\
do {						\
	if (cond)				\
		break;				\
	__SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \
} while (0)

#define	LCACHE_WAKEUP(event)	wake_up(event)


/*
 * lbuf buffer cache (lCache) control
 */
/* log buffer manager pageout control (cumulative, inclusive) */
#define	lbmREAD		0x0001
#define	lbmWRITE	0x0002	/* enqueue at tail of write queue;
				 * init pageout if at head of queue;
				 */
#define	lbmRELEASE	0x0004	/* remove from write queue
				 * at completion of pageout;
				 * do not free/recycle it yet:
				 * caller will free it;
				 */
#define	lbmSYNC		0x0008	/* do not return to freelist
				 * when removed from write queue;
				 */
#define lbmFREE		0x0010	/* return to freelist
				 * at completion of pageout;
				 * the buffer may be recycled;
				 */
#define	lbmDONE		0x0020
#define	lbmERROR	0x0040
#define lbmGC		0x0080	/* lbmIODone to perform post-GC processing
				 * of log page
				 */
#define lbmDIRECT	0x0100

/*
 * Global list of active external journals
 */
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log = NULL;
static DEFINE_MUTEX(jfs_log_mutex);

/*
 * forward references
 */
static int lmWriteRecord(struct jfs_log *log, struct tblock *tblk,
			 struct lrd *lrd, struct tlock *tlck);

static int lmNextPage(struct jfs_log *log);
static int lmLogFileSystem(struct jfs_log *log, struct jfs_sb_info *sbi,
			   int activate);

static int open_inline_log(struct super_block *sb);
static int open_dummy_log(struct super_block *sb);
static int lbmLogInit(struct jfs_log *log);
static void lbmLogShutdown(struct jfs_log *log);
static struct lbuf *lbmAllocate(struct jfs_log *log, int);
static void lbmFree(struct lbuf *bp);
static void lbmfree(struct lbuf *bp);
static int lbmRead(struct jfs_log *log, int pn, struct lbuf **bpp);
static void lbmWrite(struct jfs_log *log, struct lbuf *bp, int flag,
		     int cant_block);
static void lbmDirectWrite(struct jfs_log *log, struct lbuf *bp, int flag);
static int lbmIOWait(struct lbuf *bp, int flag);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf *bp);
static void lmGCwrite(struct jfs_log *log, int cant_block);
static int lmLogSync(struct jfs_log *log, int hard_sync);

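/*
 * A sense of scale for the syncpt macros above (a sketch, assuming
 * LOGPSIZE is the 4 KiB log page size from jfs_filsys.h): for a
 * 32 MiB log,
 *
 *	LOGSYNC_DELTA   = min(32 MiB / 8, 128 * 4 KiB) = 512 KiB
 *	LOGSYNC_BARRIER = 32 MiB / 4 = 8 MiB
 *
 * i.e. lmLogSync() is invoked after roughly every 512 KiB of new log
 * data, and a sync barrier is raised once more than 8 MiB has been
 * written past the last sync point (see lmLogSync() below).
 */
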
/*
 * statistics
 */
#ifdef CONFIG_JFS_STATISTICS
static struct lmStat {
	uint	commit;		/* # of commit */
	uint	pagedone;	/* # of page written */
	uint	submitted;	/* # of pages submitted */
	uint	full_page;	/* # of full pages submitted */
	uint	partial_page;	/* # of partial pages submitted */
} lmStat;
#endif

static void write_special_inodes(struct jfs_log *log,
				 int (*writer)(struct address_space *))
{
	struct jfs_sb_info *sbi;

	list_for_each_entry(sbi, &log->sb_list, log_list) {
		writer(sbi->ipbmap->i_mapping);
		writer(sbi->ipimap->i_mapping);
		writer(sbi->direct_inode->i_mapping);
	}
}

/*
 * NAME:	lmLog()
 *
 * FUNCTION:	write a log record;
 *
 * PARAMETER:
 *
 * RETURN:	lsn - offset to the next log record to write (end-of-log);
 *		-1  - error;
 *
 * note: todo: log error handler
 */
int lmLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
	  struct tlock *tlck)
{
	int lsn;
	int diffp, difft;
	struct metapage *mp = NULL;
	unsigned long flags;

	jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
		 log, tblk, lrd, tlck);

	LOG_LOCK(log);

	/* log by (out-of-transaction) JFS ? */
	if (tblk == NULL)
		goto writeRecord;

	/* log from page ? */
	if (tlck == NULL ||
	    tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL)
		goto writeRecord;

	/*
	 * initialize/update page/transaction recovery lsn
	 */
	lsn = log->lsn;

	LOGSYNC_LOCK(log, flags);

	/*
	 * initialize page lsn if first log write of the page
	 */
	if (mp->lsn == 0) {
		mp->log = log;
		mp->lsn = lsn;
		log->count++;

		/* insert page at tail of logsynclist */
		list_add_tail(&mp->synclist, &log->synclist);
	}

	/*
	 * initialize/update lsn of tblock of the page
	 *
	 * transaction inherits oldest lsn of pages associated
	 * with allocation/deallocation of resources (their
	 * log records are used to reconstruct allocation map
	 * at recovery time: inode for inode allocation map,
	 * B+-tree index of extent descriptors for block
	 * allocation map);
	 * allocation map pages inherit transaction lsn at
	 * commit time to allow forwarding log syncpt past log
	 * records associated with allocation/deallocation of
	 * resources only after persistent map of these map pages
	 * has been updated and propagated to home.
	 */
	/*
	 * initialize transaction lsn:
	 */
	if (tblk->lsn == 0) {
		/* inherit lsn of its first page logged */
		tblk->lsn = mp->lsn;
		log->count++;

		/* insert tblock after the page on logsynclist */
		list_add(&tblk->synclist, &mp->synclist);
	}
	/*
	 * update transaction lsn:
	 */
	else {
		/* inherit oldest/smallest lsn of page */
		logdiff(diffp, mp->lsn, log);
		logdiff(difft, tblk->lsn, log);
		if (diffp < difft) {
			/* update tblock lsn with page lsn */
			tblk->lsn = mp->lsn;

			/* move tblock after page on logsynclist */
			list_move(&tblk->synclist, &mp->synclist);
		}
	}

	LOGSYNC_UNLOCK(log, flags);

	/*
	 * write the log record
	 */
writeRecord:
	lsn = lmWriteRecord(log, tblk, lrd, tlck);

	/*
	 * forward log syncpt if log reached next syncpt trigger
	 */
	logdiff(diffp, lsn, log);
	if (diffp >= log->nextsync)
		lsn = lmLogSync(log, 0);

	/* update end-of-log lsn */
	log->lsn = lsn;

	LOG_UNLOCK(log);

	/* return end-of-log address */
	return lsn;
}

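/*
 * logdiff(), used above to order lsn's, is a macro from jfs_logmgr.h.
 * In rough sketch form it computes the distance of an lsn past the
 * current sync point, modulo the circular log:
 *
 *	diff = (lsn) - (log)->syncpt;
 *	if (diff < 0)
 *		diff += (log)->logsize;
 *
 * so diffp/difft are byte distances, and the smaller distance is the
 * older lsn within the current log cycle.
 */
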
/*
 * NAME:	lmWriteRecord()
 *
 * FUNCTION:	move the log record to current log page
 *
 * PARAMETER:	cd	- commit descriptor
 *
 * RETURN:	end-of-log address
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int
lmWriteRecord(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
	      struct tlock *tlck)
{
	int lsn = 0;		/* end-of-log address */
	struct lbuf *bp;	/* dst log page buffer */
	struct logpage *lp;	/* dst log page */
	caddr_t dst;		/* destination address in log page */
	int dstoffset;		/* end-of-log offset in log page */
	int freespace;		/* free space in log page */
	caddr_t p;		/* src meta-data page */
	caddr_t src;
	int srclen;
	int nbytes;		/* number of bytes to move */
	int i;
	int len;
	struct linelock *linelock;
	struct lv *lv;
	struct lvd *lvd;
	int l2linesize;

	len = 0;

	/* retrieve destination log page to write */
	bp = (struct lbuf *) log->bp;
	lp = (struct logpage *) bp->l_ldata;
	dstoffset = log->eor;

	/* any log data to write ? */
	if (tlck == NULL)
		goto moveLrd;

	/*
	 * move log record data
	 */
	/* retrieve source meta-data page to log */
	if (tlck->flag & tlckPAGELOCK) {
		p = (caddr_t) (tlck->mp->data);
		linelock = (struct linelock *) &tlck->lock;
	}
	/* retrieve source in-memory inode to log */
	else if (tlck->flag & tlckINODELOCK) {
		if (tlck->type & tlckDTREE)
			p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
		else
			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
		linelock = (struct linelock *) &tlck->lock;
	}
#ifdef	_JFS_WIP
	else if (tlck->flag & tlckINLINELOCK) {
		inlinelock = (struct inlinelock *) &tlck;
		p = (caddr_t) &inlinelock->pxd;
		linelock = (struct linelock *) &tlck;
	}
#endif	/* _JFS_WIP */
	else {
		jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
		return 0;	/* Probably should trap */
	}
	l2linesize = linelock->l2linesize;

moveData:
	ASSERT(linelock->index <= linelock->maxcnt);

	lv = linelock->lv;
	for (i = 0; i < linelock->index; i++, lv++) {
		if (lv->length == 0)
			continue;

		/* is page full ? */
		if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) {
			/* page became full: move on to next page */
			lmNextPage(log);

			bp = log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;
		}

		/*
		 * move log vector data
		 */
		src = (u8 *) p + (lv->offset << l2linesize);
		srclen = lv->length << l2linesize;
		len += srclen;
		while (srclen > 0) {
			freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
			nbytes = min(freespace, srclen);
			dst = (caddr_t) lp + dstoffset;
			memcpy(dst, src, nbytes);
			dstoffset += nbytes;

			/* is page not full ? */
			if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
				break;

			/* page became full: move on to next page */
			lmNextPage(log);

			bp = (struct lbuf *) log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;

			srclen -= nbytes;
			src += nbytes;
		}

		/*
		 * move log vector descriptor
		 */
		len += 4;
		lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
		lvd->offset = cpu_to_le16(lv->offset);
		lvd->length = cpu_to_le16(lv->length);
		dstoffset += 4;
		jfs_info("lmWriteRecord: lv offset:%d length:%d",
			 lv->offset, lv->length);
	}

	if ((i = linelock->next)) {
		linelock = (struct linelock *) lid_to_tlock(i);
		goto moveData;
	}

	/*
	 * move log record descriptor
	 */
moveLrd:
	lrd->length = cpu_to_le16(len);

	src = (caddr_t) lrd;
	srclen = LOGRDSIZE;

	while (srclen > 0) {
		freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
		nbytes = min(freespace, srclen);
		dst = (caddr_t) lp + dstoffset;
		memcpy(dst, src, nbytes);

		dstoffset += nbytes;
		srclen -= nbytes;

		/* is there more to move than free space on the page ? */
		if (srclen)
			goto pageFull;

		/*
		 * end of log record descriptor
		 */

		/* update last log record eor */
		log->eor = dstoffset;
		bp->l_eor = dstoffset;
		lsn = (log->page << L2LOGPSIZE) + dstoffset;

		if (lrd->type & cpu_to_le16(LOG_COMMIT)) {
			tblk->clsn = lsn;
			jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn,
				 bp->l_eor);

			INCREMENT(lmStat.commit);	/* # of commit */

			/*
			 * enqueue tblock for group commit:
			 *
			 * enqueue tblock of non-trivial/synchronous COMMIT
			 * at tail of group commit queue
			 * (trivial/asynchronous COMMITs are ignored by
			 * group commit.)
			 */
			LOGGC_LOCK(log);

			/* init tblock gc state */
			tblk->flag = tblkGC_QUEUE;
			tblk->bp = log->bp;
			tblk->pn = log->page;
			tblk->eor = log->eor;

			/* enqueue transaction to commit queue */
			list_add_tail(&tblk->cqueue, &log->cqueue);

			LOGGC_UNLOCK(log);
		}

		jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x",
			 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);

		/* page not full ? */
		if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
			return lsn;

pageFull:
		/* page became full: move on to next page */
		lmNextPage(log);

		bp = (struct lbuf *) log->bp;
		lp = (struct logpage *) bp->l_ldata;
		dstoffset = LOGPHDRSIZE;
		src += nbytes;
	}

	return lsn;
}

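/*
 * Net effect of lmWriteRecord(): the log vector data and the 4-byte
 * lvd descriptors land first, the fixed-size lrd descriptor last, so
 * a record with two log vectors is laid out on the page roughly as
 * (a sketch; header/trailer sizes come from jfs_logmgr.h):
 *
 *	...| lv0 data | lvd0 | lv1 data | lvd1 | lrd |...
 *	                                             ^
 *	                              log->eor / returned lsn
 *
 * and any of these pieces may be split across log pages by
 * lmNextPage().
 */
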
/*
 * NAME:	lmNextPage()
 *
 * FUNCTION:	write current page and allocate next page.
 *
 * PARAMETER:	log
 *
 * RETURN:	0
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmNextPage(struct jfs_log *log)
{
	struct logpage *lp;
	int lspn;		/* log sequence page number */
	int pn;			/* current page number */
	struct lbuf *bp;
	struct lbuf *nextbp;
	struct tblock *tblk;

	/* get current log page number and log sequence page number */
	pn = log->page;
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lspn = le32_to_cpu(lp->h.page);

	LOGGC_LOCK(log);

	/*
	 * write or queue the full page at the tail of write queue
	 */
	/* get the tail tblk on commit queue */
	if (list_empty(&log->cqueue))
		tblk = NULL;
	else
		tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);

	/* every tblk that has a COMMIT record on the current page,
	 * and has not been committed, must be on the commit queue,
	 * since a tblk is queued to the commit queue at the time
	 * of writing its COMMIT record on the page, before the
	 * page becomes full (even though the tblk thread
	 * that wrote the COMMIT record may currently be
	 * suspended);
	 */

	/* is page bound with outstanding tail tblk ? */
	if (tblk && tblk->pn == pn) {
		/* mark tblk for end-of-page */
		tblk->flag |= tblkGC_EOP;

		if (log->cflag & logGC_PAGEOUT) {
			/* if page is not already on write queue,
			 * just enqueue (no lbmWRITE to prevent redrive)
			 * buffer to wqueue to ensure correct serial order
			 * of the pages since log pages will be added
			 * continuously
			 */
			if (bp->l_wqnext == NULL)
				lbmWrite(log, bp, 0, 0);
		} else {
			/*
			 * No current GC leader, initiate group commit
			 */
			log->cflag |= logGC_PAGEOUT;
			lmGCwrite(log, 0);
		}
	}
	/* page is not bound with outstanding tblk:
	 * init write or mark it to be redriven (lbmWRITE)
	 */
	else {
		/* finalize the page */
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
	}
	LOGGC_UNLOCK(log);

	/*
	 * allocate/initialize next page
	 */
	/* if log wraps, the first data page of log is 2
	 * (0 never used, 1 is superblock).
	 */
	log->page = (pn == log->size - 1) ? 2 : pn + 1;
	log->eor = LOGPHDRSIZE;	/* ? valid page empty/full at logRedo() */

	/* allocate/initialize next log page buffer */
	nextbp = lbmAllocate(log, log->page);
	nextbp->l_eor = log->eor;
	log->bp = nextbp;

	/* initialize next log page */
	lp = (struct logpage *) nextbp->l_ldata;
	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

	return 0;
}

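/*
 * Page numbering above is circular over the data pages only: with a
 * log of log->size pages, the current page advances 2, 3, ...,
 * size - 1 and then wraps back to 2, skipping page 0 (never used)
 * and page 1 (log superblock).  E.g. for a 1024-page log the
 * successor of page 1023 is page 2.
 */
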
/*
 * NAME:	lmGroupCommit()
 *
 * FUNCTION:	group commit
 *	initiate pageout of the pages with COMMIT in the order of
 *	page number - redrive pageout of the page at the head of
 *	pageout queue until full page has been written.
 *
 * RETURN:
 *
 * NOTE:
 *	LOGGC_LOCK serializes log group commit queue, and
 *	transaction blocks on the commit queue.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
int lmGroupCommit(struct jfs_log *log, struct tblock *tblk)
{
	int rc = 0;

	LOGGC_LOCK(log);

	/* group committed already ? */
	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}
	jfs_info("lmGroupCommit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);

	if (tblk->xflag & COMMIT_LAZY)
		tblk->flag |= tblkGC_LAZY;

	if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
	    (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
	     || jfs_tlocks_low)) {
		/*
		 * No pageout in progress
		 *
		 * start group commit as its group leader.
		 */
		log->cflag |= logGC_PAGEOUT;

		lmGCwrite(log, 0);
	}

	if (tblk->xflag & COMMIT_LAZY) {
		/*
		 * Lazy transactions can leave now
		 */
		LOGGC_UNLOCK(log);
		return 0;
	}

	/* lmGCwrite gives up LOGGC_LOCK, check again */

	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}

	/* count up transactions waiting for completion
	 */
	log->gcrtc++;
	tblk->flag |= tblkGC_READY;

	__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
		     LOGGC_LOCK(log), LOGGC_UNLOCK(log));

	/* removed from commit queue */
	if (tblk->flag & tblkGC_ERROR)
		rc = -EIO;

	LOGGC_UNLOCK(log);
	return rc;
}

/*
 * NAME:	lmGCwrite()
 *
 * FUNCTION:	group commit write
 *	initiate write of log page, building a group of all transactions
 *	with commit records on that page.
 *
 * RETURN:	None
 *
 * NOTE:
 *	LOGGC_LOCK must be held by caller.
 *	N.B. LOG_LOCK is NOT held during lmGCwrite().
 */
static void lmGCwrite(struct jfs_log *log, int cant_write)
{
	struct lbuf *bp;
	struct logpage *lp;
	int gcpn;		/* group commit page number */
	struct tblock *tblk;
	struct tblock *xtblk = NULL;

	/*
	 * build the commit group of a log page
	 *
	 * scan commit queue and make a commit group of all
	 * transactions with COMMIT records on the same log page.
	 */
	/* get the head tblk on the commit queue */
	gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;

	list_for_each_entry(tblk, &log->cqueue, cqueue) {
		if (tblk->pn != gcpn)
			break;

		xtblk = tblk;

		/* state transition: (QUEUE, READY) -> COMMIT */
		tblk->flag |= tblkGC_COMMIT;
	}
	tblk = xtblk;		/* last tblk of the page */

	/*
	 * pageout to commit transactions on the log page.
	 */
	bp = (struct lbuf *) tblk->bp;
	lp = (struct logpage *) bp->l_ldata;
	/* is page already full ? */
	if (tblk->flag & tblkGC_EOP) {
		/* mark page to free at end of group commit of the page */
		tblk->flag &= ~tblkGC_EOP;
		tblk->flag |= tblkGC_FREE;
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
			 cant_write);
		INCREMENT(lmStat.full_page);
	}
	/* page is not yet full */
	else {
		bp->l_ceor = tblk->eor;	/* ? bp->l_ceor = bp->l_eor; */
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
		INCREMENT(lmStat.partial_page);
	}
}

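/*
 * tblk group-commit flag transitions, as driven by lmWriteRecord(),
 * the two routines above, and lmPostGC() below:
 *
 *	tblkGC_QUEUE	 - COMMIT record written and tblk queued on
 *			   log->cqueue (lmWriteRecord());
 *	tblkGC_READY	 - a waiter entered lmGroupCommit() and sleeps
 *			   on tblk->gcwait;
 *	tblkGC_COMMIT	 - lmGCwrite() shipped the page holding the
 *			   tblk's COMMIT record;
 *	tblkGC_COMMITTED - lmPostGC() saw that pageout complete
 *			   (plus tblkGC_ERROR if the write failed) and
 *			   wakes the waiter.
 */
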
/*
 * NAME:	lmPostGC()
 *
 * FUNCTION:	group commit post-processing
 *	Processes transactions after their commit records have been written
 *	to disk, redriving log I/O if necessary.
 *
 * RETURN:	None
 *
 * NOTE:
 *	This routine is called at interrupt time by lbmIODone
 */
static void lmPostGC(struct lbuf *bp)
{
	unsigned long flags;
	struct jfs_log *log = bp->l_log;
	struct logpage *lp;
	struct tblock *tblk, *temp;

	//LOGGC_LOCK(log);
	spin_lock_irqsave(&log->gclock, flags);
	/*
	 * current pageout of group commit completed.
	 *
	 * remove/wakeup transactions from commit queue that were
	 * group committed with the current log page
	 */
	list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
		if (!(tblk->flag & tblkGC_COMMIT))
			break;
		/* if transaction was marked GC_COMMIT then
		 * it has been shipped in the current pageout
		 * and made it to disk - it is committed.
		 */

		if (bp->l_flag & lbmERROR)
			tblk->flag |= tblkGC_ERROR;

		/* remove it from the commit queue */
		list_del(&tblk->cqueue);
		tblk->flag &= ~tblkGC_QUEUE;

		if (tblk == log->flush_tblk) {
			/* we can stop flushing the log now */
			clear_bit(log_FLUSH, &log->flag);
			log->flush_tblk = NULL;
		}

		jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
			 tblk->flag);

		if (!(tblk->xflag & COMMIT_FORCE))
			/*
			 * Hand tblk over to lazy commit thread
			 */
			txLazyUnlock(tblk);
		else {
			/* state transition: COMMIT -> COMMITTED */
			tblk->flag |= tblkGC_COMMITTED;

			if (tblk->flag & tblkGC_READY)
				log->gcrtc--;

			LOGGC_WAKEUP(tblk);
		}

		/* was page full before pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		if (tblk->flag & tblkGC_FREE)
			lbmFree(bp);
		/* did page become full after pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		else if (tblk->flag & tblkGC_EOP) {
			/* finalize the page */
			lp = (struct logpage *) bp->l_ldata;
			bp->l_ceor = bp->l_eor;
			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
			jfs_info("lmPostGC: calling lbmWrite");
			lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
				 1);
		}

	}

	/* are there any transactions that have entered lmGroupCommit()
	 * (whose COMMITs are after that of the last log page written)?
	 * They are waiting for new group commit (above at (SLEEP 1)),
	 * or lazy transactions are on a full (queued) log page;
	 * select the latest ready transaction as new group leader and
	 * wake her up to lead her group.
	 */
	if ((!list_empty(&log->cqueue)) &&
	    ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
	     test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
		/*
		 * Call lmGCwrite with new group leader
		 */
		lmGCwrite(log, 1);

	/* no transactions are ready yet (transactions are only just
	 * queued (GC_QUEUE) and not entered for group commit yet).
	 * the first transaction entering group commit
	 * will elect herself as new group leader.
	 */
	else
		log->cflag &= ~logGC_PAGEOUT;

	//LOGGC_UNLOCK(log);
	spin_unlock_irqrestore(&log->gclock, flags);
	return;
}

/*
 * NAME:	lmLogSync()
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *	if new sync address is available
 *	(normally the case if sync() is executed by a background
 *	process).
 *	calculate new value of i_nextsync which determines when
 *	this code is called again.
 *
 * PARAMETERS:	log	- log structure
 *		hard_sync - 1 to force all metadata to be written
 *
 * RETURN:	0
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmLogSync(struct jfs_log *log, int hard_sync)
{
	int logsize;
	int written;		/* written since last syncpt */
	int free;		/* free space left available */
	int delta;		/* additional delta to write normally */
	int more;		/* additional write granted */
	struct lrd lrd;
	int lsn;
	struct logsyncblk *lp;
	unsigned long flags;

	/* push dirty metapages out to disk */
	if (hard_sync)
		write_special_inodes(log, filemap_fdatawrite);
	else
		write_special_inodes(log, filemap_flush);

	/*
	 * forward syncpt
	 */
	/* if last sync is same as last syncpt,
	 * invoke sync point forward processing to update sync.
	 */

	if (log->sync == log->syncpt) {
		LOGSYNC_LOCK(log, flags);
		if (list_empty(&log->synclist))
			log->sync = log->lsn;
		else {
			lp = list_entry(log->synclist.next,
					struct logsyncblk, synclist);
			log->sync = lp->lsn;
		}
		LOGSYNC_UNLOCK(log, flags);

	}

	/* if sync is different from last syncpt,
	 * write a SYNCPT record with syncpt = sync.
	 * reset syncpt = sync
	 */
	if (log->sync != log->syncpt) {
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = cpu_to_le32(log->sync);
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);

		log->syncpt = log->sync;
	} else
		lsn = log->lsn;

	/*
	 * setup next syncpt trigger (SWAG)
	 */
	logsize = log->logsize;

	logdiff(written, lsn, log);
	free = logsize - written;
	delta = LOGSYNC_DELTA(logsize);
	more = min(free / 2, delta);
	if (more < 2 * LOGPSIZE) {
		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
		/*
		 * log wrapping
		 *
		 * option 1 - panic ? No!
		 * option 2 - shutdown file systems
		 *	      associated with log ?
		 * option 3 - extend log ?
		 */
		/*
		 * option 4 - second chance
		 *
		 * mark log wrapped, and continue.
		 * when all active transactions are completed,
		 * mark log valid for recovery.
		 * if crashed during invalid state, log state
		 * implies invalid log, forcing fsck().
		 */
		/* mark log state log wrap in log superblock */
		/* log->state = LOGWRAP; */

		/* reset sync point computation */
		log->syncpt = log->sync = lsn;
		log->nextsync = delta;
	} else
		/* next syncpt trigger = written + more */
		log->nextsync = written + more;

	/* if number of bytes written from last sync point is more
	 * than 1/4 of the log size, stop new transactions from
	 * starting until all current transactions are completed
	 * by setting syncbarrier flag.
	 */
	if (!test_bit(log_SYNCBARRIER, &log->flag) &&
	    (written > LOGSYNC_BARRIER(logsize)) && log->active) {
		set_bit(log_SYNCBARRIER, &log->flag);
		jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
			 log->syncpt);
		/*
		 * We may have to initiate group commit
		 */
		jfs_flush_journal(log, 0);
	}

	return lsn;
}

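/*
 * Worked example of the trigger computation above (a sketch, using
 * the 32 MiB log from the LOGSYNC_DELTA example, so delta = 512 KiB):
 * with written = 2 MiB since the last syncpt, free = 30 MiB and
 * more = min(15 MiB, 512 KiB) = 512 KiB, so the next syncpt fires at
 * nextsync = 2.5 MiB past the sync point.  Only when the log is so
 * nearly full that more would drop below two pages does the log-wrap
 * path run instead.
 */
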
/*
 * NAME:	jfs_syncpt
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *
 * PARAMETERS:	log	  - log structure
 *		hard_sync - set to 1 to force metadata to be written
 */
void jfs_syncpt(struct jfs_log *log, int hard_sync)
{
	LOG_LOCK(log);
	lmLogSync(log, hard_sync);
	LOG_UNLOCK(log);
}

/*
 * NAME:	lmLogOpen()
 *
 * FUNCTION:	open the log on first open;
 *	insert filesystem in the active list of the log.
 *
 * PARAMETER:	ipmnt	- file system mount inode
 *		iplog	- log inode (out)
 *
 * RETURN:
 *
 * serialization:
 */
int lmLogOpen(struct super_block *sb)
{
	int rc;
	struct block_device *bdev;
	struct jfs_log *log;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sbi->flag & JFS_NOINTEGRITY)
		return open_dummy_log(sb);

	if (sbi->mntflag & JFS_INLINELOG)
		return open_inline_log(sb);

	mutex_lock(&jfs_log_mutex);
	list_for_each_entry(log, &jfs_external_logs, journal_list) {
		if (log->bdev->bd_dev == sbi->logdev) {
			if (memcmp(log->uuid, sbi->loguuid,
				   sizeof(log->uuid))) {
				jfs_warn("wrong uuid on JFS journal");
				mutex_unlock(&jfs_log_mutex);
				return -EINVAL;
			}
			/*
			 * add file system to log active file system list
			 */
			if ((rc = lmLogFileSystem(log, sbi, 1))) {
				mutex_unlock(&jfs_log_mutex);
				return rc;
			}
			goto journal_found;
		}
	}

	if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
		mutex_unlock(&jfs_log_mutex);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&log->sb_list);
	init_waitqueue_head(&log->syncwait);

	/*
	 * external log as separate logical volume
	 *
	 * file systems to log may have n-to-1 relationship;
	 */

	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		rc = PTR_ERR(bdev);
		goto free;
	}

	if ((rc = bd_claim(bdev, log))) {
		goto close;
	}

	log->bdev = bdev;
	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));

	/*
	 * initialize log:
	 */
	if ((rc = lmLogInit(log)))
		goto unclaim;

	list_add(&log->journal_list, &jfs_external_logs);

	/*
	 * add file system to log active file system list
	 */
	if ((rc = lmLogFileSystem(log, sbi, 1)))
		goto shutdown;

journal_found:
	LOG_LOCK(log);
	list_add(&sbi->log_list, &log->sb_list);
	sbi->log = log;
	LOG_UNLOCK(log);

	mutex_unlock(&jfs_log_mutex);
	return 0;

	/*
	 * unwind on error
	 */
shutdown:	/* unwind lbmLogInit() */
	list_del(&log->journal_list);
	lbmLogShutdown(log);

unclaim:
	bd_release(bdev);

close:		/* close external log device */
	blkdev_put(bdev);

free:		/* free log descriptor */
	mutex_unlock(&jfs_log_mutex);
	kfree(log);

	jfs_warn("lmLogOpen: exit(%d)", rc);
	return rc;
}

static int open_inline_log(struct super_block *sb)
{
	struct jfs_log *log;
	int rc;

	if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&log->sb_list);
	init_waitqueue_head(&log->syncwait);

	set_bit(log_INLINELOG, &log->flag);
	log->bdev = sb->s_bdev;
	log->base = addressPXD(&JFS_SBI(sb)->logpxd);
	log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
	    (L2LOGPSIZE - sb->s_blocksize_bits);
	log->l2bsize = sb->s_blocksize_bits;
	ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);

	/*
	 * initialize log.
	 */
	if ((rc = lmLogInit(log))) {
		kfree(log);
		jfs_warn("lmLogOpen: exit(%d)", rc);
		return rc;
	}

	list_add(&JFS_SBI(sb)->log_list, &log->sb_list);
	JFS_SBI(sb)->log = log;

	return rc;
}

static int open_dummy_log(struct super_block *sb)
{
	int rc;

	mutex_lock(&jfs_log_mutex);
	if (!dummy_log) {
		dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
		if (!dummy_log) {
			mutex_unlock(&jfs_log_mutex);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&dummy_log->sb_list);
		init_waitqueue_head(&dummy_log->syncwait);
		dummy_log->no_integrity = 1;
		/* Make up some stuff */
		dummy_log->base = 0;
		dummy_log->size = 1024;
		rc = lmLogInit(dummy_log);
		if (rc) {
			kfree(dummy_log);
			dummy_log = NULL;
			mutex_unlock(&jfs_log_mutex);
			return rc;
		}
	}

	LOG_LOCK(dummy_log);
	list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
	JFS_SBI(sb)->log = dummy_log;
	LOG_UNLOCK(dummy_log);
	mutex_unlock(&jfs_log_mutex);

	return 0;
}

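/*
 * The shift in open_inline_log() converts the inline log's extent
 * length from file system blocks to log pages.  E.g. (a sketch, with
 * L2LOGPSIZE = 12, i.e. 4 KiB log pages): for a file system with
 * 1 KiB blocks (s_blocksize_bits = 10), a logpxd length of 8192
 * blocks yields log->size = 8192 >> 2 = 2048 log pages.
 */
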
/*
 * NAME:	lmLogInit()
 *
 * FUNCTION:	log initialization at first log open.
 *
 *	logredo() (or logformat()) should have been run previously.
 *	initialize the log from log superblock.
 *	set the log state in the superblock to LOGMOUNT and
 *	write SYNCPT log record.
 *
 * PARAMETER:	log	- log structure
 *
 * RETURN:	0	- if ok
 *		-EINVAL	- bad log magic number or superblock dirty
 *		error returned from logwait()
 *
 * serialization: single first open thread
 */
int lmLogInit(struct jfs_log *log)
{
	int rc = 0;
	struct lrd lrd;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;
	int lsn = 0;

	jfs_info("lmLogInit: log:0x%p", log);

	/* initialize the group commit serialization lock */
	LOGGC_LOCK_INIT(log);

	/* allocate/initialize the log write serialization lock */
	LOG_LOCK_INIT(log);

	LOGSYNC_LOCK_INIT(log);

	INIT_LIST_HEAD(&log->synclist);

	INIT_LIST_HEAD(&log->cqueue);
	log->flush_tblk = NULL;

	log->count = 0;

	/*
	 * initialize log i/o
	 */
	if ((rc = lbmLogInit(log)))
		return rc;

	if (!test_bit(log_INLINELOG, &log->flag))
		log->l2bsize = L2LOGPSIZE;

	/* check for disabled journaling to disk */
	if (log->no_integrity) {
		/*
		 * Journal pages will still be filled.  When the time comes
		 * to actually do the I/O, the write is not done, and the
		 * endio routine is called directly.
		 */
		bp = lbmAllocate(log, 0);
		log->bp = bp;
		bp->l_pn = bp->l_eor = 0;
	} else {
		/*
		 * validate log superblock
		 */
		if ((rc = lbmRead(log, 1, &bpsuper)))
			goto errout10;

		logsuper = (struct logsuper *) bpsuper->l_ldata;

		if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
			jfs_warn("*** Log Format Error ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* logredo() should have been run successfully. */
		if (logsuper->state != cpu_to_le32(LOGREDONE)) {
			jfs_warn("*** Log Is Dirty ! ***");
			rc = -EINVAL;
			goto errout20;
		}

		/* initialize log from log superblock */
		if (test_bit(log_INLINELOG, &log->flag)) {
			if (log->size != le32_to_cpu(logsuper->size)) {
				rc = -EINVAL;
				goto errout20;
			}
			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		} else {
			if (memcmp(logsuper->uuid, log->uuid, 16)) {
				jfs_warn("wrong uuid on JFS log device");
				rc = -EINVAL;
				goto errout20;
			}
			log->size = le32_to_cpu(logsuper->size);
			log->l2bsize = le32_to_cpu(logsuper->l2bsize);
			jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
				 "size:0x%x", log,
				 (unsigned long long) log->base, log->size);
		}

		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
		log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);

		/*
		 * initialize for log append write mode
		 */
		/* establish current/end-of-log page/buffer */
		if ((rc = lbmRead(log, log->page, &bp)))
			goto errout20;

		lp = (struct logpage *) bp->l_ldata;

		jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
			 le32_to_cpu(logsuper->end), log->page, log->eor,
			 le16_to_cpu(lp->h.eor));

		log->bp = bp;
		bp->l_pn = log->page;
		bp->l_eor = log->eor;

		/* if current page is full, move on to next page */
		if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
			lmNextPage(log);

		/*
		 * initialize log syncpoint
		 */
		/*
		 * write the first SYNCPT record with syncpoint = 0
		 * (i.e., log redo up to HERE !);
		 * remove current page from lbm write queue at end of pageout
		 * (to write log superblock update), but do not release to
		 * freelist;
		 */
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = 0;
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);
		bp = log->bp;
		bp->l_ceor = bp->l_eor;
		lp = (struct logpage *) bp->l_ldata;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
		lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
		if ((rc = lbmIOWait(bp, 0)))
			goto errout30;

		/*
		 * update/write superblock
		 */
		logsuper->state = cpu_to_le32(LOGMOUNT);
		log->serial = le32_to_cpu(logsuper->serial) + 1;
		logsuper->serial = cpu_to_le32(log->serial);
		lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
		if ((rc = lbmIOWait(bpsuper, lbmFREE)))
			goto errout30;
	}

	/* initialize logsync parameters */
	log->logsize = (log->size - 2) << L2LOGPSIZE;
	log->lsn = lsn;
	log->syncpt = lsn;
	log->sync = log->syncpt;
	log->nextsync = LOGSYNC_DELTA(log->logsize);

	jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
		 log->lsn, log->syncpt, log->sync);

	/*
	 * initialize for lazy/group commit
	 */
	log->clsn = lsn;

	return 0;

	/*
	 * unwind on error
	 */
errout30:	/* release log page */
	log->wqueue = NULL;
	bp->l_wqnext = NULL;
	lbmFree(bp);

errout20:	/* release log superblock */
	lbmFree(bpsuper);

errout10:	/* unwind lbmLogInit() */
	lbmLogShutdown(log);

	jfs_warn("lmLogInit: exit(%d)", rc);
	return rc;
}

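/*
 * Note the usable log size computed above: pages 0 (unused) and
 * 1 (log superblock) are excluded, so e.g. a log->size of 1024 pages
 * gives logsize = (1024 - 2) << 12 = 4,186,112 bytes of circular
 * record space.
 */
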
/*
 * NAME:	lmLogClose()
 *
 * FUNCTION:	remove file system <ipmnt> from active list of log <iplog>
 *		and close it on last close.
 *
 * PARAMETER:	sb	- superblock
 *
 * RETURN:	errors from subroutines
 *
 * serialization:
 */
int lmLogClose(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	struct block_device *bdev;
	int rc = 0;

	jfs_info("lmLogClose: log:0x%p", log);

	mutex_lock(&jfs_log_mutex);
	LOG_LOCK(log);
	list_del(&sbi->log_list);
	LOG_UNLOCK(log);
	sbi->log = NULL;

	/*
	 * We need to make sure all of the "written" metapages
	 * actually make it to disk
	 */
	sync_blockdev(sb->s_bdev);

	if (test_bit(log_INLINELOG, &log->flag)) {
		/*
		 * in-line log in host file system
		 */
		rc = lmLogShutdown(log);
		kfree(log);
		goto out;
	}

	if (!log->no_integrity)
		lmLogFileSystem(log, sbi, 0);

	if (!list_empty(&log->sb_list))
		goto out;

	/*
	 * TODO: ensure that the dummy_log is in a state to allow
	 * lbmLogShutdown to deallocate all the buffers and call
	 * kfree against dummy_log.  For now, leave dummy_log & its
	 * buffers in memory, and reuse if another no-integrity mount
	 * is requested.
	 */
	if (log->no_integrity)
		goto out;

	/*
	 * external log as separate logical volume
	 */
	list_del(&log->journal_list);
	bdev = log->bdev;
	rc = lmLogShutdown(log);

	bd_release(bdev);
	blkdev_put(bdev);

	kfree(log);

out:
	mutex_unlock(&jfs_log_mutex);
	jfs_info("lmLogClose: exit(%d)", rc);
	return rc;
}

/*
 * NAME:	jfs_flush_journal()
 *
 * FUNCTION:	initiate write of any outstanding transactions to the journal
 *		and optionally wait until they are all written to disk
 *
 *		wait == 0  flush until latest txn is committed, don't wait
 *		wait == 1  flush until latest txn is committed, wait
 *		wait > 1   flush until all txn's are complete, wait
 */
void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target = NULL;

	/* jfs_write_inode may call us during read-only mount */
	if (!log)
		return;

	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);

	LOGGC_LOCK(log);

	if (!list_empty(&log->cqueue)) {
		/*
		 * This ensures that we will keep writing to the journal as long
		 * as there are unwritten commit records
		 */
		target = list_entry(log->cqueue.prev, struct tblock, cqueue);

		if (test_bit(log_FLUSH, &log->flag)) {
			/*
			 * We're already flushing.
			 * if flush_tblk is NULL, we are flushing everything,
			 * so leave it that way.  Otherwise, update it to the
			 * latest transaction
			 */
			if (log->flush_tblk)
				log->flush_tblk = target;
		} else {
			/* Only flush until latest transaction is committed */
			log->flush_tblk = target;
			set_bit(log_FLUSH, &log->flag);

			/*
			 * Initiate I/O on outstanding transactions
			 */
			if (!(log->cflag & logGC_PAGEOUT)) {
				log->cflag |= logGC_PAGEOUT;
				lmGCwrite(log, 0);
			}
		}
	}
	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
		/* Flush until all activity complete */
		set_bit(log_FLUSH, &log->flag);
		log->flush_tblk = NULL;
	}

	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(__wait, current);

		add_wait_queue(&target->gcwait, &__wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		LOGGC_UNLOCK(log);
		schedule();
		__set_current_state(TASK_RUNNING);
		LOGGC_LOCK(log);
		remove_wait_queue(&target->gcwait, &__wait);
	}
	LOGGC_UNLOCK(log);

	if (wait < 2)
		return;

	write_special_inodes(log, filemap_fdatawrite);

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 */
	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
		for (i = 0; i < 200; i++) {	/* Too much? */
			msleep(250);
			write_special_inodes(log, filemap_fdatawrite);
			if (list_empty(&log->cqueue) &&
			    list_empty(&log->synclist))
				break;
		}
	}
	assert(list_empty(&log->cqueue));

#ifdef CONFIG_JFS_DEBUG
	if (!list_empty(&log->synclist)) {
		struct logsyncblk *lp;

		printk(KERN_ERR "jfs_flush_journal: synclist not empty\n");
		list_for_each_entry(lp, &log->synclist, synclist) {
			if (lp->xflag & COMMIT_PAGE) {
				struct metapage *mp = (struct metapage *)lp;
				print_hex_dump(KERN_ERR, "metapage: ",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       mp, sizeof(struct metapage), 0);
				print_hex_dump(KERN_ERR, "page: ",
					       DUMP_PREFIX_ADDRESS, 16,
					       sizeof(long), mp->page,
					       sizeof(struct page), 0);
			} else
				print_hex_dump(KERN_ERR, "tblock:",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       lp, sizeof(struct tblock), 0);
		}
	}
#else
	WARN_ON(!list_empty(&log->synclist));
#endif
	clear_bit(log_FLUSH, &log->flag);
}

/*
 * NAME:	lmLogShutdown()
 *
 * FUNCTION:	log shutdown at last LogClose().
 *
 *		write log syncpt record.
 *		update super block to set redone flag to 0.
 *
 * PARAMETER:	log	- log inode
 *
 * RETURN:	0	- success
 *
 * serialization: single last close thread
 */
int lmLogShutdown(struct jfs_log *log)
{
	int rc;
	struct lrd lrd;
	int lsn;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;

	jfs_info("lmLogShutdown: log:0x%p", log);

	jfs_flush_journal(log, 2);

	/*
	 * write the last SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !)
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;

	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
	lbmIOWait(log->bp, lbmFREE);
	log->bp = NULL;

	/*
	 * synchronously update log superblock
	 * mark log state as shutdown cleanly
	 * (i.e., Log does not need to be replayed).
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto out;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->end = cpu_to_le32(lsn);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
		 lsn, log->page, log->eor);

out:
	/*
	 * shutdown per log i/o
	 */
	lbmLogShutdown(log);

	if (rc) {
		jfs_warn("lmLogShutdown: exit(%d)", rc);
	}
	return rc;
}

/*
 * NAME:	lmLogFileSystem()
 *
 * FUNCTION:	insert (<activate> = true)/remove (<activate> = false)
 *	file system into/from log active file system list.
 *
 * PARAMETER:	log	- pointer to log's inode.
 *		fsdev	- kdev_t of filesystem.
 *		serial	- pointer to returned log serial number
 *		activate - insert/remove device from active list.
 *
 * RETURN:	0	- success
 *		errors returned by lbmIOWait().
 */
static int lmLogFileSystem(struct jfs_log *log, struct jfs_sb_info *sbi,
			   int activate)
{
	int rc = 0;
	int i;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	char *uuid = sbi->uuid;

	/*
	 * insert/remove file system device to log active file system list.
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		return rc;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	if (activate) {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
				memcpy(logsuper->active[i].uuid, uuid, 16);
				sbi->aggregate = i;
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Too many file systems sharing journal!");
			lbmFree(bpsuper);
			return -EMFILE;	/* Is there a better rc? */
		}
	} else {
		for (i = 0; i < MAX_ACTIVE; i++)
			if (!memcmp(logsuper->active[i].uuid, uuid, 16)) {
				memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
				break;
			}
		if (i == MAX_ACTIVE) {
			jfs_warn("Somebody stomped on the journal!");
			lbmFree(bpsuper);
			return -EIO;
		}

	}

	/*
	 * synchronous write log superblock:
	 *
	 * write sidestream bypassing write queue:
	 * at file system mount, log super block is updated for
	 * activation of the file system before any log record
	 * (MOUNT record) of the file system, and at file system
	 * unmount, all meta data for the file system has been
	 * flushed before log super block is updated for deactivation
	 * of the file system.
	 */
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	return rc;
}

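/*
 * The active[] bookkeeping above is what lets several file systems
 * share one external journal: each mount stamps its uuid into a free
 * slot (NULL_UUID marks free slots), each unmount clears it, and
 * recovery can tell from the log superblock which aggregates still
 * have state in the log.  The number of sharers is bounded by
 * MAX_ACTIVE (see jfs_logmgr.h).
 */
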
/*
 *		log buffer manager (lbm)
 *		------------------------
 *
 * special purpose buffer manager supporting log i/o requirements.
 *
 * per log write queue:
 * log pageout occurs in serial order by fifo write queue,
 * restricting to a single i/o in progress at any one time.
 * a circular singly-linked list
 * (log->wqueue points to the tail, and buffers are linked via
 * the bp->l_wqnext field)
 * maintains log pages in pageout or waiting for pageout in serial pageout.
 */

/*
 *	lbmLogInit()
 *
 * initialize per log I/O setup at lmLogInit()
 */
static int lbmLogInit(struct jfs_log *log)
{				/* log inode */
	int i;
	struct lbuf *lbuf;

	jfs_info("lbmLogInit: log:0x%p", log);

	/* initialize current buffer cursor */
	log->bp = NULL;

	/* initialize log device write queue */
	log->wqueue = NULL;

	/*
	 * Each log has its own buffer pages allocated to it.  These are
	 * not managed by the page cache.  This ensures that a transaction
	 * writing to the log does not block trying to allocate a page from
	 * the page cache (for the log).  This would be bad, since page
	 * allocation waits on the kswapd thread that may be committing inodes
	 * which would cause log activity.  Was that clear?  I'm trying to
	 * avoid deadlock here.
	 */
	init_waitqueue_head(&log->free_wait);

	log->lbuf_free = NULL;

	for (i = 0; i < LOGPAGES;) {
		char *buffer;
		uint offset;
		struct page *page;

		buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (buffer == NULL)
			goto error;
		page = virt_to_page(buffer);
		for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
			lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
			if (lbuf == NULL) {
				if (offset == 0)
					free_page((unsigned long) buffer);
				goto error;
			}
			if (offset) /* we already have one reference */
				get_page(page);
			lbuf->l_offset = offset;
			lbuf->l_ldata = buffer + offset;
			lbuf->l_page = page;
			lbuf->l_log = log;
			init_waitqueue_head(&lbuf->l_ioevent);

			lbuf->l_freelist = log->lbuf_free;
			log->lbuf_free = lbuf;
			i++;
		}
	}

	return (0);

error:
	lbmLogShutdown(log);
	return -ENOMEM;
}

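/*
 * lbmLogInit() above carves the private lbuf pool out of whole pages:
 * each get_zeroed_page() is split into PAGE_SIZE / LOGPSIZE buffers
 * sharing the page's reference count (hence the get_page() for every
 * buffer after the first).  With 4 KiB pages and 4 KiB log pages that
 * is one lbuf per page; a 64 KiB PAGE_SIZE would yield sixteen.
 */
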
/*
 *	lbmLogShutdown()
 *
 * finalize per log I/O setup at lmLogShutdown()
 */
static void lbmLogShutdown(struct jfs_log *log)
{
	struct lbuf *lbuf;

	jfs_info("lbmLogShutdown: log:0x%p", log);

	lbuf = log->lbuf_free;
	while (lbuf) {
		struct lbuf *next = lbuf->l_freelist;
		__free_page(lbuf->l_page);
		kfree(lbuf);
		lbuf = next;
	}
}

/*
 *	lbmAllocate()
 *
 * allocate an empty log buffer
 */
static struct lbuf *lbmAllocate(struct jfs_log *log, int pn)
{
	struct lbuf *bp;
	unsigned long flags;

	/*
	 * recycle from log buffer freelist if any
	 */
	LCACHE_LOCK(flags);
	LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
	log->lbuf_free = bp->l_freelist;
	LCACHE_UNLOCK(flags);

	bp->l_flag = 0;

	bp->l_wqnext = NULL;
	bp->l_freelist = NULL;

	bp->l_pn = pn;
	bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));
	bp->l_ceor = 0;

	return bp;
}

/*
 *	lbmFree()
 *
 * release a log buffer to freelist
 */
static void lbmFree(struct lbuf *bp)
{
	unsigned long flags;

	LCACHE_LOCK(flags);

	lbmfree(bp);

	LCACHE_UNLOCK(flags);
}

static void lbmfree(struct lbuf *bp)
{
	struct jfs_log *log = bp->l_log;

	assert(bp->l_wqnext == NULL);

	/*
	 * return the buffer to head of freelist
	 */
	bp->l_freelist = log->lbuf_free;
	log->lbuf_free = bp;

	wake_up(&log->free_wait);
	return;
}

/*
 * NAME:	lbmRedrive
 *
 * FUNCTION:	add a log buffer to the log redrive list
 *
 * PARAMETER:
 *	bp	- log buffer
 *
 * NOTES:
 *	Takes log_redrive_lock.
 */
static inline void lbmRedrive(struct lbuf *bp)
{
	unsigned long flags;

	spin_lock_irqsave(&log_redrive_lock, flags);
	bp->l_redrive_next = log_redrive_list;
	log_redrive_list = bp;
	spin_unlock_irqrestore(&log_redrive_lock, flags);

	wake_up_process(jfsIOthread);
}

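/*
 * lbmRedrive() is the hand-off from completion context to process
 * context: lbmStartIO() allocates a bio and may block, which is not
 * allowed at interrupt time (see the comment in lbmIODone() below),
 * so completions push the buffer onto log_redrive_list and wake
 * jfsIOthread, whose loop in jfsIOWait() pops the list and issues
 * the I/O from where sleeping is permitted.
 */
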
/*
 *	lbmRead()
 */
static int lbmRead(struct jfs_log *log, int pn, struct lbuf **bpp)
{
	struct bio *bio;
	struct lbuf *bp;

	/*
	 * allocate a log buffer
	 */
	*bpp = bp = lbmAllocate(log, pn);
	jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn);

	bp->l_flag |= lbmREAD;

	bio = bio_alloc(GFP_NOFS, 1);

	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;
	submit_bio(READ_SYNC, bio);

	wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));

	return 0;
}

/*
 *	lbmWrite()
 *
 * buffer at head of pageout queue stays after completion of
 * partial-page pageout and redriven by explicit initiation of
 * pageout by caller until full-page pageout is completed and
 * released.
 *
 * device driver i/o done redrives pageout of new buffer at
 * head of pageout queue when current buffer at head of pageout
 * queue is released at the completion of its full-page pageout.
 *
 * LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
 * LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
 */
static void lbmWrite(struct jfs_log *log, struct lbuf *bp, int flag,
		     int cant_block)
{
	struct lbuf *tail;
	unsigned long flags;

	jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn);

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	LCACHE_LOCK(flags);		/* disable+lock */

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag;

	/*
	 *	insert bp at tail of write queue associated with log
	 *
	 * (request is either for bp already/currently at head of queue
	 * or new bp to be inserted at tail)
	 */
	tail = log->wqueue;

	/* is buffer not already on write queue ? */
	if (bp->l_wqnext == NULL) {
		/* insert at tail of wqueue */
		if (tail == NULL) {
			log->wqueue = bp;
			bp->l_wqnext = bp;
		} else {
			log->wqueue = bp;
			bp->l_wqnext = tail->l_wqnext;
			tail->l_wqnext = bp;
		}

		tail = bp;
	}

	/* is buffer at head of wqueue and for write ? */
	if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) {
		LCACHE_UNLOCK(flags);	/* unlock+enable */
		return;
	}

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	if (cant_block)
		lbmRedrive(bp);
	else if (flag & lbmSYNC)
		lbmStartIO(bp);
	else {
		LOGGC_UNLOCK(log);
		lbmStartIO(bp);
		LOGGC_LOCK(log);
	}
}

/*
 *	lbmDirectWrite()
 *
 * initiate pageout bypassing write queue for sidestream
 * (e.g., log superblock) write;
 */
static void lbmDirectWrite(struct jfs_log *log, struct lbuf *bp, int flag)
{
	jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
		 bp, flag, bp->l_pn);

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag | lbmDIRECT;

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	/*
	 * initiate pageout of the page
	 */
	lbmStartIO(bp);
}

/*
 * NAME:	lbmStartIO()
 *
 * FUNCTION:	Interface to DD strategy routine
 *
 * RETURN:	none
 *
 * serialization: LCACHE_LOCK() is NOT held during log i/o;
 */
static void lbmStartIO(struct lbuf *bp)
{
	struct bio *bio;
	struct jfs_log *log = bp->l_log;

	jfs_info("lbmStartIO");

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;

	/* check if journaling to disk has been disabled */
	if (log->no_integrity) {
		bio->bi_size = 0;
		lbmIODone(bio, 0);
	} else {
		submit_bio(WRITE_SYNC, bio);
		INCREMENT(lmStat.submitted);
	}
}


/*
 * lbmDirectWrite()
 *
 * initiate pageout bypassing write queue for sidestream
 * (e.g., log superblock) write;
 */
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
{
	jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
		 bp, flag, bp->l_pn);

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag | lbmDIRECT;

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	/*
	 * initiate pageout of the page
	 */
	lbmStartIO(bp);
}


/*
 * NAME:	lbmStartIO()
 *
 * FUNCTION:	Interface to DD strategy routine
 *
 * RETURN:	none
 *
 * serialization: LCACHE_LOCK() is NOT held during log i/o;
 */
static void lbmStartIO(struct lbuf * bp)
{
	struct bio *bio;
	struct jfs_log *log = bp->l_log;

	jfs_info("lbmStartIO");

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;

	/* check if journaling to disk has been disabled */
	if (log->no_integrity) {
		bio->bi_size = 0;
		lbmIODone(bio, 0);
	} else {
		submit_bio(WRITE_SYNC, bio);
		INCREMENT(lmStat.submitted);
	}
}


/*
 * lbmIOWait()
 */
static int lbmIOWait(struct lbuf * bp, int flag)
{
	unsigned long flags;
	int rc = 0;

	jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags);

	rc = (bp->l_flag & lbmERROR) ? -EIO : 0;

	if (flag & lbmFREE)
		lbmfree(bp);

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
	return rc;
}
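
/*
 * The two address computations used by lbmWrite()/lbmStartIO() above,
 * pulled out as a stand-alone demonstration (not part of the driver;
 * compiled out).  A log page number is first mapped to a filesystem
 * block number relative to the start of the log, then to a 512-byte
 * device sector for the bio.  All values are hypothetical: a 4K log
 * page on a filesystem with 4K blocks whose log starts at block 1000.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const int l2logpsize = 12;	/* log2(LOGPSIZE), i.e. 4K log pages */
	const int l2bsize = 12;		/* log2 of fs block size */
	const long long base = 1000;	/* fs block where the log starts */
	const int pn = 3;		/* log page number */

	/* bp->l_blkno = log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)) */
	long long blkno = base + ((long long)pn << (l2logpsize - l2bsize));

	/* bio->bi_sector = bp->l_blkno << (log->l2bsize - 9) */
	long long sector = blkno << (l2bsize - 9);

	printf("pn %d -> fs block %lld -> sector %lld\n", pn, blkno, sector);
	return 0;
}
#endif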

/*
 * lbmIODone()
 *
 * executed at INTIODONE level
 */
static void lbmIODone(struct bio *bio, int error)
{
	struct lbuf *bp = bio->bi_private;
	struct lbuf *nextbp, *tail;
	struct jfs_log *log;
	unsigned long flags;

	/*
	 * get back jfs buffer bound to the i/o buffer
	 */
	jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	bp->l_flag |= lbmDONE;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		bp->l_flag |= lbmERROR;

		jfs_err("lbmIODone: I/O error in JFS log");
	}

	bio_put(bio);

	/*
	 * pagein completion
	 */
	if (bp->l_flag & lbmREAD) {
		bp->l_flag &= ~lbmREAD;

		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator */
		LCACHE_WAKEUP(&bp->l_ioevent);

		return;
	}

	/*
	 * pageout completion
	 *
	 * the bp at the head of write queue has completed pageout.
	 *
	 * if single-commit/full-page pageout, remove the current buffer
	 * from head of pageout queue, and redrive pageout with
	 * the new buffer at head of pageout queue;
	 * otherwise, the partial-page pageout buffer stays at
	 * the head of pageout queue to be redriven for pageout
	 * by lmGroupCommit() until full-page pageout is completed.
	 */
	bp->l_flag &= ~lbmWRITE;
	INCREMENT(lmStat.pagedone);

	/* update committed lsn */
	log = bp->l_log;
	log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor;

	if (bp->l_flag & lbmDIRECT) {
		LCACHE_WAKEUP(&bp->l_ioevent);
		LCACHE_UNLOCK(flags);
		return;
	}

	tail = log->wqueue;

	/* single element queue */
	if (bp == tail) {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			log->wqueue = NULL;
			bp->l_wqnext = NULL;
		}
	}
	/* multi element queue */
	else {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			nextbp = tail->l_wqnext = bp->l_wqnext;
			bp->l_wqnext = NULL;

			/*
			 * redrive pageout of next page at head of write queue:
			 * redrive next page without any bound tblk
			 * (i.e., page w/o any COMMIT records), or
			 * first page of new group commit which has been
			 * queued after current page (subsequent pageout
			 * is performed synchronously, except page without
			 * any COMMITs) by lmGroupCommit() as indicated
			 * by lbmWRITE flag;
			 */
			if (nextbp->l_flag & lbmWRITE) {
				/*
				 * We can't do the I/O at interrupt time.
				 * The jfsIO thread can do it.
				 */
				lbmRedrive(nextbp);
			}
		}
	}

	/*
	 * synchronous pageout:
	 *
	 * buffer has not necessarily been removed from write queue
	 * (e.g., synchronous write of partial-page with COMMIT):
	 * leave buffer for i/o initiator to dispose
	 */
	if (bp->l_flag & lbmSYNC) {
		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator */
		LCACHE_WAKEUP(&bp->l_ioevent);
	}

	/*
	 * Group Commit pageout:
	 */
	else if (bp->l_flag & lbmGC) {
		LCACHE_UNLOCK(flags);
		lmPostGC(bp);
	}

	/*
	 * asynchronous pageout:
	 *
	 * buffer must have been removed from write queue:
	 * insert buffer at head of freelist where it can be recycled
	 */
	else {
		assert(bp->l_flag & lbmRELEASE);
		assert(bp->l_flag & lbmFREE);
		lbmfree(bp);

		LCACHE_UNLOCK(flags);	/* unlock+enable */
	}
}

int jfsIOWait(void *arg)
{
	struct lbuf *bp;

	do {
		spin_lock_irq(&log_redrive_lock);
		while ((bp = log_redrive_list)) {
			log_redrive_list = bp->l_redrive_next;
			bp->l_redrive_next = NULL;
			spin_unlock_irq(&log_redrive_lock);
			lbmStartIO(bp);
			spin_lock_irq(&log_redrive_lock);
		}

		if (freezing(current)) {
			spin_unlock_irq(&log_redrive_lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&log_redrive_lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	jfs_info("jfsIOWait being killed!");
	return 0;
}
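
/*
 * A minimal, stand-alone model of the drain loop in jfsIOWait() above
 * (not part of the driver; compiled out).  The point it illustrates:
 * the worker detaches one buffer at a time under the lock, then drops
 * the lock before "starting the I/O", so the lock is never held across
 * a potentially blocking submission.  demo_buf, redrive_list and
 * redrive_lock are hypothetical stand-ins for the driver's names.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct demo_buf {
	int pn;
	struct demo_buf *l_redrive_next;
};

static struct demo_buf *redrive_list;
static pthread_mutex_t redrive_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_drain(void)
{
	struct demo_buf *bp;

	pthread_mutex_lock(&redrive_lock);
	while ((bp = redrive_list) != NULL) {
		redrive_list = bp->l_redrive_next;
		bp->l_redrive_next = NULL;
		pthread_mutex_unlock(&redrive_lock);
		printf("start I/O on page %d\n", bp->pn);	/* models lbmStartIO() */
		pthread_mutex_lock(&redrive_lock);
	}
	pthread_mutex_unlock(&redrive_lock);
}

int main(void)
{
	struct demo_buf a = { 1, NULL }, b = { 2, &a };

	redrive_list = &b;	/* list: b -> a */
	demo_drain();		/* drains in LIFO order: 2 then 1 */
	return 0;
}
#endif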

/*
 * NAME:	lmLogFormat()/jfs_logform()
 *
 * FUNCTION:	format file system log
 *
 * PARAMETERS:
 *	log	- volume log
 *	logAddress - start address of log space in FS block
 *	logSize	- length of log space in FS block;
 *
 * RETURN:	0	- success
 *		-EIO	- i/o error
 *
 * XXX: We're synchronously writing one page at a time.  This needs to
 *	be improved by writing multiple pages at once.
 */
int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
{
	int rc = -EIO;
	struct jfs_sb_info *sbi;
	struct logsuper *logsuper;
	struct logpage *lp;
	int lspn;		/* log page sequence number (lpsn) */
	struct lrd *lrd_ptr;
	int npages = 0;
	struct lbuf *bp;

	jfs_info("lmLogFormat: logAddress:%Ld logSize:%d",
		 (long long)logAddress, logSize);

	sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list);

	/* allocate a log buffer */
	bp = lbmAllocate(log, 1);

	npages = logSize >> sbi->l2nbperpage;

	/*
	 * log space:
	 *
	 * page 0 - reserved;
	 * page 1 - log superblock;
	 * page 2 - log data page: a SYNCPT log record is written
	 *	    into this page at logform time;
	 * pages 3-N - log data pages: set to empty log data pages;
	 */
	/*
	 * init log superblock: log page 1
	 */
	logsuper = (struct logsuper *) bp->l_ldata;

	logsuper->magic = cpu_to_le32(LOGMAGIC);
	logsuper->version = cpu_to_le32(LOGVERSION);
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->flag = cpu_to_le32(sbi->mntflag);	/* ? */
	logsuper->size = cpu_to_le32(npages);
	logsuper->bsize = cpu_to_le32(sbi->bsize);
	logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
	logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);

	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	bp->l_blkno = logAddress + sbi->nbperpage;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 * init pages 2 to npages-1 as log data pages:
	 *
	 * log page sequence number (lpsn) initialization:
	 *
	 * pn:     0     1     2     3                 n-1
	 *        +-----+-----+=====+=====+===.....===+=====+
	 * lpsn:              N-1   0     1           N-2
	 *               <--- N page circular file --->
	 *
	 * the N (= npages-2) data pages of the log are maintained as
	 * a circular file for the log records;
	 * lpsn grows by 1 monotonically as each log page is written
	 * to the circular file of the log;
	 * and setLogpage() will not reset the page number even if
	 * the eor is equal to LOGPHDRSIZE.  In order for the binary
	 * search to still work in the find-log-end process, we have to
	 * simulate the log wrap situation at log format time.
	 * The 1st log page written will have the highest lpsn.  Then
	 * the succeeding log pages will have ascending order of
	 * the lpsn, starting from 0, ... (N-2).
	 * (A stand-alone sketch of this numbering appears at the end
	 * of this file.)
	 */
	lp = (struct logpage *) bp->l_ldata;
	/*
	 * initialize 1st log page to be written: lpsn = N - 1;
	 * a SYNCPT log record is written into this page
	 */
	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);

	lrd_ptr = (struct lrd *) &lp->data;
	lrd_ptr->logtid = 0;
	lrd_ptr->backchain = 0;
	lrd_ptr->type = cpu_to_le16(LOG_SYNCPT);
	lrd_ptr->length = 0;
	lrd_ptr->log.syncpt.sync = 0;

	bp->l_blkno += sbi->nbperpage;
	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 * initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
	 */
	for (lspn = 0; lspn < npages - 3; lspn++) {
		lp->h.page = lp->t.page = cpu_to_le32(lspn);
		lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

		bp->l_blkno += sbi->nbperpage;
		bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
		lbmStartIO(bp);
		if ((rc = lbmIOWait(bp, 0)))
			goto exit;
	}

	rc = 0;
exit:
	/*
	 * finalize log
	 */
	/* release the buffer */
	lbmFree(bp);

	return rc;
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
		     int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS Logmgr stats\n"
		       "================\n"
		       "commits = %d\n"
		       "writes submitted = %d\n"
		       "writes completed = %d\n"
		       "full pages submitted = %d\n"
		       "partial pages submitted = %d\n",
		       lmStat.commit,
		       lmStat.submitted,
		       lmStat.pagedone,
		       lmStat.full_page,
		       lmStat.partial_page);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif /* CONFIG_JFS_STATISTICS */
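
/*
 * A stand-alone illustration of the lpsn numbering that lmLogFormat()
 * establishes (not part of the driver; compiled out).  With npages
 * total log pages, the N = npages - 2 data pages (pn 2 .. npages-1)
 * form the circular file; the first data page written carries the
 * highest lpsn (N - 1) and the remaining pages ascend from 0, so the
 * search for the log end always sees an already-wrapped log.  npages
 * is a hypothetical value chosen for display.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const int npages = 8;	/* hypothetical total log pages */
	int pn, lspn;

	printf("pn 2: lpsn %d (first data page, written with SYNCPT)\n",
	       npages - 3);	/* N - 1 */
	for (lspn = 0, pn = 3; lspn < npages - 3; lspn++, pn++)
		printf("pn %d: lpsn %d\n", pn, lspn);
	return 0;
}
#endif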