/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!bh) {
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			retry = 1;
			break;
		}
	} while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past buffers that are still in flight
 *
 * Returns: 1 if the AIL1 list for this transaction is now empty
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		if (!bh) {
			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
			continue;
		}

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

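/**
 * gfs2_ail1_start - Start I/O on the AIL1 lists
 * @sdp: the filesystem
 * @flags: DIO_ALL to start I/O on every AIL1 entry rather than stopping
 *         once the oldest entry has been synced
 *
 * Walks the AIL1 transactions from oldest to newest, tagging each with the
 * current sync generation and starting writeback on its buffers.  Note that
 * gfs2_ail1_start_one() may drop and reacquire the log lock.
 */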
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
	struct list_head *head = &sdp->sd_ail1_list;
	u64 sync_gen;
	struct list_head *first;
	struct gfs2_ail *first_ai, *ai, *tmp;
	int done = 0;

	gfs2_log_lock(sdp);
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	first = head->prev;
	first_ai = list_entry(first, struct gfs2_ail, ai_list);
	first_ai->ai_sync_gen = sync_gen;
	gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */

	if (flags & DIO_ALL)
		first = NULL;

	while(!done) {
		if (first && (head->prev != first ||
			      gfs2_ail1_empty_one(sdp, first_ai, 0)))
			break;

		done = 1;
		list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
			done = 0;
			break;
		}
	}

	gfs2_log_unlock(sdp);
}

int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}

/**
 * gfs2_ail2_empty_one - Release the buffers on an AIL2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}

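/**
 * ail2_empty - Free AIL2 transactions that the new log tail has passed
 * @sdp: the filesystem
 * @new_tail: the journal block the log tail is being advanced to
 *
 * Any AIL2 entry whose first block lies between the old tail and @new_tail
 * (taking journal wrap-around into account) is released and freed.
 */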
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned int try = 0;
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	mutex_lock(&sdp->sd_log_reserve_mutex);
	gfs2_log_lock(sdp);
	while(sdp->sd_log_blks_free <= (blks + reserved_blks)) {
		gfs2_log_unlock(sdp);
		gfs2_ail1_empty(sdp, 0);
		gfs2_log_flush(sdp, NULL);

		if (try++)
			gfs2_ail1_start(sdp, 0);
		gfs2_log_lock(sdp);
	}
	sdp->sd_log_blks_free -= blks;
	gfs2_log_unlock(sdp);
	mutex_unlock(&sdp->sd_log_reserve_mutex);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += blks;
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);
	up_read(&sdp->sd_log_flush_lock);
}

static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct inode *inode = sdp->sd_jdesc->jd_inode;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, lbn, 0, &bh_map);
	if (error || !bh_map.b_blocknr)
		printk(KERN_INFO "error=%d, dbn=%llu lbn=%u\n", error,
		       (unsigned long long)bh_map.b_blocknr, lbn);
	gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);

	return bh_map.b_blocknr;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf_limit, metabufhdrs_needed;
	unsigned int dbuf_limit, databufhdrs_needed;
	unsigned int revokes = 0;

	mbuf_limit = buf_limit(sdp);
	metabufhdrs_needed = (sdp->sd_log_commited_buf +
			      (mbuf_limit - 1)) / mbuf_limit;
	dbuf_limit = databuf_limit(sdp);
	databufhdrs_needed = (sdp->sd_log_commited_databuf +
			      (dbuf_limit - 1)) / dbuf_limit;

	if (sdp->sd_log_commited_revoke)
		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					  sizeof(u64));

	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
		   sdp->sd_log_commited_databuf + databufhdrs_needed +
		   revokes;
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}

static inline void log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);

	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	log_incr_head(sdp);

	return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the in-place buffer head whose data should be written to the log
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
	lb->lb_real = real;

	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;

	log_incr_head(sdp);

	return bh;
}

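/**
 * log_pull_tail - Advance the on-disk tail of the log
 * @sdp: The GFS2 superblock
 * @new_tail: the journal block that becomes the new tail
 *
 * Frees the AIL2 transactions that the tail has moved past and returns
 * the reclaimed distance to the pool of free log blocks.
 */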
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += dist;
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if the caller expects the log tail to move
 *
 * Writes the header synchronously and, if the tail has moved, pulls the
 * tail forward to free up journal space.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	set_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	log_incr_head(sdp);
}

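/**
 * log_flush_commit - Wait for in-flight log writes and commit them
 * @sdp: the filesystem
 *
 * Waits for every buffer on the log flush list to finish its I/O and frees
 * the associated gfs2_log_buf descriptors.  If anything was actually
 * journaled, a log header is written to commit it; otherwise the log tail
 * is simply pulled forward if it has moved.
 */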
static void log_flush_commit(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_flush_list;
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;
	int flushcount = 0;

	while (!list_empty(head)) {
		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
		list_del(&lb->lb_list);
		bh = lb->lb_bh;

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		if (lb->lb_real) {
			while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
				schedule();
			free_buffer_head(bh);
		} else
			brelse(bh);
		kfree(lb);
		flushcount++;
	}

	/* If nothing was journaled, the header is unplanned and unwanted. */
	if (flushcount) {
		log_write_header(sdp, 0, 0);
	} else {
		unsigned int tail;
		tail = current_tail(sdp);

		gfs2_ail1_empty(sdp, 0);
		if (sdp->sd_log_tail != tail)
			log_pull_tail(sdp, tail);
	}
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	if (gl) {
		gfs2_log_lock(sdp);
		if (list_empty(&gl->gl_le.le_list)) {
			gfs2_log_unlock(sdp);
			up_write(&sdp->sd_log_flush_lock);
			return;
		}
		gfs2_log_unlock(sdp);
	}

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_num_buf + sdp->sd_log_num_jdata ==
			     sdp->sd_log_commited_buf +
			     sdp->sd_log_commited_databuf);
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	lops_before_commit(sdp);
	if (!list_empty(&sdp->sd_log_flush_list))
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		gfs2_log_lock(sdp);
		sdp->sd_log_blks_free--; /* Adjust for unreserved buffer */
		gfs2_log_unlock(sdp);
		log_write_header(sdp, 0, PULL);
	}
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_commited_databuf = 0;
	sdp->sd_log_commited_revoke = 0;

	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);

	sdp->sd_vfs->s_dirt = 0;
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int old;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
	reserved = calc_reserved(sdp);
	old = sdp->sd_log_blks_free;
	sdp->sd_log_blks_free += tr->tr_reserved -
				 (reserved - sdp->sd_log_blks_reserved);

	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	lops_incore_commit(sdp, tr);

	sdp->sd_vfs->s_dirt = 1;
	up_read(&sdp->sd_log_flush_lock);

	gfs2_log_lock(sdp);
	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
		wake_up_process(sdp->sd_logd_process);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

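	/*
	 * The log must be completely quiesced before the unmount header is
	 * written: nothing reserved, no pinned buffers or pending revokes,
	 * and an empty AIL1 list.
	 */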
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

	gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp, DIO_ALL);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}