/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize -
		 sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
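/*
 * Worked example (figures are illustrative only): storing nstruct revoke
 * records of ssize == sizeof(u64) == 8 bytes with a 4096-byte block size.
 * The first block carries a full log descriptor, so it holds
 * (4096 - sizeof(struct gfs2_log_descriptor)) / 8 records; every further
 * block only loses a gfs2_meta_header, so it holds
 * (4096 - sizeof(struct gfs2_meta_header)) / 8 records.  Any overflow past
 * the first block is rounded up to whole continuation blocks.
 */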
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry to start I/O on
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list,
					  &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			retry = 1;
			break;
		}
	} while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past busy buffers
 *
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai,
			       int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
	struct list_head *head = &sdp->sd_ail1_list;
	u64 sync_gen;
	struct list_head *first;
	struct gfs2_ail *first_ai, *ai, *tmp;
	int done = 0;

	gfs2_log_lock(sdp);
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	first = head->prev;
	first_ai = list_entry(first, struct gfs2_ail, ai_list);
	first_ai->ai_sync_gen = sync_gen;
	gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */

	if (flags & DIO_ALL)
		first = NULL;

	while (!done) {
		if (first && (head->prev != first ||
			      gfs2_ail1_empty_one(sdp, first_ai, 0)))
			break;

		done = 1;
		list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
			done = 0;
			break;
		}
	}

	gfs2_log_unlock(sdp);
}

int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}
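/*
 * Note on the two AIL lists (a summary of the code above and below, not a
 * separate contract): buffers that still need to reach the disk sit on
 * ai_ail1_list; once their I/O has completed they are moved to
 * ai_ail2_list by gfs2_ail1_start_one() and gfs2_ail1_empty_one().  When
 * an entry's ai_ail1_list is empty, gfs2_ail1_empty() moves the whole
 * gfs2_ail from sd_ail1_list to sd_ail2_list, and ail2_empty() releases
 * it once the log tail has moved past ai_first.
 */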
/**
 * gfs2_ail2_empty_one - Remove and release the buffers on an AIL entry's ail2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last 6 blocks of the journal. That's
 * because there are a small number of header blocks associated with each
 * log flush. The exact number can't be known until flush time, so we
 * ensure that we have just enough free blocks at all times to avoid
 * running out during a log flush.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned int try = 0;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	mutex_lock(&sdp->sd_log_reserve_mutex);
	gfs2_log_lock(sdp);
	while (sdp->sd_log_blks_free <= (blks + 6)) {
		gfs2_log_unlock(sdp);
		gfs2_ail1_empty(sdp, 0);
		gfs2_log_flush(sdp, NULL);

		if (try++)
			gfs2_ail1_start(sdp, 0);
		gfs2_log_lock(sdp);
	}
	sdp->sd_log_blks_free -= blks;
	gfs2_log_unlock(sdp);
	mutex_unlock(&sdp->sd_log_reserve_mutex);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += blks;
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);
	up_read(&sdp->sd_log_flush_lock);
}
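/*
 * Locking note, inferred from the two functions above: a successful
 * gfs2_log_reserve() returns with sd_log_flush_lock held for read, which
 * keeps a full log flush from running while the reservation is
 * outstanding.  The read lock is dropped again either by
 * gfs2_log_commit() once the transaction has been committed, or by
 * gfs2_log_release() when unused blocks are handed back.
 */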
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct inode *inode = sdp->sd_jdesc->jd_inode;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, lbn, 0, &bh_map);
	if (error || !bh_map.b_blocknr)
		printk(KERN_INFO "error=%d, dbn=%llu lbn=%u\n", error,
		       (unsigned long long)bh_map.b_blocknr, lbn);
	gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);

	return bh_map.b_blocknr;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp,
					unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
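/*
 * Example (numbers purely illustrative): with jd_blocks == 1000, the
 * distance from older == 990 to newer == 5 is 5 - 990 + 1000 == 15
 * blocks.  The journal is treated as a circular buffer, so a negative
 * difference simply means the newer block has wrapped past the end.
 */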
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail,
				ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}

static inline void log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		gfs2_assert_withdraw(sdp,
				sdp->sd_log_flush_head == sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);

	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	log_incr_head(sdp);

	return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head whose data the fake buffer should point to
 *
 * Returns: the fake buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
	lb->lb_real = real;

	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;

	log_incr_head(sdp);

	return bh;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail,
			  int pull)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += dist - (pull ? 1 : 0);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a log header into the journal
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if the tail is being pulled as part of this header write
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	set_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail, pull);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_flush_list;
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
		list_del(&lb->lb_list);
		bh = lb->lb_bh;

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		if (lb->lb_real) {
			while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
				schedule();
			free_buffer_head(bh);
		} else
			brelse(bh);
		kfree(lb);
	}

	log_write_header(sdp, 0, 0);
}
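/*
 * Note (summary of log_flush_commit() and log_write_header() above): all
 * of the queued log buffers are waited on before the log header is
 * written, and the header itself goes out via sync_dirty_buffer().  The
 * header therefore only ever describes journal blocks that have already
 * reached the disk, which is what allows it to act as a commit point.
 */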
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	if (gl) {
		gfs2_log_lock(sdp);
		if (list_empty(&gl->gl_le.le_list)) {
			gfs2_log_unlock(sdp);
			up_write(&sdp->sd_log_flush_lock);
			return;
		}
		gfs2_log_unlock(sdp);
	}

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	lops_before_commit(sdp);
	if (!list_empty(&sdp->sd_log_flush_list))
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
		log_write_header(sdp, 0, PULL);
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_free -= sdp->sd_log_num_hdrs;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_num_hdrs = 0;
	sdp->sd_log_commited_revoke = 0;

	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);

	sdp->sd_vfs->s_dirt = 0;
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved = 0;
	unsigned int old;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
	sdp->sd_log_commited_revoke += tr->tr_num_revoke -
				       tr->tr_num_revoke_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

	if (sdp->sd_log_commited_buf)
		reserved += sdp->sd_log_commited_buf;
	if (sdp->sd_log_commited_revoke)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	if (reserved)
		reserved++;

	old = sdp->sd_log_blks_free;
	sdp->sd_log_blks_free += tr->tr_reserved -
				 (reserved - sdp->sd_log_blks_reserved);

	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks +
			sdp->sd_log_num_hdrs);

	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	lops_incore_commit(sdp, tr);

	sdp->sd_vfs->s_dirt = 1;
	up_read(&sdp->sd_log_flush_lock);

	gfs2_log_lock(sdp);
	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
		wake_up_process(sdp->sd_logd_process);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_hdrs);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

	gfs2_assert_warn(sdp,
			sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}