/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}

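/*
 * Worked example (illustrative, not part of the original source): with a
 * 4096-byte block size and 8-byte structures such as revoke entries, the
 * first block holds (4096 - sizeof(struct gfs2_log_descriptor)) / 8
 * entries and every continuation block holds
 * (4096 - sizeof(struct gfs2_meta_header)) / 8, so storing nstruct
 * entries costs 1 + DIV_ROUND_UP(nstruct - first, second) blocks once
 * the first block overflows.
 */
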
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction to start I/O on
 *
 * Returns: 1 if the ail lock was dropped to start writeback and the scan
 * should be restarted, 0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;
        struct blk_plug plug;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        blk_start_plug(&plug);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        blk_finish_plug(&plug);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 *
 * Moves each buffer of @tr that has been written back from the ail1 list
 * to the ail2 list.
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int oldest_tr = 1;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr);
                if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

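/*
 * Note (added for clarity): gfs2_ail1_wait() waits on at most one locked
 * buffer before returning, so callers that need the whole ail1 list
 * written back drive these helpers in a loop, exactly as gfs2_log_flush()
 * does below:
 *
 *	for (;;) {
 *		gfs2_ail1_start(sdp);
 *		gfs2_ail1_wait(sdp);
 *		if (gfs2_ail1_empty(sdp))
 *			break;
 *	}
 */
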
/**
 * gfs2_ail2_empty_one - Remove all buffers left on a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        atomic_add(blks, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, blks);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                                  sdp->sd_jdesc->jd_blocks);
        up_read(&sdp->sd_log_flush_lock);
}

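/*
 * Usage sketch (illustrative, assuming a caller that backs out of an
 * unused reservation): gfs2_log_reserve() below takes sd_log_flush_lock
 * for reading and gfs2_log_release() drops it, so a successful
 * reservation must be balanced by exactly one release:
 *
 *	error = gfs2_log_reserve(sdp, blks);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_log_release(sdp, blks);
 */
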
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks is associated with each log
 * flush. The exact number can't be known until flush time, so we ensure
 * that we have just enough free blocks at all times to avoid running out
 * during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        int ret = 0;
        unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
        atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                                  TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while (free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        atomic_inc(&sdp->sd_reserving_log);
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                           free_blocks - blks) != free_blocks) {
                if (atomic_dec_and_test(&sdp->sd_reserving_log))
                        wake_up(&sdp->sd_reserving_log_wait);
                goto retry;
        }
        atomic_sub(blks, &sdp->sd_log_blks_needed);
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);
        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
                gfs2_log_release(sdp, blks);
                ret = -EROFS;
        }
        if (atomic_dec_and_test(&sdp->sd_reserving_log))
                wake_up(&sdp->sd_reserving_log_wait);
        return ret;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}

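/*
 * Worked example (illustrative): in a journal of jd_blocks == 8192,
 * log_distance(sdp, 10, 8000) yields 10 - 8000 + 8192 == 202; the log
 * wrapped past the end of the journal, and the distance is measured in
 * the direction the log grows.
 */
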
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, log_refund() and gfs2_log_flush()
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf;
        unsigned int dbuf;
        struct gfs2_trans *tr = sdp->sd_log_tr;

        if (tr) {
                mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
                dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
                reserved = mbuf + dbuf;
                /* Account for header blocks */
                reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
                reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
        }

        if (sdp->sd_log_commited_revoke > 0)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                            sizeof(u64));
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}


static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while (atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
                        test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
                        list_del(&ip->i_ordered);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

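/*
 * Note (added for clarity): ip_cmp() sorts the ordered-write list by
 * i_no_addr, the inode's disk address, so gfs2_ordered_write() issues
 * writeback in roughly ascending disk order; this should keep the
 * pre-flush data writes largely sequential on rotating media.
 */
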
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;

        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        bd->bd_ops = &gfs2_revoke_lops;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd, *tmp;
        int have_revokes = 0;
        int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

        gfs2_ail1_empty(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (list_empty(&bd->bd_list)) {
                                have_revokes = 1;
                                goto done;
                        }
                }
        }
done:
        spin_unlock(&sdp->sd_ail_lock);
        if (have_revokes == 0)
                return;
        while (sdp->sd_log_num_revoke > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
        max_revokes -= sdp->sd_log_num_revoke;
        if (!sdp->sd_log_num_revoke) {
                atomic_dec(&sdp->sd_log_blks_free);
                /* If no blocks have been reserved, we need to also
                 * reserve a block for the header */
                if (!sdp->sd_log_blks_reserved)
                        atomic_dec(&sdp->sd_log_blks_free);
        }
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (max_revokes == 0)
                                goto out_of_blocks;
                        if (!list_empty(&bd->bd_list))
                                continue;
                        gfs2_add_revoke(sdp, bd);
                        max_revokes--;
                }
        }
out_of_blocks:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!sdp->sd_log_num_revoke) {
                atomic_inc(&sdp->sd_log_blks_free);
                if (!sdp->sd_log_blks_reserved)
                        atomic_inc(&sdp->sd_log_blks_free);
        }
}

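/*
 * Capacity example (illustrative, assuming a 4096-byte block size): the
 * first revoke block is a log descriptor, so it carries
 * (4096 - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) block
 * numbers; each additional block only loses a struct gfs2_meta_header,
 * so the loop above grows max_revokes in increments of
 * (4096 - sizeof(struct gfs2_meta_header)) / sizeof(u64) until the
 * already-queued revokes fit.
 */
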
/**
 * gfs2_write_log_header - Write a journal log header buffer at sd_log_flush_head
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 *
 * Writes the log header and waits for the write to complete.
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                           u64 seq, u32 tail, u32 flags, int op_flags)
{
        struct gfs2_log_header *lh;
        u32 hash, crc;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct timespec64 tv;
        struct super_block *sb = sdp->sd_vfs;
        u64 addr;

        lh = page_address(page);
        clear_page(lh);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(seq);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = ~crc32(~0, lh, LH_V1_SIZE);
        lh->lh_hash = cpu_to_be32(hash);

        tv = current_kernel_time64();
        lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
        lh->lh_sec = cpu_to_be64(tv.tv_sec);
        addr = gfs2_log_bmap(sdp);
        lh->lh_addr = cpu_to_be64(addr);
        lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

        /* We may only write local statfs, quota, etc., when writing to our
           own journal. The values are left 0 when recovering a journal
           different from our own. */
        if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
                lh->lh_statfs_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
                lh->lh_quota_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

                spin_lock(&sdp->sd_statfs_spin);
                lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
                lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
                lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
                spin_unlock(&sdp->sd_statfs_spin);
        }

        BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

        crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
                     sb->s_blocksize - LH_V1_SIZE - 4);
        lh->lh_crc = cpu_to_be32(crc);

        gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
        log_flush_wait(sdp);
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        unsigned int tail;
        int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
        tail = current_tail(sdp);

        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
        }
        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
                              flags, op_flags);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}

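/*
 * Note (added for clarity): by default the header goes out with
 * REQ_PREFLUSH | REQ_FUA, so the device flushes the preceding log data
 * and makes the header durable before the write completes. With barriers
 * disabled (SDF_NOBARRIERS) that ordering is instead approximated in
 * software by waiting for ordered data and in-flight log I/O before
 * issuing a plain synchronous write.
 */
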
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
        struct gfs2_trans *tr;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1, flags);

        if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                INIT_LIST_HEAD(&tr->tr_ail1_list);
                INIT_LIST_HEAD(&tr->tr_ail2_list);
                tr->tr_first = sdp->sd_log_flush_head;
                if (unlikely(state == SFS_FROZEN))
                        gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
        }

        if (unlikely(state == SFS_FROZEN))
                gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
                log_write_header(sdp, flags);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, flags);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
                if (!sdp->sd_log_idle) {
                        for (;;) {
                                gfs2_ail1_start(sdp);
                                gfs2_ail1_wait(sdp);
                                if (gfs2_ail1_empty(sdp))
                                        break;
                        }
                        atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                        trace_gfs2_log_blocks(sdp, -1);
                        log_write_header(sdp, flags);
                        sdp->sd_log_head = sdp->sd_log_flush_head;
                }
                if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
                             GFS2_LOG_HEAD_FLUSH_FREEZE))
                        gfs2_log_shutdown(sdp);
                if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }

        trace_gfs2_log_flush(sdp, 0, flags);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

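/*
 * Usage sketch (illustrative): gfs2_logd() below drives a flush of the
 * whole incore log with
 *
 *	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 *		       GFS2_LFC_LOGD_JFLUSH_REQD);
 *
 * while a caller flushing on behalf of a specific glock passes it as
 * @gl, so the flush can be skipped when GLF_LFLUSH has already been
 * cleared by an earlier flush.
 */
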
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
        WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

        old->tr_num_buf_new += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm += new->tr_num_buf_rm;
        old->tr_num_databuf_rm += new->tr_num_databuf_rm;
        old->tr_num_revoke += new->tr_num_revoke;
        old->tr_num_revoke_rm += new->tr_num_revoke_rm;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp->sd_log_tr, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
                sdp->sd_log_tr = tr;
                set_bit(TR_ATTACHED, &tr->tr_flags);
        }

        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
             atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) +
                atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks -
                                   atomic_read(&sdp->sd_log_blks_free);

        if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
                return 1;

        return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh2);
}

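/*
 * Worked example (illustrative, using the 1/3 and 2/3 mount-time
 * thresholds documented at gfs2_log_commit() above): for a 32768-block
 * journal, thresh1 is about 10922 blocks and thresh2 about 21845, so
 * logd flushes the journal once pinned plus needed blocks reach roughly
 * a third of the journal, and starts pushing the AIL once used plus
 * needed blocks reach roughly two thirds.
 */
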
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);
        bool did_flush;

        while (!kthread_should_stop()) {

                /* Check for errors writing to the journal */
                if (sdp->sd_log_error) {
                        gfs2_lm_withdraw(sdp,
                                         "GFS2: fsid=%s: error %d: "
                                         "withdrawing the file system to "
                                         "prevent further damage.\n",
                                         sdp->sd_fsname, sdp->sd_log_error);
                }

                did_flush = false;
                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                                  GFS2_LFC_LOGD_JFLUSH_REQD);
                        did_flush = true;
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                                  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
                        did_flush = true;
                }

                if (!gfs2_ail_flush_reqd(sdp) || did_flush)
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while (t && !gfs2_ail_flush_reqd(sdp) &&
                         !gfs2_jrnl_flush_reqd(sdp) &&
                         !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}