// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

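	/* The buffer is now safely in the log, so the glock no longer
	 * needs flushing on its behalf */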
	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers, unlocks them and sets the error flag
 * according to the status of the i/o request. This is used when the log
 * is writing data which has an in-place version that is pinned in the
 * pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
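 *
 * Each submission increments sd_log_in_flight; gfs2_end_log_write()
 * decrements it again once the i/o completes.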
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: Address of the cached bio pointer
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
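 *
 * Note that the in-place block itself is not written here; only its
 * content, as cached in @bh, is copied into the journal.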
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (cleanup == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
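 *
 * Waiting on the page lock is what synchronizes us with the read i/o,
 * since gfs2_end_log_read() unlocks each page once its i/o has completed.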
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, the journal's page cache is not truncated on exit
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
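	/* Submit the final, possibly partial, read bio */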
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
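				/* For jdata, each block number is followed
				 * by an escape flag in the descriptor */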
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ?
						     1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
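					/* Report the buffer state to help
					 * debug the unexpected collision */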
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

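			/* A positive return from gfs2_revoke_add() means a
			 * new revoke was recorded */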
			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
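	/* Write back and wait on the inode's metadata mapping */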
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};