// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

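	/*
	 * The buffer now sits on the ail1 list of the transaction being
	 * flushed; from here on, the AIL writeback code is responsible for
	 * writing it back to its in-place location and eventually moving it
	 * to the ail2 list.
	 */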
	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
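 *
 * Each bio submitted here is accounted for in sd_log_in_flight; the count
 * is dropped again in gfs2_end_log_write(), which wakes up sd_log_flush_wait
 * once the last outstanding log i/o completes.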
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: Address of the cached bio pointer
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
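 *
 * The on-disk location comes from gfs2_log_bmap() at the current log flush
 * head, which is then advanced by one block.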
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (cleanup == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
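 *
 * The page lock also serves as the i/o completion signal here:
 * gfs2_end_log_read() unlocks each page once its read has finished, which is
 * what the wait_on_page_locked() call below waits for.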
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache on return
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
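	/*
	 * Wait for the remaining reads to complete, scan the pages we have
	 * not looked at yet for the journal head (unless it has already been
	 * found), and drop the page references taken above.
	 */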
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ?
						     1 : 0);
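				/*
				 * Journaled data blocks that happen to start
				 * with GFS2_MAGIC would look like log
				 * metadata on replay, so they are flagged
				 * "escaped" here; the magic number is zeroed
				 * when the block is copied into the log
				 * below, and databuf_lo_scan_elements()
				 * restores it on replay.
				 */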
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

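			/*
			 * A negative return from gfs2_revoke_add() is an
			 * error; a non-zero return otherwise indicates a
			 * revoke that had not been recorded before, which is
			 * what jd_found_revokes counts.
			 */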
			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
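	/*
	 * gfs2_inode_metasync() writes back and waits on the glock's metadata
	 * address space, so the blocks replayed above should reach the disk
	 * before the replay statistics are reported below.
	 */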
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}


static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};
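/*
 * gfs2_log_ops is the NULL-terminated table of log operations; the lops_*()
 * wrappers in lops.h walk it and invoke whichever hooks each entry provides.
 */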