/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to the in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction onto whose ail1 list the buffer is placed
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
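/*
 * Pin/unpin lifecycle, in brief: a buffer stays pinned from the moment a
 * transaction modifies it until its journal copy has been written, which
 * keeps the in-place version from being written back too early. Callers
 * do not normally pin directly; an illustrative sketch (not a code path
 * in this file) of how a pin comes about:
 *
 *	gfs2_trans_begin(sdp, blocks, revokes);
 *	gfs2_trans_add_meta(gl, bh);	// pins bh via gfs2_pin()
 *	gfs2_trans_end(sdp);
 *	// ...at the next log flush, the journal copy of bh is written
 *	// and buf_lo_after_commit() unpins it onto the ail1 list.
 */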
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}
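/*
 * Worked example for gfs2_log_bmap() above (illustrative numbers): with a
 * single entry { lblock = 0, blocks = 8192, dblock = 65536 } on
 * sd_jdesc->extent_list, logical journal block 3 maps to device block
 * 65536 + 3 - 0 = 65539. A fragmented journal has several such extents,
 * which is why the function walks the list.
 */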
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
 * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
		submit_bio(sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}
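/*
 * Arithmetic behind the sequential-append check in gfs2_log_get_bio()
 * above (illustrative): with 4KiB filesystem blocks and 512-byte device
 * sectors, sd_fsb2bb_shift is 3, so a cached bio ending at sector 8200
 * ends at filesystem block 8200 >> 3 = 1025. A write for blkno 1025 is
 * appended to that bio; any other block number flushes it and starts a
 * new one.
 */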
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
		       gfs2_log_bmap(sdp));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
		       gfs2_log_bmap(sdp));
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ?
						     1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
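/*
 * Descriptor capacity (illustrative arithmetic, assuming the 72-byte
 * on-disk struct gfs2_log_descriptor): a 4KiB descriptor block has room
 * for (4096 - 72) / sizeof(__be64) = 503 block pointers, which is where
 * the buf_limit() figure quoted below comes from. Journaled-data
 * descriptors store two __be64s per block (block number plus escape
 * flag), so the corresponding databuf_limit() is roughly half that.
 */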
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, false);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(sdp);
}
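/*
 * Recovery pass numbering, as used by the scan callbacks in this file:
 * pass 0 builds the revoke table from GFS2_LOG_DESC_REVOKE blocks
 * (revoke_lo_scan_elements), pass 1 replays metadata and journaled-data
 * blocks (buf_lo_scan_elements, databuf_lo_scan_elements), skipping
 * anything gfs2_revoke_check() reports as revoked.
 */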
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}
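/*
 * On-disk revoke layout written by revoke_lo_before_commit() above and
 * parsed by revoke_lo_scan_elements() below (illustrative figures for
 * 4KiB blocks): a GFS2_LOG_DESC_REVOKE descriptor followed by packed
 * __be64 block numbers, spilling into continuation blocks that carry only
 * a GFS2_METATYPE_LB header; that gives about (4096 - 72) / 8 = 503
 * revokes in the first block and (4096 - 24) / 8 = 509 per continuation.
 */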
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The transaction being committed
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, true);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}
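/*
 * Escaping round trip: gfs2_check_magic() flags a journaled-data block
 * whose first word happens to equal GFS2_MAGIC, gfs2_before_commit()
 * zeroes that word in the journal copy and sets esc = 1 in the
 * descriptor, and the "Unescape" step above puts the magic number back
 * during replay. Without this, recovery could mistake raw user data for
 * a GFS2 metadata header while scanning the log.
 */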
/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};
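/*
 * Note on the tables above: the log code iterates gfs2_log_ops[] up to
 * the NULL sentinel, calling each hook only where it is non-NULL (see the
 * lops_*() wrappers in lops.h). gfs2_databuf_lops intentionally has no
 * lo_before_scan: the jd_found_blocks/jd_replayed_blocks counters it
 * shares with the metadata ops are reset once by buf_lo_before_scan().
 */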