/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to its in-place disk block, remove it from the AIL. */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL entry for the current transaction, onto which the
 *      buffer is moved once it has been unpinned
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
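/*
 * A log descriptor block, as built by gfs2_get_log_desc() below, is a
 * struct gfs2_log_descriptor followed by an array of __be64 entries
 * filling the rest of the block.  Roughly (sizes as per the limits
 * quoted in the before_commit functions below):
 *
 *	+-------------------------------+  offset 0
 *	| struct gfs2_log_descriptor	|
 *	+-------------------------------+  sizeof(struct gfs2_log_descriptor)
 *	| __be64 entries: one block	|
 *	| number per metadata buffer	|
 *	| (~503 for 4k blocks), or	|
 *	| (blkno, escape) pairs for	|
 *	| journaled data (~251 blocks)	|
 *	+-------------------------------+  bh->b_size
 */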
static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
	return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
	return (__force __be64 *)(bh->b_data + bh->b_size);
}

static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
	struct buffer_head *bh = gfs2_log_get_buf(sdp);
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = 0;
	ld->ld_data1 = 0;
	ld->ld_data2 = 0;
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	return bh;
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr))
		goto out;
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	if (!list_empty(&le->le_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total;
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	limit = buf_limit(sdp);
	/* for 4k blocks, limit = 503 */

	gfs2_log_lock(sdp);
	total = sdp->sd_log_num_buf;
	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
		gfs2_log_lock(sdp);
		ld = bh_log_desc(bh);
		ptr = bh_log_ptr(bh);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		submit_bh(WRITE_SYNC, bh);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			submit_bh(WRITE_SYNC, bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
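/*
 * Once the descriptor and the buffer copies have been written to the
 * journal, buf_lo_after_commit() unpins each buffer: the pinned
 * reference is dropped and the buffer joins the AIL for this
 * transaction (see gfs2_unpin() above), from where it will later be
 * written back to its in-place location.
 */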
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
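/*
 * Revokes.  Broadly speaking, when a journaled block is freed (or
 * reused for something that is no longer journaled), any copies of it
 * still sitting in the log must not be replayed.  A revoke entry
 * records the block number so that replay, via gfs2_revoke_check(),
 * skips such stale journal copies.
 */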
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_le.le_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			submit_bh(WRITE_SYNC, bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	submit_bh(WRITE_SYNC, bh);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}
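/*
 * Journaled data blocks are tagged in the log descriptor with pairs of
 * __be64 values rather than a single block number (this is the tag
 * format described in the kernel-doc for databuf_lo_add() below):
 *
 *	tag[0]: disk address of the data block
 *	tag[1]: escape flag, 1 if the block had to be escaped, else 0
 */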
/**
 * databuf_lo_add - Add a databuf to the transaction.
 * @sdp: The filesystem
 * @le: The log element representing the data buffer
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (tr) {
		if (!list_empty(&bd->bd_list_tr))
			goto out;
		tr->tr_touched = 1;
		if (gfs2_is_jdata(ip)) {
			tr->tr_num_buf++;
			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		}
	}
	if (!list_empty(&le->le_list))
		goto out;

	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
	} else {
		list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
	}
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}
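/*
 * Escaping.  During replay, any journal block whose first four bytes
 * read as GFS2_MAGIC could be mistaken for a metadata header.  A data
 * block which happens to begin with the magic number is therefore
 * "escaped": its journal copy has those bytes zeroed (see
 * gfs2_write_blocks() below) and the tag's escape flag is set, so that
 * databuf_lo_scan_elements() can restore the magic on replay.
 */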
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr, KM_USER0);
}

static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
			      struct list_head *list, struct list_head *done,
			      unsigned int n)
{
	struct buffer_head *bh1;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd;
	__be64 *ptr;

	if (!bh)
		return;

	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(n + 1);
	ld->ld_data1 = cpu_to_be32(n);

	ptr = bh_log_ptr(bh);

	get_bh(bh);
	submit_bh(WRITE_SYNC, bh);
	gfs2_log_lock(sdp);
	while (!list_empty(list)) {
		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, done);
		get_bh(bd->bd_bh);
		/* Find this buffer's tag, keeping the log head in step */
		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
			gfs2_log_incr_head(sdp);
			ptr += 2;
		}
		gfs2_log_unlock(sdp);
		lock_buffer(bd->bd_bh);
		if (buffer_escaped(bd->bd_bh)) {
			void *kaddr;
			bh1 = gfs2_log_get_buf(sdp);
			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
			       bh1->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			/*
			 * Zero out the magic number so that replay cannot
			 * mistake this data block for metadata.
			 */
			*(__be32 *)bh1->b_data = 0;
			clear_buffer_escaped(bd->bd_bh);
			unlock_buffer(bd->bd_bh);
			brelse(bd->bd_bh);
		} else {
			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
		}
		submit_bh(WRITE_SYNC, bh1);
		gfs2_log_lock(sdp);
		ptr += 2;
	}
	gfs2_log_unlock(sdp);
	brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd = NULL;
	struct buffer_head *bh = NULL;
	unsigned int n = 0;
	__be64 *ptr = NULL, *end = NULL;
	LIST_HEAD(processed);
	LIST_HEAD(in_progress);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_databuf)) {
		if (ptr == end) {
			gfs2_log_unlock(sdp);
			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
			n = 0;
			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
			ptr = bh_log_ptr(bh);
			end = bh_ptr_end(bh) - 1;
			gfs2_log_lock(sdp);
			continue;
		}
		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, &in_progress);
		gfs2_check_magic(bd->bd_bh);
		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
		/*
		 * The escape flag must describe the data buffer itself,
		 * not the log descriptor buffer.
		 */
		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
		n++;
	}
	gfs2_log_unlock(sdp);
	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
	gfs2_log_lock(sdp);
	list_splice(&processed, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}
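/*
 * Replay of a journaled data descriptor mirrors buf_lo_scan_elements()
 * above, except that each block is described by a (blkno, escape) tag
 * pair and no metadata header check is possible on the replayed block.
 */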
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}


const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};