/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL list to place the buffer on
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}


static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
	return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
	return (__force __be64 *)(bh->b_data + bh->b_size);
}


static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
	struct buffer_head *bh = gfs2_log_get_buf(sdp);
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = 0;
	ld->ld_data1 = 0;
	ld->ld_data2 = 0;
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	return bh;
}
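
/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 * @sdp: The superblock
 * @le: The log element embedded in the buffer's gfs2_bufdata
 *
 * Pins the buffer, stamps it with the journal id, and queues it on the
 * log's metadata buffer list. A transaction must be attached to
 * current->journal_info when this is called.
 */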
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr))
		goto out;
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	if (!list_empty(&le->le_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total;
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	limit = buf_limit(sdp);
	/* for 4k blocks, limit = 503 */

	gfs2_log_lock(sdp);
	total = sdp->sd_log_num_buf;
	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
		gfs2_log_lock(sdp);
		ld = bh_log_desc(bh);
		ptr = bh_log_ptr(bh);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		submit_bh(WRITE_SYNC, bh);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			submit_bh(WRITE_SYNC, bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
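
/**
 * buf_lo_scan_elements - Replay the metadata blocks named by a log descriptor
 * @jd: The journal descriptor
 * @start: The journal block holding the descriptor
 * @ld: The log descriptor
 * @ptr: The array of block numbers following the descriptor
 * @pass: The recovery pass (metadata is replayed on pass 1)
 *
 * Copies each logged block back to its in-place location, unless a
 * later revoke covers it.
 */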
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_le.le_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			submit_bh(WRITE_SYNC, bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	submit_bh(WRITE_SYNC, bh);
}
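
/**
 * revoke_lo_after_commit - Free the revoke entries once they are on disk
 * @sdp: The superblock
 * @ai: The AIL list for this log flush (unused here)
 *
 * Drops each revoke's reference on its glock and frees the bufdata.
 */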
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);

	gfs2_log_lock(sdp);
	if (!list_empty(&le->le_list)) {
		gfs2_log_unlock(sdp);
		return;
	}
	gfs2_rgrp_bh_hold(rgd);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}
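
/**
 * rg_lo_after_commit - Release resource groups after the log is committed
 * @sdp: The superblock
 * @ai: The AIL list for this log flush (unused here)
 *
 * Repolishes each rgrp's clone bitmaps and drops the bitmap buffer
 * reference taken in rg_lo_add().
 */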
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (tr) {
		if (!list_empty(&bd->bd_list_tr))
			goto out;
		tr->tr_touched = 1;
		if (gfs2_is_jdata(ip)) {
			tr->tr_num_buf++;
			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		}
	}
	if (!list_empty(&le->le_list))
		goto out;

	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
	} else {
		list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
	}
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr, KM_USER0);
}
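
/**
 * gfs2_write_blocks - Write out a log descriptor and its queued data buffers
 * @sdp: The superblock
 * @bh: The log descriptor buffer (may be NULL on the first call)
 * @list: The buffers queued behind this descriptor
 * @done: Where to move each buffer once it has been submitted
 * @n: The number of buffers described by @bh
 *
 * Escaped buffers are copied into a fresh log buffer with their magic
 * number zeroed; all others are written via a "fake" buffer head that
 * aliases the data page.
 */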
static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
			      struct list_head *list, struct list_head *done,
			      unsigned int n)
{
	struct buffer_head *bh1;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd;
	__be64 *ptr;

	if (!bh)
		return;

	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(n + 1);
	ld->ld_data1 = cpu_to_be32(n);

	ptr = bh_log_ptr(bh);

	get_bh(bh);
	submit_bh(WRITE_SYNC, bh);
	gfs2_log_lock(sdp);
	while (!list_empty(list)) {
		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, done);
		get_bh(bd->bd_bh);
		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
			gfs2_log_incr_head(sdp);
			ptr += 2;
		}
		gfs2_log_unlock(sdp);
		lock_buffer(bd->bd_bh);
		if (buffer_escaped(bd->bd_bh)) {
			void *kaddr;
			bh1 = gfs2_log_get_buf(sdp);
			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
			       bh1->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			*(__be32 *)bh1->b_data = 0;
			clear_buffer_escaped(bd->bd_bh);
			unlock_buffer(bd->bd_bh);
			brelse(bd->bd_bh);
		} else {
			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
		}
		submit_bh(WRITE_SYNC, bh1);
		gfs2_log_lock(sdp);
		ptr += 2;
	}
	gfs2_log_unlock(sdp);
	brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd = NULL;
	struct buffer_head *bh = NULL;
	unsigned int n = 0;
	__be64 *ptr = NULL, *end = NULL;
	LIST_HEAD(processed);
	LIST_HEAD(in_progress);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_databuf)) {
		if (ptr == end) {
			gfs2_log_unlock(sdp);
			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
			n = 0;
			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
			ptr = bh_log_ptr(bh);
			end = bh_ptr_end(bh) - 1;
			gfs2_log_lock(sdp);
			continue;
		}
		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, &in_progress);
		gfs2_check_magic(bd->bd_bh);
		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
		/* Test the escaped flag on the data buffer itself, not on
		 * the log descriptor buffer, which gfs2_check_magic() never
		 * touches.
		 */
		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
		n++;
	}
	gfs2_log_unlock(sdp);
	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
	gfs2_log_lock(sdp);
	list_splice(&processed, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */
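
/**
 * databuf_lo_after_scan - Sync replayed data blocks and report totals
 * @jd: The journal descriptor
 * @error: Non-zero if replay of this journal failed
 * @pass: The recovery pass
 */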
static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}


const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};