/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * journal.c
 *
 * Defines functions of the journalling api
 *
 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/random.h>
#include <linux/delay.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "quota.h"
#include "file.h"
#include "namei.h"

#include "buffer_head_io.h"
#include "ocfs2_trace.h"

DEFINE_SPINLOCK(trans_inc_lock);

#define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000

static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot,
                                 enum ocfs2_orphan_reco_type orphan_reco_type);
static int ocfs2_commit_thread(void *arg);
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec,
                                            enum ocfs2_orphan_reco_type orphan_reco_type);

static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 0);
}

static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 1);
}

/*
 * This replay_map is to track online/offline slots, so we can recover
 * offline slots during recovery and mount
 */

enum ocfs2_replay_state {
        REPLAY_UNNEEDED = 0,    /* Replay is not needed, so ignore this map */
        REPLAY_NEEDED,          /* Replay slots marked in rm_replay_slots */
        REPLAY_DONE             /* Replay was already queued */
};

struct ocfs2_replay_map {
        unsigned int rm_slots;
        enum ocfs2_replay_state rm_state;
        unsigned char rm_replay_slots[0];
};
static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
{
        if (!osb->replay_map)
                return;

        /* If we've already queued the replay, we don't have any more to do */
        if (osb->replay_map->rm_state == REPLAY_DONE)
                return;

        osb->replay_map->rm_state = state;
}

int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
{
        struct ocfs2_replay_map *replay_map;
        int i, node_num;

        /* If replay map is already set, we don't do it again */
        if (osb->replay_map)
                return 0;

        replay_map = kzalloc(sizeof(struct ocfs2_replay_map) +
                             (osb->max_slots * sizeof(char)), GFP_KERNEL);

        if (!replay_map) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        spin_lock(&osb->osb_lock);

        replay_map->rm_slots = osb->max_slots;
        replay_map->rm_state = REPLAY_UNNEEDED;

        /* set rm_replay_slots for offline slot(s) */
        for (i = 0; i < replay_map->rm_slots; i++) {
                if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
                        replay_map->rm_replay_slots[i] = 1;
        }

        osb->replay_map = replay_map;
        spin_unlock(&osb->osb_lock);
        return 0;
}

static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
                                     enum ocfs2_orphan_reco_type orphan_reco_type)
{
        struct ocfs2_replay_map *replay_map = osb->replay_map;
        int i;

        if (!replay_map)
                return;

        if (replay_map->rm_state != REPLAY_NEEDED)
                return;

        for (i = 0; i < replay_map->rm_slots; i++)
                if (replay_map->rm_replay_slots[i])
                        ocfs2_queue_recovery_completion(osb->journal, i, NULL,
                                                        NULL, NULL,
                                                        orphan_reco_type);
        replay_map->rm_state = REPLAY_DONE;
}

static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
{
        struct ocfs2_replay_map *replay_map = osb->replay_map;

        if (!osb->replay_map)
                return;

        kfree(replay_map);
        osb->replay_map = NULL;
}
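/*
 * Illustrative sketch (not new code): the replay_map lifecycle as the
 * surrounding code appears to use it.  ocfs2_compute_replay_slots()
 * records which slots are offline; once journal replay decides those
 * slots need orphan processing, the caller flips the state, queues the
 * slots, and frees the map:
 *
 *      ocfs2_compute_replay_slots(osb);
 *      ...
 *      ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
 *      ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
 *      ocfs2_free_replay_slots(osb);
 */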
int ocfs2_recovery_init(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        mutex_init(&osb->recovery_lock);
        osb->disable_recovery = 0;
        osb->recovery_thread_task = NULL;
        init_waitqueue_head(&osb->recovery_event);

        rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
                     osb->max_slots * sizeof(unsigned int),
                     GFP_KERNEL);
        if (!rm) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        rm->rm_entries = (unsigned int *)((char *)rm +
                                          sizeof(struct ocfs2_recovery_map));
        osb->recovery_map = rm;

        return 0;
}

/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
        mb();
        return osb->recovery_thread_task != NULL;
}

void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        /* disable any new recovery threads and wait for any currently
         * running ones to exit. Do this before setting the vol_state. */
        mutex_lock(&osb->recovery_lock);
        osb->disable_recovery = 1;
        mutex_unlock(&osb->recovery_lock);
        wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
        flush_workqueue(ocfs2_wq);

        /*
         * Now that recovery is shut down, and the osb is about to be
         * freed, the osb_lock is not taken here.
         */
        rm = osb->recovery_map;
        /* XXX: Should we bug if there are dirty entries? */

        kfree(rm);
}

static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        assert_spin_locked(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        return 1;
        }

        return 0;
}

/* Behaves like test-and-set. Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
                                  unsigned int node_num)
{
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        if (__ocfs2_recovery_map_test(osb, node_num)) {
                spin_unlock(&osb->osb_lock);
                return 1;
        }

        /* XXX: Can this be exploited? Not from o2dlm... */
        BUG_ON(rm->rm_used >= osb->max_slots);

        rm->rm_entries[rm->rm_used] = node_num;
        rm->rm_used++;
        spin_unlock(&osb->osb_lock);

        return 0;
}

static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        break;
        }

        if (i < rm->rm_used) {
                /* XXX: be careful with the pointer math */
                memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
                        (rm->rm_used - i - 1) * sizeof(unsigned int));
                rm->rm_used--;
        }

        spin_unlock(&osb->osb_lock);
}

static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
        int status = 0;
        unsigned int flushed;
        struct ocfs2_journal *journal = NULL;

        journal = osb->journal;

        /* Flush all pending commits and checkpoint the journal. */
        down_write(&journal->j_trans_barrier);

        flushed = atomic_read(&journal->j_num_trans);
        trace_ocfs2_commit_cache_begin(flushed);
        if (flushed == 0) {
                up_write(&journal->j_trans_barrier);
                goto finally;
        }

        jbd2_journal_lock_updates(journal->j_journal);
        status = jbd2_journal_flush(journal->j_journal);
        jbd2_journal_unlock_updates(journal->j_journal);
        if (status < 0) {
                up_write(&journal->j_trans_barrier);
                mlog_errno(status);
                goto finally;
        }

        ocfs2_inc_trans_id(journal);

        flushed = atomic_read(&journal->j_num_trans);
        atomic_set(&journal->j_num_trans, 0);
        up_write(&journal->j_trans_barrier);

        trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);

        ocfs2_wake_downconvert_thread(osb);
        wake_up(&journal->j_checkpointed);
finally:
        return status;
}

handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
        journal_t *journal = osb->journal->j_journal;
        handle_t *handle;

        BUG_ON(!osb || !osb->journal->j_journal);

        if (ocfs2_is_hard_readonly(osb))
                return ERR_PTR(-EROFS);

        BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
        BUG_ON(max_buffs <= 0);

        /* Nested transaction? Just return the handle... */
        if (journal_current_handle())
                return jbd2_journal_start(journal, max_buffs);

        sb_start_intwrite(osb->sb);

        down_read(&osb->journal->j_trans_barrier);

        handle = jbd2_journal_start(journal, max_buffs);
        if (IS_ERR(handle)) {
                up_read(&osb->journal->j_trans_barrier);
                sb_end_intwrite(osb->sb);

                mlog_errno(PTR_ERR(handle));

                if (is_journal_aborted(journal)) {
                        ocfs2_abort(osb->sb, "Detected aborted journal");
                        handle = ERR_PTR(-EROFS);
                }
        } else {
                if (!ocfs2_mount_local(osb))
                        atomic_inc(&(osb->journal->j_num_trans));
        }

        return handle;
}

int ocfs2_commit_trans(struct ocfs2_super *osb,
                       handle_t *handle)
{
        int ret, nested;
        struct ocfs2_journal *journal = osb->journal;

        BUG_ON(!handle);

        nested = handle->h_ref > 1;
        ret = jbd2_journal_stop(handle);
        if (ret < 0)
                mlog_errno(ret);

        if (!nested) {
                up_read(&journal->j_trans_barrier);
                sb_end_intwrite(osb->sb);
        }

        return ret;
}
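/*
 * A minimal sketch (not part of this file) of the usual caller pattern
 * around the handle API above; `osb`, `inode` and `di_bh` are assumed
 * context:
 *
 *      handle_t *handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 *                                       OCFS2_JOURNAL_ACCESS_WRITE);
 *      if (!status) {
 *              ... modify di_bh->b_data ...
 *              ocfs2_journal_dirty(handle, di_bh);
 *      }
 *      ocfs2_commit_trans(osb, handle);
 */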
/*
 * 'nblocks' is what you want to add to the current transaction.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
        int status, old_nblocks;

        BUG_ON(!handle);
        BUG_ON(nblocks < 0);

        if (!nblocks)
                return 0;

        old_nblocks = handle->h_buffer_credits;

        trace_ocfs2_extend_trans(old_nblocks, nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
        status = 1;
#else
        status = jbd2_journal_extend(handle, nblocks);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
#endif

        if (status > 0) {
                trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
                status = jbd2_journal_restart(handle,
                                              old_nblocks + nblocks);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}
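/*
 * Hedged example (not from this file): because ocfs2_extend_trans() may
 * restart the handle, the WARNING above means callers re-run
 * journal_access on any buffer they still intend to modify.  With
 * assumed context `inode`, `di_bh` and `blocks_needed`:
 *
 *      status = ocfs2_extend_trans(handle, blocks_needed);
 *      if (status < 0)
 *              goto bail;
 *      status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 *                                       OCFS2_JOURNAL_ACCESS_WRITE);
 */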
/*
 * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
 * If that fails, restart the transaction & regain write access for the
 * buffer head which is used for metadata modifications.
 * Taken from Ext4: extend_or_restart_transaction()
 */
int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
{
        int status, old_nblks;

        BUG_ON(!handle);

        old_nblks = handle->h_buffer_credits;
        trace_ocfs2_allocate_extend_trans(old_nblks, thresh);

        if (old_nblks < thresh)
                return 0;

        status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        if (status > 0) {
                status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
                if (status < 0)
                        mlog_errno(status);
        }

bail:
        return status;
}


struct ocfs2_triggers {
        struct jbd2_buffer_trigger_type ot_triggers;
        int ot_offset;
};

static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}

static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
{
        struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, data + ot->ot_offset);
}

/*
 * Quota blocks have their own trigger because the struct ocfs2_block_check
 * offset depends on the blocksize.
 */
static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                    struct buffer_head *bh,
                                    void *data, size_t size)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(size, data);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, &dqt->dq_check);
}

/*
 * Directory blocks also have their own trigger because the
 * struct ocfs2_block_check offset depends on the blocksize.
 */
static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                    struct buffer_head *bh,
                                    void *data, size_t size)
{
        struct ocfs2_dir_block_trailer *trailer =
                ocfs2_dir_trailer_from_size(size, data);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, &trailer->db_check);
}

static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
                                struct buffer_head *bh)
{
        mlog(ML_ERROR,
             "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
             "bh->b_blocknr = %llu\n",
             (unsigned long)bh,
             (unsigned long long)bh->b_blocknr);

        ocfs2_error(bh->b_bdev->bd_super,
                    "JBD2 has aborted our journal, ocfs2 cannot continue\n");
}

static struct ocfs2_triggers di_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dinode, i_check),
};

static struct ocfs2_triggers eb_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_extent_block, h_check),
};

static struct ocfs2_triggers rb_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_refcount_block, rf_check),
};

static struct ocfs2_triggers gd_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_group_desc, bg_check),
};

static struct ocfs2_triggers db_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_db_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
};

static struct ocfs2_triggers xb_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_xattr_block, xb_check),
};

static struct ocfs2_triggers dq_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_dq_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
};

static struct ocfs2_triggers dr_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_root_block, dr_check),
};

static struct ocfs2_triggers dl_triggers = {
        .ot_triggers = {
                .t_frozen = ocfs2_frozen_trigger,
                .t_abort = ocfs2_abort_trigger,
        },
        .ot_offset      = offsetof(struct ocfs2_dx_leaf, dl_check),
};
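/*
 * Illustrative sketch (hypothetical, not in ocfs2): a new metadata type
 * whose struct ocfs2_block_check lives at a fixed offset would be wired
 * up exactly like the instances above:
 *
 *      static struct ocfs2_triggers ex_triggers = {
 *              .ot_triggers = {
 *                      .t_frozen = ocfs2_frozen_trigger,
 *                      .t_abort = ocfs2_abort_trigger,
 *              },
 *              .ot_offset = offsetof(struct ocfs2_example_block, ex_check),
 *      };
 *
 * Types whose check offset depends on the blocksize (directory and quota
 * blocks above) instead supply their own t_frozen callback.
 */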
static int __ocfs2_journal_access(handle_t *handle,
                                  struct ocfs2_caching_info *ci,
                                  struct buffer_head *bh,
                                  struct ocfs2_triggers *triggers,
                                  int type)
{
        int status;
        struct ocfs2_super *osb =
                OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        BUG_ON(!ci || !ci->ci_ops);
        BUG_ON(!handle);
        BUG_ON(!bh);

        trace_ocfs2_journal_access(
                (unsigned long long)ocfs2_metadata_cache_owner(ci),
                (unsigned long long)bh->b_blocknr, type, bh->b_size);

        /* we can safely remove this assertion after testing. */
        if (!buffer_uptodate(bh)) {
                mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
                mlog(ML_ERROR, "b_blocknr=%llu\n",
                     (unsigned long long)bh->b_blocknr);
                BUG();
        }

        /* Set the current transaction information on the ci so
         * that the locking code knows whether it can drop its locks
         * on this ci or not. We're protected from the commit
         * thread updating the current transaction id until
         * ocfs2_commit_trans() because ocfs2_start_trans() took
         * j_trans_barrier for us. */
        ocfs2_set_ci_lock_trans(osb->journal, ci);

        ocfs2_metadata_cache_io_lock(ci);
        switch (type) {
        case OCFS2_JOURNAL_ACCESS_CREATE:
        case OCFS2_JOURNAL_ACCESS_WRITE:
                status = jbd2_journal_get_write_access(handle, bh);
                break;

        case OCFS2_JOURNAL_ACCESS_UNDO:
                status = jbd2_journal_get_undo_access(handle, bh);
                break;

        default:
                status = -EINVAL;
                mlog(ML_ERROR, "Unknown access type!\n");
        }
        if (!status && ocfs2_meta_ecc(osb) && triggers)
                jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
        ocfs2_metadata_cache_io_unlock(ci);

        if (status < 0)
                mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
                     status, type);

        return status;
}
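/*
 * A hedged note on the access types above: CREATE/WRITE map to
 * jbd2_journal_get_write_access(), while UNDO maps to
 * jbd2_journal_get_undo_access(), which preserves a pre-modification
 * copy of the buffer.  Undo access matters for allocator bitmaps, where
 * bits cleared in a running transaction must not be re-set until that
 * transaction commits.  A caller freeing bits might look like (assumed
 * context: `alloc_inode`, `group_bh`):
 *
 *      status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
 *                                       group_bh, OCFS2_JOURNAL_ACCESS_UNDO);
 */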
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
}

int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
}

int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &rb_triggers, type);
}

int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
}

int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
}

int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
}

int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
}

int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
}

int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
}

int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
                         struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}

void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
        int status;

        trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);

        status = jbd2_journal_dirty_metadata(handle, bh);
        if (status) {
                mlog_errno(status);
                if (!is_handle_aborted(handle)) {
                        journal_t *journal = handle->h_transaction->t_journal;
                        struct super_block *sb = bh->b_bdev->bd_super;

                        mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
                             "Aborting transaction and journal.\n");
                        handle->h_err = status;
                        jbd2_journal_abort_handle(handle);
                        jbd2_journal_abort(journal, status);
                        ocfs2_abort(sb, "Journal already aborted.\n");
                }
        }
}
" 783 "Aborting transaction and journal.\n"); 784 handle->h_err = status; 785 jbd2_journal_abort_handle(handle); 786 jbd2_journal_abort(journal, status); 787 ocfs2_abort(sb, "Journal already aborted.\n"); 788 } 789 } 790 } 791 792 #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) 793 794 void ocfs2_set_journal_params(struct ocfs2_super *osb) 795 { 796 journal_t *journal = osb->journal->j_journal; 797 unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL; 798 799 if (osb->osb_commit_interval) 800 commit_interval = osb->osb_commit_interval; 801 802 write_lock(&journal->j_state_lock); 803 journal->j_commit_interval = commit_interval; 804 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) 805 journal->j_flags |= JBD2_BARRIER; 806 else 807 journal->j_flags &= ~JBD2_BARRIER; 808 write_unlock(&journal->j_state_lock); 809 } 810 811 int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) 812 { 813 int status = -1; 814 struct inode *inode = NULL; /* the journal inode */ 815 journal_t *j_journal = NULL; 816 struct ocfs2_dinode *di = NULL; 817 struct buffer_head *bh = NULL; 818 struct ocfs2_super *osb; 819 int inode_lock = 0; 820 821 BUG_ON(!journal); 822 823 osb = journal->j_osb; 824 825 /* already have the inode for our journal */ 826 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, 827 osb->slot_num); 828 if (inode == NULL) { 829 status = -EACCES; 830 mlog_errno(status); 831 goto done; 832 } 833 if (is_bad_inode(inode)) { 834 mlog(ML_ERROR, "access error (bad inode)\n"); 835 iput(inode); 836 inode = NULL; 837 status = -EACCES; 838 goto done; 839 } 840 841 SET_INODE_JOURNAL(inode); 842 OCFS2_I(inode)->ip_open_count++; 843 844 /* Skip recovery waits here - journal inode metadata never 845 * changes in a live cluster so it can be considered an 846 * exception to the rule. 
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
        int status = -1;
        struct inode *inode = NULL; /* the journal inode */
        journal_t *j_journal = NULL;
        struct ocfs2_dinode *di = NULL;
        struct buffer_head *bh = NULL;
        struct ocfs2_super *osb;
        int inode_lock = 0;

        BUG_ON(!journal);

        osb = journal->j_osb;

        /* already have the inode for our journal */
        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            osb->slot_num);
        if (inode == NULL) {
                status = -EACCES;
                mlog_errno(status);
                goto done;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto done;
        }

        SET_INODE_JOURNAL(inode);
        OCFS2_I(inode)->ip_open_count++;

        /* Skip recovery waits here - journal inode metadata never
         * changes in a live cluster so it can be considered an
         * exception to the rule. */
        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not get lock on journal!\n");
                goto done;
        }

        inode_lock = 1;
        di = (struct ocfs2_dinode *)bh->b_data;

        if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) {
                mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
                     i_size_read(inode));
                status = -EINVAL;
                goto done;
        }

        trace_ocfs2_journal_init(i_size_read(inode),
                                 (unsigned long long)inode->i_blocks,
                                 OCFS2_I(inode)->ip_clusters);

        /* call the kernel's journal init function now */
        j_journal = jbd2_journal_init_inode(inode);
        if (j_journal == NULL) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = -EINVAL;
                goto done;
        }

        trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);

        *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
                  OCFS2_JOURNAL_DIRTY_FL);

        journal->j_journal = j_journal;
        journal->j_inode = inode;
        journal->j_bh = bh;

        ocfs2_set_journal_params(osb);

        journal->j_state = OCFS2_JOURNAL_LOADED;

        status = 0;
done:
        if (status < 0) {
                if (inode_lock)
                        ocfs2_inode_unlock(inode, 1);
                brelse(bh);
                if (inode) {
                        OCFS2_I(inode)->ip_open_count--;
                        iput(inode);
                }
        }

        return status;
}

static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
        le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
        return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}

static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed)
{
        int status;
        unsigned int flags;
        struct ocfs2_journal *journal = osb->journal;
        struct buffer_head *bh = journal->j_bh;
        struct ocfs2_dinode *fe;

        fe = (struct ocfs2_dinode *)bh->b_data;

        /* The journal bh on the osb always comes from ocfs2_journal_init()
         * and was validated there inside ocfs2_inode_lock_full(). It's a
         * code bug if we mess it up. */
        BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        if (dirty)
                flags |= OCFS2_JOURNAL_DIRTY_FL;
        else
                flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        if (replayed)
                ocfs2_bump_recovery_generation(fe);

        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
        status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
        if (status < 0)
                mlog_errno(status);

        return status;
}
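/*
 * Sketch of the dirty-flag lifecycle as used by the surrounding code
 * (descriptive only, not a new API): a successful mount marks the
 * journal dirty, a clean shutdown clears it, so a flag still set at the
 * next mount implies replay is needed:
 *
 *      ocfs2_journal_load():      ocfs2_journal_toggle_dirty(osb, 1, replayed);
 *      ocfs2_journal_shutdown():  ocfs2_journal_toggle_dirty(osb, 0, 0);
 *      ocfs2_replay_journal():    clears OCFS2_JOURNAL_DIRTY_FL and bumps
 *                                 the recovery generation after replay.
 */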
/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = NULL;
        int status = 0;
        struct inode *inode = NULL;
        int num_running_trans = 0;

        BUG_ON(!osb);

        journal = osb->journal;
        if (!journal)
                goto done;

        inode = journal->j_inode;

        if (journal->j_state != OCFS2_JOURNAL_LOADED)
                goto done;

        /* need to inc inode use count - jbd2_journal_destroy will iput. */
        if (!igrab(inode))
                BUG();

        num_running_trans = atomic_read(&(osb->journal->j_num_trans));
        trace_ocfs2_journal_shutdown(num_running_trans);

        /* Do a commit_cache here. It will flush our journal, *and*
         * release any locks that are still held.
         * Set the SHUTDOWN flag and release the trans lock;
         * the commit thread will take the trans lock for us below. */
        journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

        /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
         * drop the trans_lock (which we want to hold until we
         * completely destroy the journal). */
        if (osb->commit_task) {
                /* Wait for the commit thread */
                trace_ocfs2_journal_shutdown_wait(osb->commit_task);
                kthread_stop(osb->commit_task);
                osb->commit_task = NULL;
        }

        BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

        if (ocfs2_mount_local(osb)) {
                jbd2_journal_lock_updates(journal->j_journal);
                status = jbd2_journal_flush(journal->j_journal);
                jbd2_journal_unlock_updates(journal->j_journal);
                if (status < 0)
                        mlog_errno(status);
        }

        if (status == 0) {
                /*
                 * Do not toggle if flush was unsuccessful otherwise
                 * will leave dirty metadata in a "clean" journal
                 */
                status = ocfs2_journal_toggle_dirty(osb, 0, 0);
                if (status < 0)
                        mlog_errno(status);
        }

        /* Shutdown the kernel journal system */
        jbd2_journal_destroy(journal->j_journal);
        journal->j_journal = NULL;

        OCFS2_I(inode)->ip_open_count--;

        /* unlock our journal */
        ocfs2_inode_unlock(inode, 1);

        brelse(journal->j_bh);
        journal->j_bh = NULL;

        journal->j_state = OCFS2_JOURNAL_FREE;

//      up_write(&journal->j_trans_barrier);
done:
        if (inode)
                iput(inode);
}
static void ocfs2_clear_journal_error(struct super_block *sb,
                                      journal_t *journal,
                                      int slot)
{
        int olderr;

        olderr = jbd2_journal_errno(journal);
        if (olderr) {
                mlog(ML_ERROR, "File system error %d recorded in "
                     "journal %u.\n", olderr, slot);
                mlog(ML_ERROR, "File system on device %s needs checking.\n",
                     sb->s_id);

                jbd2_journal_ack_err(journal);
                jbd2_journal_clear_err(journal);
        }
}

int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
        int status = 0;
        struct ocfs2_super *osb;

        BUG_ON(!journal);

        osb = journal->j_osb;

        status = jbd2_journal_load(journal->j_journal);
        if (status < 0) {
                mlog(ML_ERROR, "Failed to load journal!\n");
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* Launch the commit thread */
        if (!local) {
                osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
                                               "ocfs2cmt");
                if (IS_ERR(osb->commit_task)) {
                        status = PTR_ERR(osb->commit_task);
                        osb->commit_task = NULL;
                        mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
                             "error=%d", status);
                        goto done;
                }
        } else
                osb->commit_task = NULL;

done:
        return status;
}

/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
        int status;

        BUG_ON(!journal);

        status = jbd2_journal_wipe(journal->j_journal, full);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
        if (status < 0)
                mlog_errno(status);

bail:
        return status;
}

static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
        int empty;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        empty = (rm->rm_used == 0);
        spin_unlock(&osb->osb_lock);

        return empty;
}

void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
        wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}

/*
 * JBD2 might read a cached version of another node's journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up-to-date version of those blocks is to force-read
 * them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date. We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
        int status = 0;
        int i;
        u64 v_blkno, p_blkno, p_blocks, num_blocks;
#define CONCURRENT_JOURNAL_FILL 32ULL
        struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];

        memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);

        num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
        v_blkno = 0;
        while (v_blkno < num_blocks) {
                status = ocfs2_extent_map_get_blocks(inode, v_blkno,
                                                     &p_blkno, &p_blocks, NULL);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                if (p_blocks > CONCURRENT_JOURNAL_FILL)
                        p_blocks = CONCURRENT_JOURNAL_FILL;

                /* We are reading journal data which should not
                 * be put in the uptodate cache */
                status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
                                                p_blkno, p_blocks, bhs);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                for (i = 0; i < p_blocks; i++) {
                        brelse(bhs[i]);
                        bhs[i] = NULL;
                }

                v_blkno += p_blocks;
        }

bail:
        for (i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
                brelse(bhs[i]);
        return status;
}

struct ocfs2_la_recovery_item {
        struct list_head        lri_list;
        int                     lri_slot;
        struct ocfs2_dinode     *lri_la_dinode;
        struct ocfs2_dinode     *lri_tl_dinode;
        struct ocfs2_quota_recovery *lri_qrec;
        enum ocfs2_orphan_reco_type  lri_orphan_reco_type;
};
/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process.
 */
void ocfs2_complete_recovery(struct work_struct *work)
{
        int ret = 0;
        struct ocfs2_journal *journal =
                container_of(work, struct ocfs2_journal, j_recovery_work);
        struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item, *n;
        struct ocfs2_quota_recovery *qrec;
        enum ocfs2_orphan_reco_type orphan_reco_type;
        LIST_HEAD(tmp_la_list);

        trace_ocfs2_complete_recovery(
                (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);

        spin_lock(&journal->j_lock);
        list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
        spin_unlock(&journal->j_lock);

        list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
                list_del_init(&item->lri_list);

                ocfs2_wait_on_quotas(osb);

                la_dinode = item->lri_la_dinode;
                tl_dinode = item->lri_tl_dinode;
                qrec = item->lri_qrec;
                orphan_reco_type = item->lri_orphan_reco_type;

                trace_ocfs2_complete_recovery_slot(item->lri_slot,
                        la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
                        tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
                        qrec);

                if (la_dinode) {
                        ret = ocfs2_complete_local_alloc_recovery(osb,
                                                                  la_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(la_dinode);
                }

                if (tl_dinode) {
                        ret = ocfs2_complete_truncate_log_recovery(osb,
                                                                   tl_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(tl_dinode);
                }

                ret = ocfs2_recover_orphans(osb, item->lri_slot,
                                            orphan_reco_type);
                if (ret < 0)
                        mlog_errno(ret);

                if (qrec) {
                        ret = ocfs2_finish_quota_recovery(osb, qrec,
                                                          item->lri_slot);
                        if (ret < 0)
                                mlog_errno(ret);
                        /* Recovery info is already freed now */
                }

                kfree(item);
        }

        trace_ocfs2_complete_recovery_end(ret);
}

/* NOTE: This function always eats your references to la_dinode and
 * tl_dinode, either manually on error, or by passing them to
 * ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec,
                                            enum ocfs2_orphan_reco_type orphan_reco_type)
{
        struct ocfs2_la_recovery_item *item;

        item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
        if (!item) {
                /* Though we wish to avoid it, we are in fact safe in
                 * skipping local alloc cleanup as fsck.ocfs2 is more
                 * than capable of reclaiming unused space. */
                kfree(la_dinode);
                kfree(tl_dinode);

                if (qrec)
                        ocfs2_free_quota_recovery(qrec);

                mlog_errno(-ENOMEM);
                return;
        }

        INIT_LIST_HEAD(&item->lri_list);
        item->lri_la_dinode = la_dinode;
        item->lri_slot = slot_num;
        item->lri_tl_dinode = tl_dinode;
        item->lri_qrec = qrec;
        item->lri_orphan_reco_type = orphan_reco_type;

        spin_lock(&journal->j_lock);
        list_add_tail(&item->lri_list, &journal->j_la_cleanups);
        queue_work(ocfs2_wq, &journal->j_recovery_work);
        spin_unlock(&journal->j_lock);
}
/* Called by the mount code to queue the last part of recovery for its
 * own and offline slot(s). */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = osb->journal;

        if (ocfs2_is_hard_readonly(osb))
                return;

        /* No need to queue up our truncate_log as regular cleanup will catch
         * that */
        ocfs2_queue_recovery_completion(journal, osb->slot_num,
                                        osb->local_alloc_copy, NULL, NULL,
                                        ORPHAN_NEED_TRUNCATE);
        ocfs2_schedule_truncate_log_flush(osb, 0);

        osb->local_alloc_copy = NULL;
        osb->dirty = 0;

        /* queue to recover orphan slots for all offline slots */
        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
        ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
        ocfs2_free_replay_slots(osb);
}

void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
        if (osb->quota_rec) {
                ocfs2_queue_recovery_completion(osb->journal,
                                                osb->slot_num,
                                                NULL,
                                                NULL,
                                                osb->quota_rec,
                                                ORPHAN_NEED_TRUNCATE);
                osb->quota_rec = NULL;
        }
}
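/*
 * Rough outline (descriptive only, not new code) of the recovery thread
 * below, under the super lock:
 *
 *      ocfs2_super_lock(osb, 1);
 *      ocfs2_compute_replay_slots(osb);
 *      while (rm->rm_used)
 *              ocfs2_recover_node(osb, node_num, slot_num);
 *      ocfs2_begin_quota_recovery() for each remembered slot;
 *      ocfs2_super_unlock(osb, 1);
 *      loop back to "restart" if the recovery map refilled meanwhile.
 */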
static int __ocfs2_recovery_thread(void *arg)
{
        int status, node_num, slot_num;
        struct ocfs2_super *osb = arg;
        struct ocfs2_recovery_map *rm = osb->recovery_map;
        int *rm_quota = NULL;
        int rm_quota_used = 0, i;
        struct ocfs2_quota_recovery *qrec;

        status = ocfs2_wait_on_mount(osb);
        if (status < 0) {
                goto bail;
        }

        rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
        if (!rm_quota) {
                status = -ENOMEM;
                goto bail;
        }
restart:
        status = ocfs2_super_lock(osb, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_compute_replay_slots(osb);
        if (status < 0)
                mlog_errno(status);

        /* queue recovery for our own slot */
        ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
                                        NULL, NULL, ORPHAN_NO_NEED_TRUNCATE);

        spin_lock(&osb->osb_lock);
        while (rm->rm_used) {
                /* It's always safe to remove entry zero, as we won't
                 * clear it until ocfs2_recover_node() has succeeded. */
                node_num = rm->rm_entries[0];
                spin_unlock(&osb->osb_lock);
                slot_num = ocfs2_node_num_to_slot(osb, node_num);
                trace_ocfs2_recovery_thread_node(node_num, slot_num);
                if (slot_num == -ENOENT) {
                        status = 0;
                        goto skip_recovery;
                }

                /* It is a bit subtle with quota recovery. We cannot do it
                 * immediately because we have to obtain cluster locks from
                 * quota files and we also don't want to just skip it because
                 * then quota usage would be out of sync until some node takes
                 * the slot. So we remember which nodes need quota recovery
                 * and when everything else is done, we recover quotas. */
                for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++)
                        ;
                if (i == rm_quota_used)
                        rm_quota[rm_quota_used++] = slot_num;

                status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
                if (!status) {
                        ocfs2_recovery_map_clear(osb, node_num);
                } else {
                        mlog(ML_ERROR,
                             "Error %d recovering node %d on device (%u,%u)!\n",
                             status, node_num,
                             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
                        mlog(ML_ERROR, "Volume requires unmount.\n");
                }

                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
        trace_ocfs2_recovery_thread_end(status);

        /* Refresh all journal recovery generations from disk */
        status = ocfs2_check_journals_nolocks(osb);
        status = (status == -EROFS) ? 0 : status;
        if (status < 0)
                mlog_errno(status);

        /* Now it is right time to recover quotas... We have to do this under
         * superblock lock so that no one can start using the slot (and crash)
         * before we recover it */
        for (i = 0; i < rm_quota_used; i++) {
                qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
                if (IS_ERR(qrec)) {
                        status = PTR_ERR(qrec);
                        mlog_errno(status);
                        continue;
                }
                ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
                                                NULL, NULL, qrec,
                                                ORPHAN_NEED_TRUNCATE);
        }

        ocfs2_super_unlock(osb, 1);

        /* queue recovery for offline slots */
        ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);

bail:
        mutex_lock(&osb->recovery_lock);
        if (!status && !ocfs2_recovery_completed(osb)) {
                mutex_unlock(&osb->recovery_lock);
                goto restart;
        }

        ocfs2_free_replay_slots(osb);
        osb->recovery_thread_task = NULL;
        mb(); /* sync with ocfs2_recovery_thread_running */
        wake_up(&osb->recovery_event);

        mutex_unlock(&osb->recovery_lock);

        kfree(rm_quota);

        /* no one is calling kthread_stop() for us, so the kthread() api
         * requires that we call do_exit(). And it isn't exported, but
         * complete_and_exit() seems to be a minimal wrapper around it. */
        complete_and_exit(NULL, status);
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
        mutex_lock(&osb->recovery_lock);

        trace_ocfs2_recovery_thread(node_num, osb->node_num,
                                    osb->disable_recovery,
                                    osb->recovery_thread_task,
                                    osb->disable_recovery ?
                                    -1 : ocfs2_recovery_map_set(osb, node_num));

        if (osb->disable_recovery)
                goto out;

        if (osb->recovery_thread_task)
                goto out;

        osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
                                                "ocfs2rec");
        if (IS_ERR(osb->recovery_thread_task)) {
                mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
                osb->recovery_thread_task = NULL;
        }

out:
        mutex_unlock(&osb->recovery_lock);
        wake_up(&osb->recovery_event);
}

static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
                                    int slot_num,
                                    struct buffer_head **bh,
                                    struct inode **ret_inode)
{
        int status = -EACCES;
        struct inode *inode = NULL;

        BUG_ON(slot_num >= osb->max_slots);

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (!inode || is_bad_inode(inode)) {
                mlog_errno(status);
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = 0;

bail:
        if (inode) {
                if (status || !ret_inode)
                        iput(inode);
                else
                        *ret_inode = inode;
        }
        return status;
}
1574 */ 1575 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { 1576 trace_ocfs2_replay_journal_recovered(slot_num, 1577 osb->slot_recovery_generations[slot_num], slot_reco_gen); 1578 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1579 status = -EBUSY; 1580 goto done; 1581 } 1582 1583 /* Continue with recovery as the journal has not yet been recovered */ 1584 1585 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); 1586 if (status < 0) { 1587 trace_ocfs2_replay_journal_lock_err(status); 1588 if (status != -ERESTARTSYS) 1589 mlog(ML_ERROR, "Could not lock journal!\n"); 1590 goto done; 1591 } 1592 got_lock = 1; 1593 1594 fe = (struct ocfs2_dinode *) bh->b_data; 1595 1596 flags = le32_to_cpu(fe->id1.journal1.ij_flags); 1597 slot_reco_gen = ocfs2_get_recovery_generation(fe); 1598 1599 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { 1600 trace_ocfs2_replay_journal_skip(node_num); 1601 /* Refresh recovery generation for the slot */ 1602 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1603 goto done; 1604 } 1605 1606 /* we need to run complete recovery for offline orphan slots */ 1607 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 1608 1609 printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\ 1610 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), 1611 MINOR(osb->sb->s_dev)); 1612 1613 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); 1614 1615 status = ocfs2_force_read_journal(inode); 1616 if (status < 0) { 1617 mlog_errno(status); 1618 goto done; 1619 } 1620 1621 journal = jbd2_journal_init_inode(inode); 1622 if (journal == NULL) { 1623 mlog(ML_ERROR, "Linux journal layer error\n"); 1624 status = -EIO; 1625 goto done; 1626 } 1627 1628 status = jbd2_journal_load(journal); 1629 if (status < 0) { 1630 mlog_errno(status); 1631 if (!igrab(inode)) 1632 BUG(); 1633 jbd2_journal_destroy(journal); 1634 goto done; 1635 } 1636 1637 ocfs2_clear_journal_error(osb->sb, journal, slot_num); 1638 1639 /* wipe the journal */ 1640 jbd2_journal_lock_updates(journal); 1641 status = jbd2_journal_flush(journal); 1642 jbd2_journal_unlock_updates(journal); 1643 if (status < 0) 1644 mlog_errno(status); 1645 1646 /* This will mark the node clean */ 1647 flags = le32_to_cpu(fe->id1.journal1.ij_flags); 1648 flags &= ~OCFS2_JOURNAL_DIRTY_FL; 1649 fe->id1.journal1.ij_flags = cpu_to_le32(flags); 1650 1651 /* Increment recovery generation to indicate successful recovery */ 1652 ocfs2_bump_recovery_generation(fe); 1653 osb->slot_recovery_generations[slot_num] = 1654 ocfs2_get_recovery_generation(fe); 1655 1656 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); 1657 status = ocfs2_write_block(osb, bh, INODE_CACHE(inode)); 1658 if (status < 0) 1659 mlog_errno(status); 1660 1661 if (!igrab(inode)) 1662 BUG(); 1663 1664 jbd2_journal_destroy(journal); 1665 1666 printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\ 1667 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), 1668 MINOR(osb->sb->s_dev)); 1669 done: 1670 /* drop the lock on this nodes journal */ 1671 if (got_lock) 1672 ocfs2_inode_unlock(inode, 1); 1673 1674 if (inode) 1675 iput(inode); 1676 1677 brelse(bh); 1678 1679 return status; 1680 } 1681 1682 /* 1683 * Do the most important parts of node recovery: 1684 * - Replay it's journal 1685 * - Stamp a clean local allocator file 1686 * - Stamp a clean truncate log 1687 * - Mark the node clean 1688 * 1689 * If this function completes without error, a node in OCFS2 can be 1690 * said to have been safely 
/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num)
{
        int status = 0;
        struct ocfs2_dinode *la_copy = NULL;
        struct ocfs2_dinode *tl_copy = NULL;

        trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);

        /* Should not ever be called to recover ourselves -- in that
         * case we should've called ocfs2_journal_load instead. */
        BUG_ON(osb->node_num == node_num);

        status = ocfs2_replay_journal(osb, node_num, slot_num);
        if (status < 0) {
                if (status == -EBUSY) {
                        trace_ocfs2_recover_node_skip(slot_num, node_num);
                        status = 0;
                        goto done;
                }
                mlog_errno(status);
                goto done;
        }

        /* Stamp a clean local alloc file AFTER recovering the journal... */
        status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* An error from begin_truncate_log_recovery is not
         * serious enough to warrant halting the rest of
         * recovery. */
        status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
        if (status < 0)
                mlog_errno(status);

        /* Likewise, this would be a strange but ultimately not so
         * harmful place to get an error... */
        status = ocfs2_clear_slot(osb, slot_num);
        if (status < 0)
                mlog_errno(status);

        /* This will kfree the memory pointed to by la_copy and tl_copy */
        ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
                                        tl_copy, NULL, ORPHAN_NEED_TRUNCATE);

        status = 0;
done:

        return status;
}

/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num)
{
        int status, flags;
        struct inode *inode = NULL;

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (inode == NULL) {
                mlog(ML_ERROR, "access error\n");
                status = -EACCES;
                goto bail;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
        status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
        if (status < 0) {
                if (status != -EAGAIN)
                        mlog_errno(status);
                goto bail;
        }

        ocfs2_inode_unlock(inode, 1);
bail:
        if (inode)
                iput(inode);

        return status;
}
/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
        unsigned int node_num;
        int status, i;
        u32 gen;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *di;

        /* This is called with the super block cluster lock, so we
         * know that the slot map can't change underneath us. */

        for (i = 0; i < osb->max_slots; i++) {
                /* Read journal inode to get the recovery generation */
                status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
                if (status) {
                        mlog_errno(status);
                        goto bail;
                }
                di = (struct ocfs2_dinode *)bh->b_data;
                gen = ocfs2_get_recovery_generation(di);
                brelse(bh);
                bh = NULL;

                spin_lock(&osb->osb_lock);
                osb->slot_recovery_generations[i] = gen;

                trace_ocfs2_mark_dead_nodes(i,
                                            osb->slot_recovery_generations[i]);

                if (i == osb->slot_num) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
                if (status == -ENOENT) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                if (__ocfs2_recovery_map_test(osb, node_num)) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }
                spin_unlock(&osb->osb_lock);

                /* Ok, we have a slot occupied by another node which
                 * is not in the recovery map. We trylock its journal
                 * file here to test if it's alive. */
                status = ocfs2_trylock_journal(osb, i);
                if (!status) {
                        /* Since we're called from mount, we know that
                         * the recovery thread can't race us on
                         * setting / checking the recovery bits. */
                        ocfs2_recovery_thread(osb, node_num);
                } else if ((status < 0) && (status != -EAGAIN)) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}

/*
 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
 * randomness to the timeout to minimize multiple nodes firing the timer at the
 * same time.
 */
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
        unsigned long time;

        get_random_bytes(&time, sizeof(time));
        time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
        return msecs_to_jiffies(time);
}
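/*
 * Worked example (the numbers follow from the constants above): with
 * ORPHAN_SCAN_SCHEDULE_TIMEOUT at 300000 ms and a jitter of (time % 5000),
 * each node re-arms its timer somewhere in [300000, 305000) ms, i.e.
 * 300-305 seconds, so concurrently mounted nodes drift apart instead of
 * all firing at once.
 */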
/*
 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
 * is done to catch any orphans that are left over in orphan directories.
 *
 * It scans all slots, even ones that are in use. It does so to handle the
 * case described below:
 *
 * Node 1 has an inode it was using. The dentry went away due to memory
 * pressure. Node 1 closes the inode, but it's on the free list. The node
 * has the open lock.
 * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
 * but node 1 has no dentry and doesn't get the message. It trylocks the
 * open lock, sees that another node has a PR, and does nothing.
 * Later node 2 runs its orphan dir. It igets the inode, trylocks the
 * open lock, sees the PR still, and does nothing.
 * Basically, we have to trigger an orphan iput on node 1. The only way
 * for this to happen is if node 1 runs node 2's orphan dir.
 *
 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * milliseconds. It gets an EX lock on os_lockres and checks the sequence
 * number stored in the LVB. If the sequence number has changed, it means
 * some other node has done the scan. This node skips the scan and tracks
 * the sequence number. If the sequence number didn't change, it means a
 * scan hasn't happened. The node queues a scan and increments the
 * sequence number in the LVB.
 */
static void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
        struct ocfs2_orphan_scan *os;
        int status, i;
        u32 seqno = 0;

        os = &osb->osb_orphan_scan;

        if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
                goto out;

        trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
                                            atomic_read(&os->os_state));

        status = ocfs2_orphan_scan_lock(osb, &seqno);
        if (status < 0) {
                if (status != -EAGAIN)
                        mlog_errno(status);
                goto out;
        }

        /* Do not queue the tasks if the volume is being umounted */
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
                goto unlock;

        if (os->os_seqno != seqno) {
                os->os_seqno = seqno;
                goto unlock;
        }

        for (i = 0; i < osb->max_slots; i++)
                ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
                                                NULL, ORPHAN_NO_NEED_TRUNCATE);
        /*
         * We queued a recovery on orphan slots, increment the sequence
         * number and update LVB so other nodes will skip the scan for a while
         */
        seqno++;
        os->os_count++;
        os->os_scantime = CURRENT_TIME;
unlock:
        ocfs2_orphan_scan_unlock(osb, seqno);
out:
        trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
                                          atomic_read(&os->os_state));
        return;
}

/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT ms */
static void ocfs2_orphan_scan_work(struct work_struct *work)
{
        struct ocfs2_orphan_scan *os;
        struct ocfs2_super *osb;

        os = container_of(work, struct ocfs2_orphan_scan,
                          os_orphan_scan_work.work);
        osb = os->os_osb;

        mutex_lock(&os->os_lock);
        ocfs2_queue_orphan_scan(osb);
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
                queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
                                   ocfs2_orphan_scan_timeout());
        mutex_unlock(&os->os_lock);
}

void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
        struct ocfs2_orphan_scan *os;

        os = &osb->osb_orphan_scan;
        if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
                atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
                mutex_lock(&os->os_lock);
                cancel_delayed_work(&os->os_orphan_scan_work);
                mutex_unlock(&os->os_lock);
        }
}

void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
        struct ocfs2_orphan_scan *os;

        os = &osb->osb_orphan_scan;
        os->os_osb = osb;
        os->os_count = 0;
        os->os_seqno = 0;
        mutex_init(&os->os_lock);
        INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
}

void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
{
        struct ocfs2_orphan_scan *os;

        os = &osb->osb_orphan_scan;
        os->os_scantime = CURRENT_TIME;
        if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
                atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
        else {
                atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
                queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
                                   ocfs2_orphan_scan_timeout());
        }
}
	if (name_len == 1 && !strncmp(".", name, 1))
		return 0;
	if (name_len == 2 && !strncmp("..", name, 2))
		return 0;

	/* Skip bad inodes so that recovery can continue */
	iter = ocfs2_iget(p->osb, ino,
			  OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
	if (IS_ERR(iter))
		return 0;

	/* Skip inodes which are already added to the recovery list, since
	 * dio may happen concurrently with unlink/rename */
	if (OCFS2_I(iter)->ip_next_orphan) {
		iput(iter);
		return 0;
	}

	trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
	p->head = iter;

	return 0;
}

static int ocfs2_queue_orphans(struct ocfs2_super *osb,
			       int slot,
			       struct inode **head)
{
	int status;
	struct inode *orphan_dir_inode = NULL;
	struct ocfs2_orphan_filldir_priv priv = {
		.ctx.actor = ocfs2_orphan_filldir,
		.osb = osb,
		.head = *head
	};

	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
						       ORPHAN_DIR_SYSTEM_INODE,
						       slot);
	if (!orphan_dir_inode) {
		status = -ENOENT;
		mlog_errno(status);
		return status;
	}

	mutex_lock(&orphan_dir_inode->i_mutex);
	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
	if (status) {
		mlog_errno(status);
		goto out_cluster;
	}

	*head = priv.head;

out_cluster:
	ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
	mutex_unlock(&orphan_dir_inode->i_mutex);
	iput(orphan_dir_inode);
	return status;
}

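/*
 * Used as the wait condition in ocfs2_mark_recovering_orphan_dir() below:
 * orphan recovery may proceed once no other process is in the middle of
 * an orphan wipe on this slot (osb_orphan_wipes[slot] has hit zero).
 */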
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
					      int slot)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = !osb->osb_orphan_wipes[slot];
	spin_unlock(&osb->osb_lock);
	return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
					     int slot)
{
	spin_lock(&osb->osb_lock);
	/* Mark ourselves such that new processes in delete_inode()
	 * know to quit early. */
	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
	while (osb->osb_orphan_wipes[slot]) {
		/* If any processes are already in the middle of an
		 * orphan wipe on this dir, then we need to wait for
		 * them. */
		spin_unlock(&osb->osb_lock);
		wait_event_interruptible(osb->osb_wipe_event,
			 ocfs2_orphan_recovery_can_continue(osb, slot));
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
					      int slot)
{
	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}

/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most of) the rest of the work.
 *
 * Orphan recovery can happen at any time, not just at mount, so we have
 * a couple of extra considerations:
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot,
				 enum ocfs2_orphan_reco_type orphan_reco_type)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *iter;
	struct ocfs2_inode_info *oi;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_recover_orphans(slot);

	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode);
	ocfs2_clear_recovering_orphan_dir(osb, slot);

	/* An error here should be noted, but we want to continue with as
	 * many queued inodes as we've got. */
	if (ret)
		mlog_errno(ret);

	while (inode) {
		oi = OCFS2_I(inode);
		trace_ocfs2_recover_orphans_iput(
					(unsigned long long)oi->ip_blkno);

		iter = oi->ip_next_orphan;
		oi->ip_next_orphan = NULL;

		ret = ocfs2_rw_lock(inode, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto next;
		}
		/*
		 * We need to take and drop the inode lock to force the
		 * inode to be re-read from disk.
		 */
		ret = ocfs2_inode_lock(inode, &di_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto unlock_rw;
		}

		di = (struct ocfs2_dinode *)di_bh->b_data;

		if (inode->i_nlink == 0) {
			spin_lock(&oi->ip_lock);
			/* Set the proper information to get us going into
			 * ocfs2_delete_inode. */
			oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
			spin_unlock(&oi->ip_lock);
		} else if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
			   (di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
			ret = ocfs2_truncate_file(inode, di_bh,
						  i_size_read(inode));
			if (ret < 0) {
				if (ret != -ENOSPC)
					mlog_errno(ret);
				goto unlock_inode;
			}

			ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
							  0, 0);
			if (ret)
				mlog_errno(ret);

			wake_up(&OCFS2_I(inode)->append_dio_wq);
		} /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */
unlock_inode:
		ocfs2_inode_unlock(inode, 1);
unlock_rw:
		ocfs2_rw_unlock(inode, 1);
next:
		iput(inode);
		brelse(di_bh);
		di_bh = NULL;
		inode = iter;
	}

	return ret;
}

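/*
 * Wait for the mount to finish. With quota == 0, either VOLUME_MOUNTED
 * or VOLUME_MOUNTED_QUOTAS satisfies the wait; with quota == 1 we keep
 * waiting until quotas are up as well (or the mount fails).
 */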
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
	/* This check is good because ocfs2 will wait on our recovery
	 * thread before changing it to something other than MOUNTED
	 * or DISABLED. */
	wait_event(osb->osb_mount_event,
		   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
		   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
		   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

	/* If there's an error on mount, then we may never get to the
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
		trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}

	return 0;
}

static int ocfs2_commit_thread(void *arg)
{
	int status;
	struct ocfs2_super *osb = arg;
	struct ocfs2_journal *journal = osb->journal;

	/* We can trust j_num_trans here because _should_stop() is only set
	 * in shutdown and nobody other than ourselves should be able to
	 * start transactions. Committing on shutdown might take a few
	 * iterations as final transactions put deleted inodes on the list */
	while (!(kthread_should_stop() &&
		 atomic_read(&journal->j_num_trans) == 0)) {

		wait_event_interruptible(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop());

		status = ocfs2_commit_cache(osb);
		if (status < 0) {
			static unsigned long abort_warn_time;

			/* Warn about this once per minute */
			if (printk_timed_ratelimit(&abort_warn_time, 60*HZ))
				mlog(ML_ERROR, "status = %d, journal is "
				     "already aborted.\n", status);
			/*
			 * After ocfs2_commit_cache() fails, j_num_trans has a
			 * non-zero value. Sleep here to avoid a busy-wait
			 * loop.
			 */
			msleep_interruptible(1000);
		}

		if (kthread_should_stop() &&
		    atomic_read(&journal->j_num_trans)) {
			mlog(ML_KTHREAD,
			     "commit_thread: %u transactions pending on "
			     "shutdown\n",
			     atomic_read(&journal->j_num_trans));
		}
	}

	return 0;
}

/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
	int ret = 0;
	unsigned int slot;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int journal_dirty = 0;

	for (slot = 0; slot < osb->max_slots; slot++) {
		ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		di = (struct ocfs2_dinode *) di_bh->b_data;

		osb->slot_recovery_generations[slot] =
			ocfs2_get_recovery_generation(di);

		if (le32_to_cpu(di->id1.journal1.ij_flags) &
		    OCFS2_JOURNAL_DIRTY_FL)
			journal_dirty = 1;

		brelse(di_bh);
		di_bh = NULL;
	}

out:
	if (journal_dirty)
		ret = -EROFS;
	return ret;
}