1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * journal.c 5 * 6 * Defines functions of journalling api 7 * 8 * Copyright (C) 2003, 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 */ 25 26 #include <linux/fs.h> 27 #include <linux/types.h> 28 #include <linux/slab.h> 29 #include <linux/highmem.h> 30 #include <linux/kthread.h> 31 #include <linux/time.h> 32 #include <linux/random.h> 33 34 #define MLOG_MASK_PREFIX ML_JOURNAL 35 #include <cluster/masklog.h> 36 37 #include "ocfs2.h" 38 39 #include "alloc.h" 40 #include "blockcheck.h" 41 #include "dir.h" 42 #include "dlmglue.h" 43 #include "extent_map.h" 44 #include "heartbeat.h" 45 #include "inode.h" 46 #include "journal.h" 47 #include "localalloc.h" 48 #include "slot_map.h" 49 #include "super.h" 50 #include "sysfile.h" 51 #include "uptodate.h" 52 #include "quota.h" 53 54 #include "buffer_head_io.h" 55 56 DEFINE_SPINLOCK(trans_inc_lock); 57 58 #define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000 59 60 static int ocfs2_force_read_journal(struct inode *inode); 61 static int ocfs2_recover_node(struct ocfs2_super *osb, 62 int node_num, int slot_num); 63 static int __ocfs2_recovery_thread(void *arg); 64 static int ocfs2_commit_cache(struct ocfs2_super *osb); 65 static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota); 66 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, 67 int dirty, int replayed); 68 static int ocfs2_trylock_journal(struct ocfs2_super *osb, 69 int slot_num); 70 static int ocfs2_recover_orphans(struct ocfs2_super *osb, 71 int slot); 72 static int ocfs2_commit_thread(void *arg); 73 static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, 74 int slot_num, 75 struct ocfs2_dinode *la_dinode, 76 struct ocfs2_dinode *tl_dinode, 77 struct ocfs2_quota_recovery *qrec); 78 79 static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb) 80 { 81 return __ocfs2_wait_on_mount(osb, 0); 82 } 83 84 static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb) 85 { 86 return __ocfs2_wait_on_mount(osb, 1); 87 } 88 89 /* 90 * This replay_map is to track online/offline slots, so we could recover 91 * offline slots during recovery and mount 92 */ 93 94 enum ocfs2_replay_state { 95 REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */ 96 REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */ 97 REPLAY_DONE /* Replay was already queued */ 98 }; 99 100 struct ocfs2_replay_map { 101 unsigned int rm_slots; 102 enum ocfs2_replay_state rm_state; 103 unsigned char rm_replay_slots[0]; 104 }; 105 106 void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state) 107 { 108 if (!osb->replay_map) 109 return; 110 111 /* If we've already queued the replay, we don't have any more to do */ 112 if (osb->replay_map->rm_state == REPLAY_DONE) 113 return; 114 115 osb->replay_map->rm_state = state; 116 } 117 118 int ocfs2_compute_replay_slots(struct ocfs2_super *osb) 119 { 120 struct ocfs2_replay_map *replay_map; 121 int i, node_num; 122 123 /* If replay map is already set, we don't do it again */ 124 if (osb->replay_map) 125 return 0; 126 127 replay_map = kzalloc(sizeof(struct ocfs2_replay_map) + 128 (osb->max_slots * sizeof(char)), GFP_KERNEL); 129 130 if (!replay_map) { 131 mlog_errno(-ENOMEM); 132 return -ENOMEM; 133 } 134 135 spin_lock(&osb->osb_lock); 136 137 replay_map->rm_slots = osb->max_slots; 138 replay_map->rm_state = REPLAY_UNNEEDED; 139 140 /* set rm_replay_slots for offline slot(s) */ 141 for (i = 0; i < replay_map->rm_slots; i++) { 142 if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT) 143 replay_map->rm_replay_slots[i] = 1; 144 } 145 146 osb->replay_map = replay_map; 147 spin_unlock(&osb->osb_lock); 148 return 0; 149 } 150 151 void ocfs2_queue_replay_slots(struct ocfs2_super *osb) 152 { 153 struct ocfs2_replay_map *replay_map = osb->replay_map; 154 int i; 155 156 if (!replay_map) 157 return; 158 159 if (replay_map->rm_state != REPLAY_NEEDED) 160 return; 161 162 for (i = 0; i < replay_map->rm_slots; i++) 163 if (replay_map->rm_replay_slots[i]) 164 ocfs2_queue_recovery_completion(osb->journal, i, NULL, 165 NULL, NULL); 166 replay_map->rm_state = REPLAY_DONE; 167 } 168 169 void ocfs2_free_replay_slots(struct ocfs2_super *osb) 170 { 171 struct ocfs2_replay_map *replay_map = osb->replay_map; 172 173 if (!osb->replay_map) 174 return; 175 176 kfree(replay_map); 177 osb->replay_map = NULL; 178 } 179 180 int ocfs2_recovery_init(struct ocfs2_super *osb) 181 { 182 struct ocfs2_recovery_map *rm; 183 184 mutex_init(&osb->recovery_lock); 185 osb->disable_recovery = 0; 186 osb->recovery_thread_task = NULL; 187 init_waitqueue_head(&osb->recovery_event); 188 189 rm = kzalloc(sizeof(struct ocfs2_recovery_map) + 190 osb->max_slots * sizeof(unsigned int), 191 GFP_KERNEL); 192 if (!rm) { 193 mlog_errno(-ENOMEM); 194 return -ENOMEM; 195 } 196 197 rm->rm_entries = (unsigned int *)((char *)rm + 198 sizeof(struct ocfs2_recovery_map)); 199 osb->recovery_map = rm; 200 201 return 0; 202 } 203 204 /* we can't grab the goofy sem lock from inside wait_event, so we use 205 * memory barriers to make sure that we'll see the null task before 206 * being woken up */ 207 static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) 208 { 209 mb(); 210 return osb->recovery_thread_task != NULL; 211 } 212 213 void ocfs2_recovery_exit(struct ocfs2_super *osb) 214 { 215 struct ocfs2_recovery_map *rm; 216 217 /* disable any new recovery threads and wait for any currently 218 * running ones to exit. Do this before setting the vol_state. */ 219 mutex_lock(&osb->recovery_lock); 220 osb->disable_recovery = 1; 221 mutex_unlock(&osb->recovery_lock); 222 wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); 223 224 /* At this point, we know that no more recovery threads can be 225 * launched, so wait for any recovery completion work to 226 * complete. */ 227 flush_workqueue(ocfs2_wq); 228 229 /* 230 * Now that recovery is shut down, and the osb is about to be 231 * freed, the osb_lock is not taken here. 232 */ 233 rm = osb->recovery_map; 234 /* XXX: Should we bug if there are dirty entries? */ 235 236 kfree(rm); 237 } 238 239 static int __ocfs2_recovery_map_test(struct ocfs2_super *osb, 240 unsigned int node_num) 241 { 242 int i; 243 struct ocfs2_recovery_map *rm = osb->recovery_map; 244 245 assert_spin_locked(&osb->osb_lock); 246 247 for (i = 0; i < rm->rm_used; i++) { 248 if (rm->rm_entries[i] == node_num) 249 return 1; 250 } 251 252 return 0; 253 } 254 255 /* Behaves like test-and-set. Returns the previous value */ 256 static int ocfs2_recovery_map_set(struct ocfs2_super *osb, 257 unsigned int node_num) 258 { 259 struct ocfs2_recovery_map *rm = osb->recovery_map; 260 261 spin_lock(&osb->osb_lock); 262 if (__ocfs2_recovery_map_test(osb, node_num)) { 263 spin_unlock(&osb->osb_lock); 264 return 1; 265 } 266 267 /* XXX: Can this be exploited? Not from o2dlm... */ 268 BUG_ON(rm->rm_used >= osb->max_slots); 269 270 rm->rm_entries[rm->rm_used] = node_num; 271 rm->rm_used++; 272 spin_unlock(&osb->osb_lock); 273 274 return 0; 275 } 276 277 static void ocfs2_recovery_map_clear(struct ocfs2_super *osb, 278 unsigned int node_num) 279 { 280 int i; 281 struct ocfs2_recovery_map *rm = osb->recovery_map; 282 283 spin_lock(&osb->osb_lock); 284 285 for (i = 0; i < rm->rm_used; i++) { 286 if (rm->rm_entries[i] == node_num) 287 break; 288 } 289 290 if (i < rm->rm_used) { 291 /* XXX: be careful with the pointer math */ 292 memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]), 293 (rm->rm_used - i - 1) * sizeof(unsigned int)); 294 rm->rm_used--; 295 } 296 297 spin_unlock(&osb->osb_lock); 298 } 299 300 static int ocfs2_commit_cache(struct ocfs2_super *osb) 301 { 302 int status = 0; 303 unsigned int flushed; 304 struct ocfs2_journal *journal = NULL; 305 306 mlog_entry_void(); 307 308 journal = osb->journal; 309 310 /* Flush all pending commits and checkpoint the journal. */ 311 down_write(&journal->j_trans_barrier); 312 313 if (atomic_read(&journal->j_num_trans) == 0) { 314 up_write(&journal->j_trans_barrier); 315 mlog(0, "No transactions for me to flush!\n"); 316 goto finally; 317 } 318 319 jbd2_journal_lock_updates(journal->j_journal); 320 status = jbd2_journal_flush(journal->j_journal); 321 jbd2_journal_unlock_updates(journal->j_journal); 322 if (status < 0) { 323 up_write(&journal->j_trans_barrier); 324 mlog_errno(status); 325 goto finally; 326 } 327 328 ocfs2_inc_trans_id(journal); 329 330 flushed = atomic_read(&journal->j_num_trans); 331 atomic_set(&journal->j_num_trans, 0); 332 up_write(&journal->j_trans_barrier); 333 334 mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", 335 journal->j_trans_id, flushed); 336 337 ocfs2_wake_downconvert_thread(osb); 338 wake_up(&journal->j_checkpointed); 339 finally: 340 mlog_exit(status); 341 return status; 342 } 343 344 handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) 345 { 346 journal_t *journal = osb->journal->j_journal; 347 handle_t *handle; 348 349 BUG_ON(!osb || !osb->journal->j_journal); 350 351 if (ocfs2_is_hard_readonly(osb)) 352 return ERR_PTR(-EROFS); 353 354 BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); 355 BUG_ON(max_buffs <= 0); 356 357 /* Nested transaction? Just return the handle... */ 358 if (journal_current_handle()) 359 return jbd2_journal_start(journal, max_buffs); 360 361 down_read(&osb->journal->j_trans_barrier); 362 363 handle = jbd2_journal_start(journal, max_buffs); 364 if (IS_ERR(handle)) { 365 up_read(&osb->journal->j_trans_barrier); 366 367 mlog_errno(PTR_ERR(handle)); 368 369 if (is_journal_aborted(journal)) { 370 ocfs2_abort(osb->sb, "Detected aborted journal"); 371 handle = ERR_PTR(-EROFS); 372 } 373 } else { 374 if (!ocfs2_mount_local(osb)) 375 atomic_inc(&(osb->journal->j_num_trans)); 376 } 377 378 return handle; 379 } 380 381 int ocfs2_commit_trans(struct ocfs2_super *osb, 382 handle_t *handle) 383 { 384 int ret, nested; 385 struct ocfs2_journal *journal = osb->journal; 386 387 BUG_ON(!handle); 388 389 nested = handle->h_ref > 1; 390 ret = jbd2_journal_stop(handle); 391 if (ret < 0) 392 mlog_errno(ret); 393 394 if (!nested) 395 up_read(&journal->j_trans_barrier); 396 397 return ret; 398 } 399 400 /* 401 * 'nblocks' is what you want to add to the current transaction. 402 * 403 * This might call jbd2_journal_restart() which will commit dirty buffers 404 * and then restart the transaction. Before calling 405 * ocfs2_extend_trans(), any changed blocks should have been 406 * dirtied. After calling it, all blocks which need to be changed must 407 * go through another set of journal_access/journal_dirty calls. 408 * 409 * WARNING: This will not release any semaphores or disk locks taken 410 * during the transaction, so make sure they were taken *before* 411 * start_trans or we'll have ordering deadlocks. 412 * 413 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is 414 * good because transaction ids haven't yet been recorded on the 415 * cluster locks associated with this handle. 416 */ 417 int ocfs2_extend_trans(handle_t *handle, int nblocks) 418 { 419 int status, old_nblocks; 420 421 BUG_ON(!handle); 422 BUG_ON(nblocks < 0); 423 424 if (!nblocks) 425 return 0; 426 427 old_nblocks = handle->h_buffer_credits; 428 mlog_entry_void(); 429 430 mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); 431 432 #ifdef CONFIG_OCFS2_DEBUG_FS 433 status = 1; 434 #else 435 status = jbd2_journal_extend(handle, nblocks); 436 if (status < 0) { 437 mlog_errno(status); 438 goto bail; 439 } 440 #endif 441 442 if (status > 0) { 443 mlog(0, 444 "jbd2_journal_extend failed, trying " 445 "jbd2_journal_restart\n"); 446 status = jbd2_journal_restart(handle, 447 old_nblocks + nblocks); 448 if (status < 0) { 449 mlog_errno(status); 450 goto bail; 451 } 452 } 453 454 status = 0; 455 bail: 456 457 mlog_exit(status); 458 return status; 459 } 460 461 struct ocfs2_triggers { 462 struct jbd2_buffer_trigger_type ot_triggers; 463 int ot_offset; 464 }; 465 466 static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers) 467 { 468 return container_of(triggers, struct ocfs2_triggers, ot_triggers); 469 } 470 471 static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, 472 struct buffer_head *bh, 473 void *data, size_t size) 474 { 475 struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers); 476 477 /* 478 * We aren't guaranteed to have the superblock here, so we 479 * must unconditionally compute the ecc data. 480 * __ocfs2_journal_access() will only set the triggers if 481 * metaecc is enabled. 482 */ 483 ocfs2_block_check_compute(data, size, data + ot->ot_offset); 484 } 485 486 /* 487 * Quota blocks have their own trigger because the struct ocfs2_block_check 488 * offset depends on the blocksize. 489 */ 490 static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, 491 struct buffer_head *bh, 492 void *data, size_t size) 493 { 494 struct ocfs2_disk_dqtrailer *dqt = 495 ocfs2_block_dqtrailer(size, data); 496 497 /* 498 * We aren't guaranteed to have the superblock here, so we 499 * must unconditionally compute the ecc data. 500 * __ocfs2_journal_access() will only set the triggers if 501 * metaecc is enabled. 502 */ 503 ocfs2_block_check_compute(data, size, &dqt->dq_check); 504 } 505 506 /* 507 * Directory blocks also have their own trigger because the 508 * struct ocfs2_block_check offset depends on the blocksize. 509 */ 510 static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, 511 struct buffer_head *bh, 512 void *data, size_t size) 513 { 514 struct ocfs2_dir_block_trailer *trailer = 515 ocfs2_dir_trailer_from_size(size, data); 516 517 /* 518 * We aren't guaranteed to have the superblock here, so we 519 * must unconditionally compute the ecc data. 520 * __ocfs2_journal_access() will only set the triggers if 521 * metaecc is enabled. 522 */ 523 ocfs2_block_check_compute(data, size, &trailer->db_check); 524 } 525 526 static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, 527 struct buffer_head *bh) 528 { 529 mlog(ML_ERROR, 530 "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, " 531 "bh->b_blocknr = %llu\n", 532 (unsigned long)bh, 533 (unsigned long long)bh->b_blocknr); 534 535 /* We aren't guaranteed to have the superblock here - but if we 536 * don't, it'll just crash. */ 537 ocfs2_error(bh->b_assoc_map->host->i_sb, 538 "JBD2 has aborted our journal, ocfs2 cannot continue\n"); 539 } 540 541 static struct ocfs2_triggers di_triggers = { 542 .ot_triggers = { 543 .t_frozen = ocfs2_frozen_trigger, 544 .t_abort = ocfs2_abort_trigger, 545 }, 546 .ot_offset = offsetof(struct ocfs2_dinode, i_check), 547 }; 548 549 static struct ocfs2_triggers eb_triggers = { 550 .ot_triggers = { 551 .t_frozen = ocfs2_frozen_trigger, 552 .t_abort = ocfs2_abort_trigger, 553 }, 554 .ot_offset = offsetof(struct ocfs2_extent_block, h_check), 555 }; 556 557 static struct ocfs2_triggers rb_triggers = { 558 .ot_triggers = { 559 .t_frozen = ocfs2_frozen_trigger, 560 .t_abort = ocfs2_abort_trigger, 561 }, 562 .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), 563 }; 564 565 static struct ocfs2_triggers gd_triggers = { 566 .ot_triggers = { 567 .t_frozen = ocfs2_frozen_trigger, 568 .t_abort = ocfs2_abort_trigger, 569 }, 570 .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), 571 }; 572 573 static struct ocfs2_triggers db_triggers = { 574 .ot_triggers = { 575 .t_frozen = ocfs2_db_frozen_trigger, 576 .t_abort = ocfs2_abort_trigger, 577 }, 578 }; 579 580 static struct ocfs2_triggers xb_triggers = { 581 .ot_triggers = { 582 .t_frozen = ocfs2_frozen_trigger, 583 .t_abort = ocfs2_abort_trigger, 584 }, 585 .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), 586 }; 587 588 static struct ocfs2_triggers dq_triggers = { 589 .ot_triggers = { 590 .t_frozen = ocfs2_dq_frozen_trigger, 591 .t_abort = ocfs2_abort_trigger, 592 }, 593 }; 594 595 static struct ocfs2_triggers dr_triggers = { 596 .ot_triggers = { 597 .t_frozen = ocfs2_frozen_trigger, 598 .t_abort = ocfs2_abort_trigger, 599 }, 600 .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), 601 }; 602 603 static struct ocfs2_triggers dl_triggers = { 604 .ot_triggers = { 605 .t_frozen = ocfs2_frozen_trigger, 606 .t_abort = ocfs2_abort_trigger, 607 }, 608 .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), 609 }; 610 611 static int __ocfs2_journal_access(handle_t *handle, 612 struct ocfs2_caching_info *ci, 613 struct buffer_head *bh, 614 struct ocfs2_triggers *triggers, 615 int type) 616 { 617 int status; 618 struct ocfs2_super *osb = 619 OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); 620 621 BUG_ON(!ci || !ci->ci_ops); 622 BUG_ON(!handle); 623 BUG_ON(!bh); 624 625 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", 626 (unsigned long long)bh->b_blocknr, type, 627 (type == OCFS2_JOURNAL_ACCESS_CREATE) ? 628 "OCFS2_JOURNAL_ACCESS_CREATE" : 629 "OCFS2_JOURNAL_ACCESS_WRITE", 630 bh->b_size); 631 632 /* we can safely remove this assertion after testing. */ 633 if (!buffer_uptodate(bh)) { 634 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); 635 mlog(ML_ERROR, "b_blocknr=%llu\n", 636 (unsigned long long)bh->b_blocknr); 637 BUG(); 638 } 639 640 /* Set the current transaction information on the ci so 641 * that the locking code knows whether it can drop it's locks 642 * on this ci or not. We're protected from the commit 643 * thread updating the current transaction id until 644 * ocfs2_commit_trans() because ocfs2_start_trans() took 645 * j_trans_barrier for us. */ 646 ocfs2_set_ci_lock_trans(osb->journal, ci); 647 648 ocfs2_metadata_cache_io_lock(ci); 649 switch (type) { 650 case OCFS2_JOURNAL_ACCESS_CREATE: 651 case OCFS2_JOURNAL_ACCESS_WRITE: 652 status = jbd2_journal_get_write_access(handle, bh); 653 break; 654 655 case OCFS2_JOURNAL_ACCESS_UNDO: 656 status = jbd2_journal_get_undo_access(handle, bh); 657 break; 658 659 default: 660 status = -EINVAL; 661 mlog(ML_ERROR, "Unknown access type!\n"); 662 } 663 if (!status && ocfs2_meta_ecc(osb) && triggers) 664 jbd2_journal_set_triggers(bh, &triggers->ot_triggers); 665 ocfs2_metadata_cache_io_unlock(ci); 666 667 if (status < 0) 668 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", 669 status, type); 670 671 mlog_exit(status); 672 return status; 673 } 674 675 int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci, 676 struct buffer_head *bh, int type) 677 { 678 return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type); 679 } 680 681 int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci, 682 struct buffer_head *bh, int type) 683 { 684 return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type); 685 } 686 687 int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci, 688 struct buffer_head *bh, int type) 689 { 690 return __ocfs2_journal_access(handle, ci, bh, &rb_triggers, 691 type); 692 } 693 694 int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci, 695 struct buffer_head *bh, int type) 696 { 697 return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type); 698 } 699 700 int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci, 701 struct buffer_head *bh, int type) 702 { 703 return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type); 704 } 705 706 int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci, 707 struct buffer_head *bh, int type) 708 { 709 return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type); 710 } 711 712 int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci, 713 struct buffer_head *bh, int type) 714 { 715 return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type); 716 } 717 718 int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci, 719 struct buffer_head *bh, int type) 720 { 721 return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type); 722 } 723 724 int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci, 725 struct buffer_head *bh, int type) 726 { 727 return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type); 728 } 729 730 int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, 731 struct buffer_head *bh, int type) 732 { 733 return __ocfs2_journal_access(handle, ci, bh, NULL, type); 734 } 735 736 void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh) 737 { 738 int status; 739 740 mlog_entry("(bh->b_blocknr=%llu)\n", 741 (unsigned long long)bh->b_blocknr); 742 743 status = jbd2_journal_dirty_metadata(handle, bh); 744 BUG_ON(status); 745 746 mlog_exit_void(); 747 } 748 749 #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) 750 751 void ocfs2_set_journal_params(struct ocfs2_super *osb) 752 { 753 journal_t *journal = osb->journal->j_journal; 754 unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL; 755 756 if (osb->osb_commit_interval) 757 commit_interval = osb->osb_commit_interval; 758 759 write_lock(&journal->j_state_lock); 760 journal->j_commit_interval = commit_interval; 761 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) 762 journal->j_flags |= JBD2_BARRIER; 763 else 764 journal->j_flags &= ~JBD2_BARRIER; 765 write_unlock(&journal->j_state_lock); 766 } 767 768 int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) 769 { 770 int status = -1; 771 struct inode *inode = NULL; /* the journal inode */ 772 journal_t *j_journal = NULL; 773 struct ocfs2_dinode *di = NULL; 774 struct buffer_head *bh = NULL; 775 struct ocfs2_super *osb; 776 int inode_lock = 0; 777 778 mlog_entry_void(); 779 780 BUG_ON(!journal); 781 782 osb = journal->j_osb; 783 784 /* already have the inode for our journal */ 785 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, 786 osb->slot_num); 787 if (inode == NULL) { 788 status = -EACCES; 789 mlog_errno(status); 790 goto done; 791 } 792 if (is_bad_inode(inode)) { 793 mlog(ML_ERROR, "access error (bad inode)\n"); 794 iput(inode); 795 inode = NULL; 796 status = -EACCES; 797 goto done; 798 } 799 800 SET_INODE_JOURNAL(inode); 801 OCFS2_I(inode)->ip_open_count++; 802 803 /* Skip recovery waits here - journal inode metadata never 804 * changes in a live cluster so it can be considered an 805 * exception to the rule. */ 806 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); 807 if (status < 0) { 808 if (status != -ERESTARTSYS) 809 mlog(ML_ERROR, "Could not get lock on journal!\n"); 810 goto done; 811 } 812 813 inode_lock = 1; 814 di = (struct ocfs2_dinode *)bh->b_data; 815 816 if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) { 817 mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", 818 inode->i_size); 819 status = -EINVAL; 820 goto done; 821 } 822 823 mlog(0, "inode->i_size = %lld\n", inode->i_size); 824 mlog(0, "inode->i_blocks = %llu\n", 825 (unsigned long long)inode->i_blocks); 826 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); 827 828 /* call the kernels journal init function now */ 829 j_journal = jbd2_journal_init_inode(inode); 830 if (j_journal == NULL) { 831 mlog(ML_ERROR, "Linux journal layer error\n"); 832 status = -EINVAL; 833 goto done; 834 } 835 836 mlog(0, "Returned from jbd2_journal_init_inode\n"); 837 mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen); 838 839 *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & 840 OCFS2_JOURNAL_DIRTY_FL); 841 842 journal->j_journal = j_journal; 843 journal->j_inode = inode; 844 journal->j_bh = bh; 845 846 ocfs2_set_journal_params(osb); 847 848 journal->j_state = OCFS2_JOURNAL_LOADED; 849 850 status = 0; 851 done: 852 if (status < 0) { 853 if (inode_lock) 854 ocfs2_inode_unlock(inode, 1); 855 brelse(bh); 856 if (inode) { 857 OCFS2_I(inode)->ip_open_count--; 858 iput(inode); 859 } 860 } 861 862 mlog_exit(status); 863 return status; 864 } 865 866 static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di) 867 { 868 le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1); 869 } 870 871 static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di) 872 { 873 return le32_to_cpu(di->id1.journal1.ij_recovery_generation); 874 } 875 876 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, 877 int dirty, int replayed) 878 { 879 int status; 880 unsigned int flags; 881 struct ocfs2_journal *journal = osb->journal; 882 struct buffer_head *bh = journal->j_bh; 883 struct ocfs2_dinode *fe; 884 885 mlog_entry_void(); 886 887 fe = (struct ocfs2_dinode *)bh->b_data; 888 889 /* The journal bh on the osb always comes from ocfs2_journal_init() 890 * and was validated there inside ocfs2_inode_lock_full(). It's a 891 * code bug if we mess it up. */ 892 BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); 893 894 flags = le32_to_cpu(fe->id1.journal1.ij_flags); 895 if (dirty) 896 flags |= OCFS2_JOURNAL_DIRTY_FL; 897 else 898 flags &= ~OCFS2_JOURNAL_DIRTY_FL; 899 fe->id1.journal1.ij_flags = cpu_to_le32(flags); 900 901 if (replayed) 902 ocfs2_bump_recovery_generation(fe); 903 904 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); 905 status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode)); 906 if (status < 0) 907 mlog_errno(status); 908 909 mlog_exit(status); 910 return status; 911 } 912 913 /* 914 * If the journal has been kmalloc'd it needs to be freed after this 915 * call. 916 */ 917 void ocfs2_journal_shutdown(struct ocfs2_super *osb) 918 { 919 struct ocfs2_journal *journal = NULL; 920 int status = 0; 921 struct inode *inode = NULL; 922 int num_running_trans = 0; 923 924 mlog_entry_void(); 925 926 BUG_ON(!osb); 927 928 journal = osb->journal; 929 if (!journal) 930 goto done; 931 932 inode = journal->j_inode; 933 934 if (journal->j_state != OCFS2_JOURNAL_LOADED) 935 goto done; 936 937 /* need to inc inode use count - jbd2_journal_destroy will iput. */ 938 if (!igrab(inode)) 939 BUG(); 940 941 num_running_trans = atomic_read(&(osb->journal->j_num_trans)); 942 if (num_running_trans > 0) 943 mlog(0, "Shutting down journal: must wait on %d " 944 "running transactions!\n", 945 num_running_trans); 946 947 /* Do a commit_cache here. It will flush our journal, *and* 948 * release any locks that are still held. 949 * set the SHUTDOWN flag and release the trans lock. 950 * the commit thread will take the trans lock for us below. */ 951 journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN; 952 953 /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not 954 * drop the trans_lock (which we want to hold until we 955 * completely destroy the journal. */ 956 if (osb->commit_task) { 957 /* Wait for the commit thread */ 958 mlog(0, "Waiting for ocfs2commit to exit....\n"); 959 kthread_stop(osb->commit_task); 960 osb->commit_task = NULL; 961 } 962 963 BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0); 964 965 if (ocfs2_mount_local(osb)) { 966 jbd2_journal_lock_updates(journal->j_journal); 967 status = jbd2_journal_flush(journal->j_journal); 968 jbd2_journal_unlock_updates(journal->j_journal); 969 if (status < 0) 970 mlog_errno(status); 971 } 972 973 if (status == 0) { 974 /* 975 * Do not toggle if flush was unsuccessful otherwise 976 * will leave dirty metadata in a "clean" journal 977 */ 978 status = ocfs2_journal_toggle_dirty(osb, 0, 0); 979 if (status < 0) 980 mlog_errno(status); 981 } 982 983 /* Shutdown the kernel journal system */ 984 jbd2_journal_destroy(journal->j_journal); 985 journal->j_journal = NULL; 986 987 OCFS2_I(inode)->ip_open_count--; 988 989 /* unlock our journal */ 990 ocfs2_inode_unlock(inode, 1); 991 992 brelse(journal->j_bh); 993 journal->j_bh = NULL; 994 995 journal->j_state = OCFS2_JOURNAL_FREE; 996 997 // up_write(&journal->j_trans_barrier); 998 done: 999 if (inode) 1000 iput(inode); 1001 mlog_exit_void(); 1002 } 1003 1004 static void ocfs2_clear_journal_error(struct super_block *sb, 1005 journal_t *journal, 1006 int slot) 1007 { 1008 int olderr; 1009 1010 olderr = jbd2_journal_errno(journal); 1011 if (olderr) { 1012 mlog(ML_ERROR, "File system error %d recorded in " 1013 "journal %u.\n", olderr, slot); 1014 mlog(ML_ERROR, "File system on device %s needs checking.\n", 1015 sb->s_id); 1016 1017 jbd2_journal_ack_err(journal); 1018 jbd2_journal_clear_err(journal); 1019 } 1020 } 1021 1022 int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) 1023 { 1024 int status = 0; 1025 struct ocfs2_super *osb; 1026 1027 mlog_entry_void(); 1028 1029 BUG_ON(!journal); 1030 1031 osb = journal->j_osb; 1032 1033 status = jbd2_journal_load(journal->j_journal); 1034 if (status < 0) { 1035 mlog(ML_ERROR, "Failed to load journal!\n"); 1036 goto done; 1037 } 1038 1039 ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); 1040 1041 status = ocfs2_journal_toggle_dirty(osb, 1, replayed); 1042 if (status < 0) { 1043 mlog_errno(status); 1044 goto done; 1045 } 1046 1047 /* Launch the commit thread */ 1048 if (!local) { 1049 osb->commit_task = kthread_run(ocfs2_commit_thread, osb, 1050 "ocfs2cmt"); 1051 if (IS_ERR(osb->commit_task)) { 1052 status = PTR_ERR(osb->commit_task); 1053 osb->commit_task = NULL; 1054 mlog(ML_ERROR, "unable to launch ocfs2commit thread, " 1055 "error=%d", status); 1056 goto done; 1057 } 1058 } else 1059 osb->commit_task = NULL; 1060 1061 done: 1062 mlog_exit(status); 1063 return status; 1064 } 1065 1066 1067 /* 'full' flag tells us whether we clear out all blocks or if we just 1068 * mark the journal clean */ 1069 int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) 1070 { 1071 int status; 1072 1073 mlog_entry_void(); 1074 1075 BUG_ON(!journal); 1076 1077 status = jbd2_journal_wipe(journal->j_journal, full); 1078 if (status < 0) { 1079 mlog_errno(status); 1080 goto bail; 1081 } 1082 1083 status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0); 1084 if (status < 0) 1085 mlog_errno(status); 1086 1087 bail: 1088 mlog_exit(status); 1089 return status; 1090 } 1091 1092 static int ocfs2_recovery_completed(struct ocfs2_super *osb) 1093 { 1094 int empty; 1095 struct ocfs2_recovery_map *rm = osb->recovery_map; 1096 1097 spin_lock(&osb->osb_lock); 1098 empty = (rm->rm_used == 0); 1099 spin_unlock(&osb->osb_lock); 1100 1101 return empty; 1102 } 1103 1104 void ocfs2_wait_for_recovery(struct ocfs2_super *osb) 1105 { 1106 wait_event(osb->recovery_event, ocfs2_recovery_completed(osb)); 1107 } 1108 1109 /* 1110 * JBD Might read a cached version of another nodes journal file. We 1111 * don't want this as this file changes often and we get no 1112 * notification on those changes. The only way to be sure that we've 1113 * got the most up to date version of those blocks then is to force 1114 * read them off disk. Just searching through the buffer cache won't 1115 * work as there may be pages backing this file which are still marked 1116 * up to date. We know things can't change on this file underneath us 1117 * as we have the lock by now :) 1118 */ 1119 static int ocfs2_force_read_journal(struct inode *inode) 1120 { 1121 int status = 0; 1122 int i; 1123 u64 v_blkno, p_blkno, p_blocks, num_blocks; 1124 #define CONCURRENT_JOURNAL_FILL 32ULL 1125 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; 1126 1127 mlog_entry_void(); 1128 1129 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 1130 1131 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); 1132 v_blkno = 0; 1133 while (v_blkno < num_blocks) { 1134 status = ocfs2_extent_map_get_blocks(inode, v_blkno, 1135 &p_blkno, &p_blocks, NULL); 1136 if (status < 0) { 1137 mlog_errno(status); 1138 goto bail; 1139 } 1140 1141 if (p_blocks > CONCURRENT_JOURNAL_FILL) 1142 p_blocks = CONCURRENT_JOURNAL_FILL; 1143 1144 /* We are reading journal data which should not 1145 * be put in the uptodate cache */ 1146 status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb), 1147 p_blkno, p_blocks, bhs); 1148 if (status < 0) { 1149 mlog_errno(status); 1150 goto bail; 1151 } 1152 1153 for(i = 0; i < p_blocks; i++) { 1154 brelse(bhs[i]); 1155 bhs[i] = NULL; 1156 } 1157 1158 v_blkno += p_blocks; 1159 } 1160 1161 bail: 1162 for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) 1163 brelse(bhs[i]); 1164 mlog_exit(status); 1165 return status; 1166 } 1167 1168 struct ocfs2_la_recovery_item { 1169 struct list_head lri_list; 1170 int lri_slot; 1171 struct ocfs2_dinode *lri_la_dinode; 1172 struct ocfs2_dinode *lri_tl_dinode; 1173 struct ocfs2_quota_recovery *lri_qrec; 1174 }; 1175 1176 /* Does the second half of the recovery process. By this point, the 1177 * node is marked clean and can actually be considered recovered, 1178 * hence it's no longer in the recovery map, but there's still some 1179 * cleanup we can do which shouldn't happen within the recovery thread 1180 * as locking in that context becomes very difficult if we are to take 1181 * recovering nodes into account. 1182 * 1183 * NOTE: This function can and will sleep on recovery of other nodes 1184 * during cluster locking, just like any other ocfs2 process. 1185 */ 1186 void ocfs2_complete_recovery(struct work_struct *work) 1187 { 1188 int ret; 1189 struct ocfs2_journal *journal = 1190 container_of(work, struct ocfs2_journal, j_recovery_work); 1191 struct ocfs2_super *osb = journal->j_osb; 1192 struct ocfs2_dinode *la_dinode, *tl_dinode; 1193 struct ocfs2_la_recovery_item *item, *n; 1194 struct ocfs2_quota_recovery *qrec; 1195 LIST_HEAD(tmp_la_list); 1196 1197 mlog_entry_void(); 1198 1199 mlog(0, "completing recovery from keventd\n"); 1200 1201 spin_lock(&journal->j_lock); 1202 list_splice_init(&journal->j_la_cleanups, &tmp_la_list); 1203 spin_unlock(&journal->j_lock); 1204 1205 list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { 1206 list_del_init(&item->lri_list); 1207 1208 mlog(0, "Complete recovery for slot %d\n", item->lri_slot); 1209 1210 ocfs2_wait_on_quotas(osb); 1211 1212 la_dinode = item->lri_la_dinode; 1213 if (la_dinode) { 1214 mlog(0, "Clean up local alloc %llu\n", 1215 (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); 1216 1217 ret = ocfs2_complete_local_alloc_recovery(osb, 1218 la_dinode); 1219 if (ret < 0) 1220 mlog_errno(ret); 1221 1222 kfree(la_dinode); 1223 } 1224 1225 tl_dinode = item->lri_tl_dinode; 1226 if (tl_dinode) { 1227 mlog(0, "Clean up truncate log %llu\n", 1228 (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); 1229 1230 ret = ocfs2_complete_truncate_log_recovery(osb, 1231 tl_dinode); 1232 if (ret < 0) 1233 mlog_errno(ret); 1234 1235 kfree(tl_dinode); 1236 } 1237 1238 ret = ocfs2_recover_orphans(osb, item->lri_slot); 1239 if (ret < 0) 1240 mlog_errno(ret); 1241 1242 qrec = item->lri_qrec; 1243 if (qrec) { 1244 mlog(0, "Recovering quota files"); 1245 ret = ocfs2_finish_quota_recovery(osb, qrec, 1246 item->lri_slot); 1247 if (ret < 0) 1248 mlog_errno(ret); 1249 /* Recovery info is already freed now */ 1250 } 1251 1252 kfree(item); 1253 } 1254 1255 mlog(0, "Recovery completion\n"); 1256 mlog_exit_void(); 1257 } 1258 1259 /* NOTE: This function always eats your references to la_dinode and 1260 * tl_dinode, either manually on error, or by passing them to 1261 * ocfs2_complete_recovery */ 1262 static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, 1263 int slot_num, 1264 struct ocfs2_dinode *la_dinode, 1265 struct ocfs2_dinode *tl_dinode, 1266 struct ocfs2_quota_recovery *qrec) 1267 { 1268 struct ocfs2_la_recovery_item *item; 1269 1270 item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); 1271 if (!item) { 1272 /* Though we wish to avoid it, we are in fact safe in 1273 * skipping local alloc cleanup as fsck.ocfs2 is more 1274 * than capable of reclaiming unused space. */ 1275 if (la_dinode) 1276 kfree(la_dinode); 1277 1278 if (tl_dinode) 1279 kfree(tl_dinode); 1280 1281 if (qrec) 1282 ocfs2_free_quota_recovery(qrec); 1283 1284 mlog_errno(-ENOMEM); 1285 return; 1286 } 1287 1288 INIT_LIST_HEAD(&item->lri_list); 1289 item->lri_la_dinode = la_dinode; 1290 item->lri_slot = slot_num; 1291 item->lri_tl_dinode = tl_dinode; 1292 item->lri_qrec = qrec; 1293 1294 spin_lock(&journal->j_lock); 1295 list_add_tail(&item->lri_list, &journal->j_la_cleanups); 1296 queue_work(ocfs2_wq, &journal->j_recovery_work); 1297 spin_unlock(&journal->j_lock); 1298 } 1299 1300 /* Called by the mount code to queue recovery the last part of 1301 * recovery for it's own and offline slot(s). */ 1302 void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) 1303 { 1304 struct ocfs2_journal *journal = osb->journal; 1305 1306 /* No need to queue up our truncate_log as regular cleanup will catch 1307 * that */ 1308 ocfs2_queue_recovery_completion(journal, osb->slot_num, 1309 osb->local_alloc_copy, NULL, NULL); 1310 ocfs2_schedule_truncate_log_flush(osb, 0); 1311 1312 osb->local_alloc_copy = NULL; 1313 osb->dirty = 0; 1314 1315 /* queue to recover orphan slots for all offline slots */ 1316 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 1317 ocfs2_queue_replay_slots(osb); 1318 ocfs2_free_replay_slots(osb); 1319 } 1320 1321 void ocfs2_complete_quota_recovery(struct ocfs2_super *osb) 1322 { 1323 if (osb->quota_rec) { 1324 ocfs2_queue_recovery_completion(osb->journal, 1325 osb->slot_num, 1326 NULL, 1327 NULL, 1328 osb->quota_rec); 1329 osb->quota_rec = NULL; 1330 } 1331 } 1332 1333 static int __ocfs2_recovery_thread(void *arg) 1334 { 1335 int status, node_num, slot_num; 1336 struct ocfs2_super *osb = arg; 1337 struct ocfs2_recovery_map *rm = osb->recovery_map; 1338 int *rm_quota = NULL; 1339 int rm_quota_used = 0, i; 1340 struct ocfs2_quota_recovery *qrec; 1341 1342 mlog_entry_void(); 1343 1344 status = ocfs2_wait_on_mount(osb); 1345 if (status < 0) { 1346 goto bail; 1347 } 1348 1349 rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS); 1350 if (!rm_quota) { 1351 status = -ENOMEM; 1352 goto bail; 1353 } 1354 restart: 1355 status = ocfs2_super_lock(osb, 1); 1356 if (status < 0) { 1357 mlog_errno(status); 1358 goto bail; 1359 } 1360 1361 status = ocfs2_compute_replay_slots(osb); 1362 if (status < 0) 1363 mlog_errno(status); 1364 1365 /* queue recovery for our own slot */ 1366 ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, 1367 NULL, NULL); 1368 1369 spin_lock(&osb->osb_lock); 1370 while (rm->rm_used) { 1371 /* It's always safe to remove entry zero, as we won't 1372 * clear it until ocfs2_recover_node() has succeeded. */ 1373 node_num = rm->rm_entries[0]; 1374 spin_unlock(&osb->osb_lock); 1375 mlog(0, "checking node %d\n", node_num); 1376 slot_num = ocfs2_node_num_to_slot(osb, node_num); 1377 if (slot_num == -ENOENT) { 1378 status = 0; 1379 mlog(0, "no slot for this node, so no recovery" 1380 "required.\n"); 1381 goto skip_recovery; 1382 } 1383 mlog(0, "node %d was using slot %d\n", node_num, slot_num); 1384 1385 /* It is a bit subtle with quota recovery. We cannot do it 1386 * immediately because we have to obtain cluster locks from 1387 * quota files and we also don't want to just skip it because 1388 * then quota usage would be out of sync until some node takes 1389 * the slot. So we remember which nodes need quota recovery 1390 * and when everything else is done, we recover quotas. */ 1391 for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++); 1392 if (i == rm_quota_used) 1393 rm_quota[rm_quota_used++] = slot_num; 1394 1395 status = ocfs2_recover_node(osb, node_num, slot_num); 1396 skip_recovery: 1397 if (!status) { 1398 ocfs2_recovery_map_clear(osb, node_num); 1399 } else { 1400 mlog(ML_ERROR, 1401 "Error %d recovering node %d on device (%u,%u)!\n", 1402 status, node_num, 1403 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1404 mlog(ML_ERROR, "Volume requires unmount.\n"); 1405 } 1406 1407 spin_lock(&osb->osb_lock); 1408 } 1409 spin_unlock(&osb->osb_lock); 1410 mlog(0, "All nodes recovered\n"); 1411 1412 /* Refresh all journal recovery generations from disk */ 1413 status = ocfs2_check_journals_nolocks(osb); 1414 status = (status == -EROFS) ? 0 : status; 1415 if (status < 0) 1416 mlog_errno(status); 1417 1418 /* Now it is right time to recover quotas... We have to do this under 1419 * superblock lock so that noone can start using the slot (and crash) 1420 * before we recover it */ 1421 for (i = 0; i < rm_quota_used; i++) { 1422 qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); 1423 if (IS_ERR(qrec)) { 1424 status = PTR_ERR(qrec); 1425 mlog_errno(status); 1426 continue; 1427 } 1428 ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], 1429 NULL, NULL, qrec); 1430 } 1431 1432 ocfs2_super_unlock(osb, 1); 1433 1434 /* queue recovery for offline slots */ 1435 ocfs2_queue_replay_slots(osb); 1436 1437 bail: 1438 mutex_lock(&osb->recovery_lock); 1439 if (!status && !ocfs2_recovery_completed(osb)) { 1440 mutex_unlock(&osb->recovery_lock); 1441 goto restart; 1442 } 1443 1444 ocfs2_free_replay_slots(osb); 1445 osb->recovery_thread_task = NULL; 1446 mb(); /* sync with ocfs2_recovery_thread_running */ 1447 wake_up(&osb->recovery_event); 1448 1449 mutex_unlock(&osb->recovery_lock); 1450 1451 if (rm_quota) 1452 kfree(rm_quota); 1453 1454 mlog_exit(status); 1455 /* no one is callint kthread_stop() for us so the kthread() api 1456 * requires that we call do_exit(). And it isn't exported, but 1457 * complete_and_exit() seems to be a minimal wrapper around it. */ 1458 complete_and_exit(NULL, status); 1459 return status; 1460 } 1461 1462 void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) 1463 { 1464 mlog_entry("(node_num=%d, osb->node_num = %d)\n", 1465 node_num, osb->node_num); 1466 1467 mutex_lock(&osb->recovery_lock); 1468 if (osb->disable_recovery) 1469 goto out; 1470 1471 /* People waiting on recovery will wait on 1472 * the recovery map to empty. */ 1473 if (ocfs2_recovery_map_set(osb, node_num)) 1474 mlog(0, "node %d already in recovery map.\n", node_num); 1475 1476 mlog(0, "starting recovery thread...\n"); 1477 1478 if (osb->recovery_thread_task) 1479 goto out; 1480 1481 osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, 1482 "ocfs2rec"); 1483 if (IS_ERR(osb->recovery_thread_task)) { 1484 mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); 1485 osb->recovery_thread_task = NULL; 1486 } 1487 1488 out: 1489 mutex_unlock(&osb->recovery_lock); 1490 wake_up(&osb->recovery_event); 1491 1492 mlog_exit_void(); 1493 } 1494 1495 static int ocfs2_read_journal_inode(struct ocfs2_super *osb, 1496 int slot_num, 1497 struct buffer_head **bh, 1498 struct inode **ret_inode) 1499 { 1500 int status = -EACCES; 1501 struct inode *inode = NULL; 1502 1503 BUG_ON(slot_num >= osb->max_slots); 1504 1505 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, 1506 slot_num); 1507 if (!inode || is_bad_inode(inode)) { 1508 mlog_errno(status); 1509 goto bail; 1510 } 1511 SET_INODE_JOURNAL(inode); 1512 1513 status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE); 1514 if (status < 0) { 1515 mlog_errno(status); 1516 goto bail; 1517 } 1518 1519 status = 0; 1520 1521 bail: 1522 if (inode) { 1523 if (status || !ret_inode) 1524 iput(inode); 1525 else 1526 *ret_inode = inode; 1527 } 1528 return status; 1529 } 1530 1531 /* Does the actual journal replay and marks the journal inode as 1532 * clean. Will only replay if the journal inode is marked dirty. */ 1533 static int ocfs2_replay_journal(struct ocfs2_super *osb, 1534 int node_num, 1535 int slot_num) 1536 { 1537 int status; 1538 int got_lock = 0; 1539 unsigned int flags; 1540 struct inode *inode = NULL; 1541 struct ocfs2_dinode *fe; 1542 journal_t *journal = NULL; 1543 struct buffer_head *bh = NULL; 1544 u32 slot_reco_gen; 1545 1546 status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode); 1547 if (status) { 1548 mlog_errno(status); 1549 goto done; 1550 } 1551 1552 fe = (struct ocfs2_dinode *)bh->b_data; 1553 slot_reco_gen = ocfs2_get_recovery_generation(fe); 1554 brelse(bh); 1555 bh = NULL; 1556 1557 /* 1558 * As the fs recovery is asynchronous, there is a small chance that 1559 * another node mounted (and recovered) the slot before the recovery 1560 * thread could get the lock. To handle that, we dirty read the journal 1561 * inode for that slot to get the recovery generation. If it is 1562 * different than what we expected, the slot has been recovered. 1563 * If not, it needs recovery. 1564 */ 1565 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { 1566 mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, 1567 osb->slot_recovery_generations[slot_num], slot_reco_gen); 1568 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1569 status = -EBUSY; 1570 goto done; 1571 } 1572 1573 /* Continue with recovery as the journal has not yet been recovered */ 1574 1575 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); 1576 if (status < 0) { 1577 mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); 1578 if (status != -ERESTARTSYS) 1579 mlog(ML_ERROR, "Could not lock journal!\n"); 1580 goto done; 1581 } 1582 got_lock = 1; 1583 1584 fe = (struct ocfs2_dinode *) bh->b_data; 1585 1586 flags = le32_to_cpu(fe->id1.journal1.ij_flags); 1587 slot_reco_gen = ocfs2_get_recovery_generation(fe); 1588 1589 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { 1590 mlog(0, "No recovery required for node %d\n", node_num); 1591 /* Refresh recovery generation for the slot */ 1592 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1593 goto done; 1594 } 1595 1596 /* we need to run complete recovery for offline orphan slots */ 1597 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 1598 1599 mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", 1600 node_num, slot_num, 1601 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1602 1603 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); 1604 1605 status = ocfs2_force_read_journal(inode); 1606 if (status < 0) { 1607 mlog_errno(status); 1608 goto done; 1609 } 1610 1611 mlog(0, "calling journal_init_inode\n"); 1612 journal = jbd2_journal_init_inode(inode); 1613 if (journal == NULL) { 1614 mlog(ML_ERROR, "Linux journal layer error\n"); 1615 status = -EIO; 1616 goto done; 1617 } 1618 1619 status = jbd2_journal_load(journal); 1620 if (status < 0) { 1621 mlog_errno(status); 1622 if (!igrab(inode)) 1623 BUG(); 1624 jbd2_journal_destroy(journal); 1625 goto done; 1626 } 1627 1628 ocfs2_clear_journal_error(osb->sb, journal, slot_num); 1629 1630 /* wipe the journal */ 1631 mlog(0, "flushing the journal.\n"); 1632 jbd2_journal_lock_updates(journal); 1633 status = jbd2_journal_flush(journal); 1634 jbd2_journal_unlock_updates(journal); 1635 if (status < 0) 1636 mlog_errno(status); 1637 1638 /* This will mark the node clean */ 1639 flags = le32_to_cpu(fe->id1.journal1.ij_flags); 1640 flags &= ~OCFS2_JOURNAL_DIRTY_FL; 1641 fe->id1.journal1.ij_flags = cpu_to_le32(flags); 1642 1643 /* Increment recovery generation to indicate successful recovery */ 1644 ocfs2_bump_recovery_generation(fe); 1645 osb->slot_recovery_generations[slot_num] = 1646 ocfs2_get_recovery_generation(fe); 1647 1648 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); 1649 status = ocfs2_write_block(osb, bh, INODE_CACHE(inode)); 1650 if (status < 0) 1651 mlog_errno(status); 1652 1653 if (!igrab(inode)) 1654 BUG(); 1655 1656 jbd2_journal_destroy(journal); 1657 1658 done: 1659 /* drop the lock on this nodes journal */ 1660 if (got_lock) 1661 ocfs2_inode_unlock(inode, 1); 1662 1663 if (inode) 1664 iput(inode); 1665 1666 brelse(bh); 1667 1668 mlog_exit(status); 1669 return status; 1670 } 1671 1672 /* 1673 * Do the most important parts of node recovery: 1674 * - Replay it's journal 1675 * - Stamp a clean local allocator file 1676 * - Stamp a clean truncate log 1677 * - Mark the node clean 1678 * 1679 * If this function completes without error, a node in OCFS2 can be 1680 * said to have been safely recovered. As a result, failure during the 1681 * second part of a nodes recovery process (local alloc recovery) is 1682 * far less concerning. 1683 */ 1684 static int ocfs2_recover_node(struct ocfs2_super *osb, 1685 int node_num, int slot_num) 1686 { 1687 int status = 0; 1688 struct ocfs2_dinode *la_copy = NULL; 1689 struct ocfs2_dinode *tl_copy = NULL; 1690 1691 mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", 1692 node_num, slot_num, osb->node_num); 1693 1694 /* Should not ever be called to recover ourselves -- in that 1695 * case we should've called ocfs2_journal_load instead. */ 1696 BUG_ON(osb->node_num == node_num); 1697 1698 status = ocfs2_replay_journal(osb, node_num, slot_num); 1699 if (status < 0) { 1700 if (status == -EBUSY) { 1701 mlog(0, "Skipping recovery for slot %u (node %u) " 1702 "as another node has recovered it\n", slot_num, 1703 node_num); 1704 status = 0; 1705 goto done; 1706 } 1707 mlog_errno(status); 1708 goto done; 1709 } 1710 1711 /* Stamp a clean local alloc file AFTER recovering the journal... */ 1712 status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy); 1713 if (status < 0) { 1714 mlog_errno(status); 1715 goto done; 1716 } 1717 1718 /* An error from begin_truncate_log_recovery is not 1719 * serious enough to warrant halting the rest of 1720 * recovery. */ 1721 status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy); 1722 if (status < 0) 1723 mlog_errno(status); 1724 1725 /* Likewise, this would be a strange but ultimately not so 1726 * harmful place to get an error... */ 1727 status = ocfs2_clear_slot(osb, slot_num); 1728 if (status < 0) 1729 mlog_errno(status); 1730 1731 /* This will kfree the memory pointed to by la_copy and tl_copy */ 1732 ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy, 1733 tl_copy, NULL); 1734 1735 status = 0; 1736 done: 1737 1738 mlog_exit(status); 1739 return status; 1740 } 1741 1742 /* Test node liveness by trylocking his journal. If we get the lock, 1743 * we drop it here. Return 0 if we got the lock, -EAGAIN if node is 1744 * still alive (we couldn't get the lock) and < 0 on error. */ 1745 static int ocfs2_trylock_journal(struct ocfs2_super *osb, 1746 int slot_num) 1747 { 1748 int status, flags; 1749 struct inode *inode = NULL; 1750 1751 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, 1752 slot_num); 1753 if (inode == NULL) { 1754 mlog(ML_ERROR, "access error\n"); 1755 status = -EACCES; 1756 goto bail; 1757 } 1758 if (is_bad_inode(inode)) { 1759 mlog(ML_ERROR, "access error (bad inode)\n"); 1760 iput(inode); 1761 inode = NULL; 1762 status = -EACCES; 1763 goto bail; 1764 } 1765 SET_INODE_JOURNAL(inode); 1766 1767 flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE; 1768 status = ocfs2_inode_lock_full(inode, NULL, 1, flags); 1769 if (status < 0) { 1770 if (status != -EAGAIN) 1771 mlog_errno(status); 1772 goto bail; 1773 } 1774 1775 ocfs2_inode_unlock(inode, 1); 1776 bail: 1777 if (inode) 1778 iput(inode); 1779 1780 return status; 1781 } 1782 1783 /* Call this underneath ocfs2_super_lock. It also assumes that the 1784 * slot info struct has been updated from disk. */ 1785 int ocfs2_mark_dead_nodes(struct ocfs2_super *osb) 1786 { 1787 unsigned int node_num; 1788 int status, i; 1789 u32 gen; 1790 struct buffer_head *bh = NULL; 1791 struct ocfs2_dinode *di; 1792 1793 /* This is called with the super block cluster lock, so we 1794 * know that the slot map can't change underneath us. */ 1795 1796 for (i = 0; i < osb->max_slots; i++) { 1797 /* Read journal inode to get the recovery generation */ 1798 status = ocfs2_read_journal_inode(osb, i, &bh, NULL); 1799 if (status) { 1800 mlog_errno(status); 1801 goto bail; 1802 } 1803 di = (struct ocfs2_dinode *)bh->b_data; 1804 gen = ocfs2_get_recovery_generation(di); 1805 brelse(bh); 1806 bh = NULL; 1807 1808 spin_lock(&osb->osb_lock); 1809 osb->slot_recovery_generations[i] = gen; 1810 1811 mlog(0, "Slot %u recovery generation is %u\n", i, 1812 osb->slot_recovery_generations[i]); 1813 1814 if (i == osb->slot_num) { 1815 spin_unlock(&osb->osb_lock); 1816 continue; 1817 } 1818 1819 status = ocfs2_slot_to_node_num_locked(osb, i, &node_num); 1820 if (status == -ENOENT) { 1821 spin_unlock(&osb->osb_lock); 1822 continue; 1823 } 1824 1825 if (__ocfs2_recovery_map_test(osb, node_num)) { 1826 spin_unlock(&osb->osb_lock); 1827 continue; 1828 } 1829 spin_unlock(&osb->osb_lock); 1830 1831 /* Ok, we have a slot occupied by another node which 1832 * is not in the recovery map. We trylock his journal 1833 * file here to test if he's alive. */ 1834 status = ocfs2_trylock_journal(osb, i); 1835 if (!status) { 1836 /* Since we're called from mount, we know that 1837 * the recovery thread can't race us on 1838 * setting / checking the recovery bits. */ 1839 ocfs2_recovery_thread(osb, node_num); 1840 } else if ((status < 0) && (status != -EAGAIN)) { 1841 mlog_errno(status); 1842 goto bail; 1843 } 1844 } 1845 1846 status = 0; 1847 bail: 1848 mlog_exit(status); 1849 return status; 1850 } 1851 1852 /* 1853 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some 1854 * randomness to the timeout to minimize multple nodes firing the timer at the 1855 * same time. 1856 */ 1857 static inline unsigned long ocfs2_orphan_scan_timeout(void) 1858 { 1859 unsigned long time; 1860 1861 get_random_bytes(&time, sizeof(time)); 1862 time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000); 1863 return msecs_to_jiffies(time); 1864 } 1865 1866 /* 1867 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for 1868 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This 1869 * is done to catch any orphans that are left over in orphan directories. 1870 * 1871 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT 1872 * seconds. It gets an EX lock on os_lockres and checks sequence number 1873 * stored in LVB. If the sequence number has changed, it means some other 1874 * node has done the scan. This node skips the scan and tracks the 1875 * sequence number. If the sequence number didn't change, it means a scan 1876 * hasn't happened. The node queues a scan and increments the 1877 * sequence number in the LVB. 1878 */ 1879 void ocfs2_queue_orphan_scan(struct ocfs2_super *osb) 1880 { 1881 struct ocfs2_orphan_scan *os; 1882 int status, i; 1883 u32 seqno = 0; 1884 1885 os = &osb->osb_orphan_scan; 1886 1887 mlog(0, "Begin orphan scan\n"); 1888 1889 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) 1890 goto out; 1891 1892 status = ocfs2_orphan_scan_lock(osb, &seqno); 1893 if (status < 0) { 1894 if (status != -EAGAIN) 1895 mlog_errno(status); 1896 goto out; 1897 } 1898 1899 /* Do no queue the tasks if the volume is being umounted */ 1900 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) 1901 goto unlock; 1902 1903 if (os->os_seqno != seqno) { 1904 os->os_seqno = seqno; 1905 goto unlock; 1906 } 1907 1908 for (i = 0; i < osb->max_slots; i++) 1909 ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL, 1910 NULL); 1911 /* 1912 * We queued a recovery on orphan slots, increment the sequence 1913 * number and update LVB so other node will skip the scan for a while 1914 */ 1915 seqno++; 1916 os->os_count++; 1917 os->os_scantime = CURRENT_TIME; 1918 unlock: 1919 ocfs2_orphan_scan_unlock(osb, seqno); 1920 out: 1921 mlog(0, "Orphan scan completed\n"); 1922 return; 1923 } 1924 1925 /* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT millsec */ 1926 void ocfs2_orphan_scan_work(struct work_struct *work) 1927 { 1928 struct ocfs2_orphan_scan *os; 1929 struct ocfs2_super *osb; 1930 1931 os = container_of(work, struct ocfs2_orphan_scan, 1932 os_orphan_scan_work.work); 1933 osb = os->os_osb; 1934 1935 mutex_lock(&os->os_lock); 1936 ocfs2_queue_orphan_scan(osb); 1937 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) 1938 queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, 1939 ocfs2_orphan_scan_timeout()); 1940 mutex_unlock(&os->os_lock); 1941 } 1942 1943 void ocfs2_orphan_scan_stop(struct ocfs2_super *osb) 1944 { 1945 struct ocfs2_orphan_scan *os; 1946 1947 os = &osb->osb_orphan_scan; 1948 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) { 1949 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); 1950 mutex_lock(&os->os_lock); 1951 cancel_delayed_work(&os->os_orphan_scan_work); 1952 mutex_unlock(&os->os_lock); 1953 } 1954 } 1955 1956 void ocfs2_orphan_scan_init(struct ocfs2_super *osb) 1957 { 1958 struct ocfs2_orphan_scan *os; 1959 1960 os = &osb->osb_orphan_scan; 1961 os->os_osb = osb; 1962 os->os_count = 0; 1963 os->os_seqno = 0; 1964 mutex_init(&os->os_lock); 1965 INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); 1966 } 1967 1968 void ocfs2_orphan_scan_start(struct ocfs2_super *osb) 1969 { 1970 struct ocfs2_orphan_scan *os; 1971 1972 os = &osb->osb_orphan_scan; 1973 os->os_scantime = CURRENT_TIME; 1974 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) 1975 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); 1976 else { 1977 atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); 1978 queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, 1979 ocfs2_orphan_scan_timeout()); 1980 } 1981 } 1982 1983 struct ocfs2_orphan_filldir_priv { 1984 struct inode *head; 1985 struct ocfs2_super *osb; 1986 }; 1987 1988 static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len, 1989 loff_t pos, u64 ino, unsigned type) 1990 { 1991 struct ocfs2_orphan_filldir_priv *p = priv; 1992 struct inode *iter; 1993 1994 if (name_len == 1 && !strncmp(".", name, 1)) 1995 return 0; 1996 if (name_len == 2 && !strncmp("..", name, 2)) 1997 return 0; 1998 1999 /* Skip bad inodes so that recovery can continue */ 2000 iter = ocfs2_iget(p->osb, ino, 2001 OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0); 2002 if (IS_ERR(iter)) 2003 return 0; 2004 2005 mlog(0, "queue orphan %llu\n", 2006 (unsigned long long)OCFS2_I(iter)->ip_blkno); 2007 /* No locking is required for the next_orphan queue as there 2008 * is only ever a single process doing orphan recovery. */ 2009 OCFS2_I(iter)->ip_next_orphan = p->head; 2010 p->head = iter; 2011 2012 return 0; 2013 } 2014 2015 static int ocfs2_queue_orphans(struct ocfs2_super *osb, 2016 int slot, 2017 struct inode **head) 2018 { 2019 int status; 2020 struct inode *orphan_dir_inode = NULL; 2021 struct ocfs2_orphan_filldir_priv priv; 2022 loff_t pos = 0; 2023 2024 priv.osb = osb; 2025 priv.head = *head; 2026 2027 orphan_dir_inode = ocfs2_get_system_file_inode(osb, 2028 ORPHAN_DIR_SYSTEM_INODE, 2029 slot); 2030 if (!orphan_dir_inode) { 2031 status = -ENOENT; 2032 mlog_errno(status); 2033 return status; 2034 } 2035 2036 mutex_lock(&orphan_dir_inode->i_mutex); 2037 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); 2038 if (status < 0) { 2039 mlog_errno(status); 2040 goto out; 2041 } 2042 2043 status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, 2044 ocfs2_orphan_filldir); 2045 if (status) { 2046 mlog_errno(status); 2047 goto out_cluster; 2048 } 2049 2050 *head = priv.head; 2051 2052 out_cluster: 2053 ocfs2_inode_unlock(orphan_dir_inode, 0); 2054 out: 2055 mutex_unlock(&orphan_dir_inode->i_mutex); 2056 iput(orphan_dir_inode); 2057 return status; 2058 } 2059 2060 static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb, 2061 int slot) 2062 { 2063 int ret; 2064 2065 spin_lock(&osb->osb_lock); 2066 ret = !osb->osb_orphan_wipes[slot]; 2067 spin_unlock(&osb->osb_lock); 2068 return ret; 2069 } 2070 2071 static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb, 2072 int slot) 2073 { 2074 spin_lock(&osb->osb_lock); 2075 /* Mark ourselves such that new processes in delete_inode() 2076 * know to quit early. */ 2077 ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot); 2078 while (osb->osb_orphan_wipes[slot]) { 2079 /* If any processes are already in the middle of an 2080 * orphan wipe on this dir, then we need to wait for 2081 * them. */ 2082 spin_unlock(&osb->osb_lock); 2083 wait_event_interruptible(osb->osb_wipe_event, 2084 ocfs2_orphan_recovery_can_continue(osb, slot)); 2085 spin_lock(&osb->osb_lock); 2086 } 2087 spin_unlock(&osb->osb_lock); 2088 } 2089 2090 static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb, 2091 int slot) 2092 { 2093 ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot); 2094 } 2095 2096 /* 2097 * Orphan recovery. Each mounted node has it's own orphan dir which we 2098 * must run during recovery. Our strategy here is to build a list of 2099 * the inodes in the orphan dir and iget/iput them. The VFS does 2100 * (most) of the rest of the work. 2101 * 2102 * Orphan recovery can happen at any time, not just mount so we have a 2103 * couple of extra considerations. 2104 * 2105 * - We grab as many inodes as we can under the orphan dir lock - 2106 * doing iget() outside the orphan dir risks getting a reference on 2107 * an invalid inode. 2108 * - We must be sure not to deadlock with other processes on the 2109 * system wanting to run delete_inode(). This can happen when they go 2110 * to lock the orphan dir and the orphan recovery process attempts to 2111 * iget() inside the orphan dir lock. This can be avoided by 2112 * advertising our state to ocfs2_delete_inode(). 2113 */ 2114 static int ocfs2_recover_orphans(struct ocfs2_super *osb, 2115 int slot) 2116 { 2117 int ret = 0; 2118 struct inode *inode = NULL; 2119 struct inode *iter; 2120 struct ocfs2_inode_info *oi; 2121 2122 mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); 2123 2124 ocfs2_mark_recovering_orphan_dir(osb, slot); 2125 ret = ocfs2_queue_orphans(osb, slot, &inode); 2126 ocfs2_clear_recovering_orphan_dir(osb, slot); 2127 2128 /* Error here should be noted, but we want to continue with as 2129 * many queued inodes as we've got. */ 2130 if (ret) 2131 mlog_errno(ret); 2132 2133 while (inode) { 2134 oi = OCFS2_I(inode); 2135 mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); 2136 2137 iter = oi->ip_next_orphan; 2138 2139 spin_lock(&oi->ip_lock); 2140 /* The remote delete code may have set these on the 2141 * assumption that the other node would wipe them 2142 * successfully. If they are still in the node's 2143 * orphan dir, we need to reset that state. */ 2144 oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE); 2145 2146 /* Set the proper information to get us going into 2147 * ocfs2_delete_inode. */ 2148 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; 2149 spin_unlock(&oi->ip_lock); 2150 2151 iput(inode); 2152 2153 inode = iter; 2154 } 2155 2156 return ret; 2157 } 2158 2159 static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota) 2160 { 2161 /* This check is good because ocfs2 will wait on our recovery 2162 * thread before changing it to something other than MOUNTED 2163 * or DISABLED. */ 2164 wait_event(osb->osb_mount_event, 2165 (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) || 2166 atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS || 2167 atomic_read(&osb->vol_state) == VOLUME_DISABLED); 2168 2169 /* If there's an error on mount, then we may never get to the 2170 * MOUNTED flag, but this is set right before 2171 * dismount_volume() so we can trust it. */ 2172 if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { 2173 mlog(0, "mount error, exiting!\n"); 2174 return -EBUSY; 2175 } 2176 2177 return 0; 2178 } 2179 2180 static int ocfs2_commit_thread(void *arg) 2181 { 2182 int status; 2183 struct ocfs2_super *osb = arg; 2184 struct ocfs2_journal *journal = osb->journal; 2185 2186 /* we can trust j_num_trans here because _should_stop() is only set in 2187 * shutdown and nobody other than ourselves should be able to start 2188 * transactions. committing on shutdown might take a few iterations 2189 * as final transactions put deleted inodes on the list */ 2190 while (!(kthread_should_stop() && 2191 atomic_read(&journal->j_num_trans) == 0)) { 2192 2193 wait_event_interruptible(osb->checkpoint_event, 2194 atomic_read(&journal->j_num_trans) 2195 || kthread_should_stop()); 2196 2197 status = ocfs2_commit_cache(osb); 2198 if (status < 0) 2199 mlog_errno(status); 2200 2201 if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){ 2202 mlog(ML_KTHREAD, 2203 "commit_thread: %u transactions pending on " 2204 "shutdown\n", 2205 atomic_read(&journal->j_num_trans)); 2206 } 2207 } 2208 2209 return 0; 2210 } 2211 2212 /* Reads all the journal inodes without taking any cluster locks. Used 2213 * for hard readonly access to determine whether any journal requires 2214 * recovery. Also used to refresh the recovery generation numbers after 2215 * a journal has been recovered by another node. 2216 */ 2217 int ocfs2_check_journals_nolocks(struct ocfs2_super *osb) 2218 { 2219 int ret = 0; 2220 unsigned int slot; 2221 struct buffer_head *di_bh = NULL; 2222 struct ocfs2_dinode *di; 2223 int journal_dirty = 0; 2224 2225 for(slot = 0; slot < osb->max_slots; slot++) { 2226 ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL); 2227 if (ret) { 2228 mlog_errno(ret); 2229 goto out; 2230 } 2231 2232 di = (struct ocfs2_dinode *) di_bh->b_data; 2233 2234 osb->slot_recovery_generations[slot] = 2235 ocfs2_get_recovery_generation(di); 2236 2237 if (le32_to_cpu(di->id1.journal1.ij_flags) & 2238 OCFS2_JOURNAL_DIRTY_FL) 2239 journal_dirty = 1; 2240 2241 brelse(di_bh); 2242 di_bh = NULL; 2243 } 2244 2245 out: 2246 if (journal_dirty) 2247 ret = -EROFS; 2248 return ret; 2249 } 2250