/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * journal.c
 *
 * Defines functions of journalling api
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/random.h>

#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "quota.h"

#include "buffer_head_io.h"

/* Protects the per-journal transaction id counter (see ocfs2_inc_trans_id). */
DEFINE_SPINLOCK(trans_inc_lock);

/* Delay (in ms) between orphan scan passes. */
#define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000

/* Forward declarations for functions defined later in this file. */
static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
				      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot);
static int ocfs2_commit_thread(void *arg);
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
					    int slot_num,
					    struct ocfs2_dinode *la_dinode,
					    struct ocfs2_dinode *tl_dinode,
					    struct ocfs2_quota_recovery *qrec);

/* Wait for the volume to finish mounting (non-quota variant). */
static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
	return __ocfs2_wait_on_mount(osb, 0);
}

/* Wait for the volume mount to reach the point where quotas are usable. */
static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
	return __ocfs2_wait_on_mount(osb, 1);
}

/*
 * This replay_map is to track online/offline slots, so we could recover
 * offline slots during recovery and mount
 */

enum ocfs2_replay_state {
	REPLAY_UNNEEDED = 0,	/* Replay is not needed, so ignore this map */
	REPLAY_NEEDED,		/* Replay slots marked in rm_replay_slots */
	REPLAY_DONE		/* Replay was already queued */
};

struct ocfs2_replay_map {
	unsigned int rm_slots;			/* number of entries below */
	enum ocfs2_replay_state rm_state;
	/* One flag per slot; sized at allocation time (see
	 * ocfs2_compute_replay_slots).  GNU-style zero-length trailing
	 * array, kept as-is to match the allocation arithmetic. */
	unsigned char rm_replay_slots[0];
};

/*
 * Advance the replay map's state, unless the replay was already queued
 * (REPLAY_DONE is terminal).  A missing map means replay tracking is not
 * in use, so this is a no-op.
 */
void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
{
	if (!osb->replay_map)
		return;

	/* If we've already queued the replay, we don't have any more to do */
	if (osb->replay_map->rm_state == REPLAY_DONE)
		return;

	osb->replay_map->rm_state = state;
}

/*
 * Allocate and populate osb->replay_map, flagging every slot that has no
 * node assigned (offline) so it can be replayed later.  Returns 0 on
 * success or -ENOMEM.  Idempotent: a second call is a no-op.
 */
int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
{
	struct ocfs2_replay_map *replay_map;
	int i, node_num;

	/* If replay map is already set, we don't do it again */
	if (osb->replay_map)
		return 0;

	/* Header plus one byte of flag storage per slot. */
	replay_map = kzalloc(sizeof(struct ocfs2_replay_map) +
			     (osb->max_slots * sizeof(char)), GFP_KERNEL);

	if (!replay_map) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	spin_lock(&osb->osb_lock);

	replay_map->rm_slots = osb->max_slots;
	replay_map->rm_state = REPLAY_UNNEEDED;

	/* set rm_replay_slots for offline slot(s) */
	for (i = 0; i < replay_map->rm_slots; i++) {
		if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
			replay_map->rm_replay_slots[i] = 1;
	}

	osb->replay_map = replay_map;
	spin_unlock(&osb->osb_lock);
	return 0;
}

/*
 * Queue recovery-completion work for every slot flagged in the replay map,
 * then mark the map REPLAY_DONE.  Does nothing unless the map exists and
 * is in the REPLAY_NEEDED state.
 */
void ocfs2_queue_replay_slots(struct ocfs2_super *osb)
{
	struct ocfs2_replay_map *replay_map = osb->replay_map;
	int i;

	if (!replay_map)
		return;

	if (replay_map->rm_state != REPLAY_NEEDED)
		return;

	for (i = 0; i < replay_map->rm_slots; i++)
		if (replay_map->rm_replay_slots[i])
			ocfs2_queue_recovery_completion(osb->journal, i, NULL,
							NULL, NULL);
	replay_map->rm_state = REPLAY_DONE;
}

/* Free the replay map and clear the osb pointer.  Safe to call when no
 * map was ever allocated. */
void ocfs2_free_replay_slots(struct ocfs2_super *osb)
{
	struct ocfs2_replay_map *replay_map = osb->replay_map;

	if (!osb->replay_map)
		return;

	kfree(replay_map);
	osb->replay_map = NULL;
}

/*
 * Initialize the recovery machinery for this superblock: the recovery
 * lock/waitqueue and the recovery map (one entry per slot).  Returns 0
 * or -ENOMEM.  The rm_entries array lives in the same allocation,
 * immediately after the struct header.
 */
int ocfs2_recovery_init(struct ocfs2_super *osb)
{
	struct ocfs2_recovery_map *rm;

	mutex_init(&osb->recovery_lock);
	osb->disable_recovery = 0;
	osb->recovery_thread_task = NULL;
	init_waitqueue_head(&osb->recovery_event);

	rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
		     osb->max_slots * sizeof(unsigned int),
		     GFP_KERNEL);
	if (!rm) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	rm->rm_entries = (unsigned int *)((char *)rm +
					  sizeof(struct ocfs2_recovery_map));
	osb->recovery_map = rm;

	return 0;
}

/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
	mb();
	return osb->recovery_thread_task != NULL;
}

/*
 * Tear down recovery: forbid new recovery threads, wait for any running
 * one to exit, flush queued recovery-completion work, then free the
 * recovery map.  Called on unmount, after which no one else can race on
 * osb->recovery_map (hence no osb_lock below).
 */
void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
	struct ocfs2_recovery_map *rm;

	/* disable any new recovery threads and wait for any currently
	 * running ones to exit. Do this before setting the vol_state. */
	mutex_lock(&osb->recovery_lock);
	osb->disable_recovery = 1;
	mutex_unlock(&osb->recovery_lock);
	wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

	/* At this point, we know that no more recovery threads can be
	 * launched, so wait for any recovery completion work to
	 * complete. */
	flush_workqueue(ocfs2_wq);

	/*
	 * Now that recovery is shut down, and the osb is about to be
	 * freed, the osb_lock is not taken here.
	 */
	rm = osb->recovery_map;
	/* XXX: Should we bug if there are dirty entries? */

	kfree(rm);
}

/* Return 1 if node_num is already present in the recovery map.  Caller
 * must hold osb_lock (asserted below). */
static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
				     unsigned int node_num)
{
	int i;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	assert_spin_locked(&osb->osb_lock);

	for (i = 0; i < rm->rm_used; i++) {
		if (rm->rm_entries[i] == node_num)
			return 1;
	}

	return 0;
}

/* Behaves like test-and-set.  Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
				  unsigned int node_num)
{
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);
	if (__ocfs2_recovery_map_test(osb, node_num)) {
		spin_unlock(&osb->osb_lock);
		return 1;
	}

	/* XXX: Can this be exploited? Not from o2dlm... */
	BUG_ON(rm->rm_used >= osb->max_slots);

	rm->rm_entries[rm->rm_used] = node_num;
	rm->rm_used++;
	spin_unlock(&osb->osb_lock);

	return 0;
}

/* Remove node_num from the recovery map, compacting the array.  A
 * node that is not present is silently ignored. */
static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
				     unsigned int node_num)
{
	int i;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);

	for (i = 0; i < rm->rm_used; i++) {
		if (rm->rm_entries[i] == node_num)
			break;
	}

	if (i < rm->rm_used) {
		/* XXX: be careful with the pointer math */
		memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
			(rm->rm_used - i - 1) * sizeof(unsigned int));
		rm->rm_used--;
	}

	spin_unlock(&osb->osb_lock);
}

/*
 * Flush and checkpoint the journal, reset the pending-transaction count,
 * and bump the transaction id.  Takes j_trans_barrier for write, so it
 * excludes all transaction starters while it runs.  Returns the status
 * of jbd2_journal_flush() (0 if there was nothing to flush).
 */
static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
	int status = 0;
	unsigned int flushed;
	/* NOTE(review): old_id captures the pre-increment trans id but is
	 * not referenced afterwards in this chunk. */
	unsigned long old_id;
	struct ocfs2_journal *journal = NULL;

	mlog_entry_void();

	journal = osb->journal;

	/* Flush all pending commits and checkpoint the journal. */
	down_write(&journal->j_trans_barrier);

	if (atomic_read(&journal->j_num_trans) == 0) {
		up_write(&journal->j_trans_barrier);
		mlog(0, "No transactions for me to flush!\n");
		goto finally;
	}

	jbd2_journal_lock_updates(journal->j_journal);
	status = jbd2_journal_flush(journal->j_journal);
	jbd2_journal_unlock_updates(journal->j_journal);
	if (status < 0) {
		up_write(&journal->j_trans_barrier);
		mlog_errno(status);
		goto finally;
	}

	old_id = ocfs2_inc_trans_id(journal);

	flushed = atomic_read(&journal->j_num_trans);
	atomic_set(&journal->j_num_trans, 0);
	up_write(&journal->j_trans_barrier);

	mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
	     journal->j_trans_id, flushed);

	ocfs2_wake_downconvert_thread(osb);
	wake_up(&journal->j_checkpointed);
finally:
	mlog_exit(status);
	return status;
}

/* pass it NULL and it will allocate a new handle object for you.  If
 * you pass it a handle however, it may still return error, in which
 * case it has free'd the passed handle for you.
 */
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
	journal_t *journal = osb->journal->j_journal;
	handle_t *handle;

	BUG_ON(!osb || !osb->journal->j_journal);

	if (ocfs2_is_hard_readonly(osb))
		return ERR_PTR(-EROFS);

	BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
	BUG_ON(max_buffs <= 0);

	/* Nested transaction? Just return the handle... */
	if (journal_current_handle())
		return jbd2_journal_start(journal, max_buffs);

	/* Taken for read here; ocfs2_commit_trans() drops it.  The commit
	 * thread takes it for write to exclude us (see ocfs2_commit_cache). */
	down_read(&osb->journal->j_trans_barrier);

	handle = jbd2_journal_start(journal, max_buffs);
	if (IS_ERR(handle)) {
		up_read(&osb->journal->j_trans_barrier);

		mlog_errno(PTR_ERR(handle));

		/* An aborted journal means the filesystem is done for. */
		if (is_journal_aborted(journal)) {
			ocfs2_abort(osb->sb, "Detected aborted journal");
			handle = ERR_PTR(-EROFS);
		}
	} else {
		/* Local mounts don't track outstanding transactions. */
		if (!ocfs2_mount_local(osb))
			atomic_inc(&(osb->journal->j_num_trans));
	}

	return handle;
}

/*
 * Stop a transaction handle.  Only the outermost stop (h_ref == 1 on
 * entry) releases j_trans_barrier; nested commits just drop a reference.
 * Returns the jbd2_journal_stop() result.
 */
int ocfs2_commit_trans(struct ocfs2_super *osb,
		       handle_t *handle)
{
	int ret, nested;
	struct ocfs2_journal *journal = osb->journal;

	BUG_ON(!handle);

	/* Must be sampled before jbd2_journal_stop() drops the ref. */
	nested = handle->h_ref > 1;
	ret = jbd2_journal_stop(handle);
	if (ret < 0)
		mlog_errno(ret);

	if (!nested)
		up_read(&journal->j_trans_barrier);

	return ret;
}

/*
 * 'nblocks' is what you want to add to the current transaction.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
	int status, old_nblocks;

	BUG_ON(!handle);
	BUG_ON(nblocks < 0);

	if (!nblocks)
		return 0;

	/* Remember current credits in case we must restart below. */
	old_nblocks = handle->h_buffer_credits;
	mlog_entry_void();

	mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
	/* Debug builds always force the restart path below. */
	status = 1;
#else
	status = jbd2_journal_extend(handle, nblocks);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
#endif

	/* status > 0 means jbd2 could not extend in place; restart the
	 * handle with the combined credit count instead. */
	if (status > 0) {
		mlog(0,
		     "jbd2_journal_extend failed, trying "
		     "jbd2_journal_restart\n");
		status = jbd2_journal_restart(handle,
					      old_nblocks + nblocks);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:

	mlog_exit(status);
	return status;
}

/* Pairs a jbd2 trigger descriptor with the byte offset of the
 * ocfs2_block_check (metaecc) structure inside the block it guards. */
struct ocfs2_triggers {
	struct jbd2_buffer_trigger_type	ot_triggers;
	int				ot_offset;
};

static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
	return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}

/* Commit-time trigger for block types whose check structure sits at a
 * fixed offset (ot_offset) from the start of the block. */
static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
				 struct buffer_head *bh,
				 void *data, size_t size)
{
	struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);

	/*
	 * We aren't guaranteed to have the superblock here, so we
	 * must unconditionally compute the ecc data.
	 * __ocfs2_journal_access() will only set the triggers if
	 * metaecc is enabled.
	 */
	ocfs2_block_check_compute(data, size, data + ot->ot_offset);
}

/*
 * Quota blocks have their own trigger because the struct ocfs2_block_check
 * offset depends on the blocksize.
 */
static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
				    struct buffer_head *bh,
				    void *data, size_t size)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(size, data);

	/*
	 * We aren't guaranteed to have the superblock here, so we
	 * must unconditionally compute the ecc data.
	 * __ocfs2_journal_access() will only set the triggers if
	 * metaecc is enabled.
	 */
	ocfs2_block_check_compute(data, size, &dqt->dq_check);
}

/*
 * Directory blocks also have their own trigger because the
 * struct ocfs2_block_check offset depends on the blocksize.
 */
static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
				    struct buffer_head *bh,
				    void *data, size_t size)
{
	struct ocfs2_dir_block_trailer *trailer =
		ocfs2_dir_trailer_from_size(size, data);

	/*
	 * We aren't guaranteed to have the superblock here, so we
	 * must unconditionally compute the ecc data.
	 * __ocfs2_journal_access() will only set the triggers if
	 * metaecc is enabled.
	 */
	ocfs2_block_check_compute(data, size, &trailer->db_check);
}

/* Called by JBD2 when it aborts a buffer; the filesystem cannot
 * continue after this. */
static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
				struct buffer_head *bh)
{
	mlog(ML_ERROR,
	     "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
	     "bh->b_blocknr = %llu\n",
	     (unsigned long)bh,
	     (unsigned long long)bh->b_blocknr);

	/* We aren't guaranteed to have the superblock here - but if we
	 * don't, it'll just crash. */
	ocfs2_error(bh->b_assoc_map->host->i_sb,
		    "JBD2 has aborted our journal, ocfs2 cannot continue\n");
}

/* Per-block-type trigger tables: each records where the metaecc check
 * structure lives within its block type. */
static struct ocfs2_triggers di_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_dinode, i_check),
};

static struct ocfs2_triggers eb_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_extent_block, h_check),
};

static struct ocfs2_triggers rb_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_refcount_block, rf_check),
};

static struct ocfs2_triggers gd_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_group_desc, bg_check),
};

/* Directory blocks locate their check structure at runtime, so no
 * ot_offset here (see ocfs2_db_commit_trigger). */
static struct ocfs2_triggers db_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_db_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
};

static struct ocfs2_triggers xb_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_xattr_block, xb_check),
};

/* Quota blocks locate their check structure at runtime, so no
 * ot_offset here (see ocfs2_dq_commit_trigger). */
static struct ocfs2_triggers dq_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_dq_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
};

static struct ocfs2_triggers dr_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_dx_root_block, dr_check),
};

static struct ocfs2_triggers dl_triggers = {
	.ot_triggers = {
		.t_commit = ocfs2_commit_trigger,
		.t_abort = ocfs2_abort_trigger,
	},
	.ot_offset	= offsetof(struct ocfs2_dx_leaf, dl_check),
};

/*
 * Common journal-access helper: records the transaction on the caching
 * info, asks jbd2 for write/undo access to bh, and (when metaecc is on)
 * attaches the given commit/abort triggers.  'triggers' may be NULL for
 * blocks with no ecc coverage.  Returns 0 or a negative jbd2 error.
 */
static int __ocfs2_journal_access(handle_t *handle,
				  struct ocfs2_caching_info *ci,
				  struct buffer_head *bh,
				  struct ocfs2_triggers *triggers,
				  int type)
{
	int status;
	struct ocfs2_super *osb =
		OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

	BUG_ON(!ci || !ci->ci_ops);
	BUG_ON(!handle);
	BUG_ON(!bh);

	mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
		   (unsigned long long)bh->b_blocknr, type,
		   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
		   "OCFS2_JOURNAL_ACCESS_CREATE" :
		   "OCFS2_JOURNAL_ACCESS_WRITE",
		   bh->b_size);

	/* we can safely remove this assertion after testing. */
	if (!buffer_uptodate(bh)) {
		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
		mlog(ML_ERROR, "b_blocknr=%llu\n",
		     (unsigned long long)bh->b_blocknr);
		BUG();
	}

	/* Set the current transaction information on the ci so
	 * that the locking code knows whether it can drop it's locks
	 * on this ci or not. We're protected from the commit
	 * thread updating the current transaction id until
	 * ocfs2_commit_trans() because ocfs2_start_trans() took
	 * j_trans_barrier for us. */
	ocfs2_set_ci_lock_trans(osb->journal, ci);

	ocfs2_metadata_cache_io_lock(ci);
	switch (type) {
	case OCFS2_JOURNAL_ACCESS_CREATE:
	case OCFS2_JOURNAL_ACCESS_WRITE:
		status = jbd2_journal_get_write_access(handle, bh);
		break;

	case OCFS2_JOURNAL_ACCESS_UNDO:
		status = jbd2_journal_get_undo_access(handle, bh);
		break;

	default:
		status = -EINVAL;
		mlog(ML_ERROR, "Unknown access type!\n");
	}
	/* Triggers are only armed when metaecc is enabled on this volume. */
	if (!status && ocfs2_meta_ecc(osb) && triggers)
		jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
	ocfs2_metadata_cache_io_unlock(ci);

	if (status < 0)
		mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
		     status, type);

	mlog_exit(status);
	return status;
}

/* Journal access for an inode (dinode) block. */
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
}

/* Journal access for an extent block. */
int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
}
/*
 * Typed wrappers around __ocfs2_journal_access().  Each variant hands
 * jbd2 the ocfs2_triggers set for one metadata block type;
 * __ocfs2_journal_access() only installs the triggers when metadata
 * ecc is enabled on the volume, so these are free otherwise.
 */

/* presumably refcount tree blocks (rb_triggers) — confirm against callers */
int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
				      type);
}

/* group descriptor blocks */
int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
}

/* directory blocks */
int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
}

/* extended attribute blocks */
int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
}

/* quota blocks */
int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
}

/* presumably dir index root blocks (dr_triggers) — confirm against callers */
int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
}

/* presumably dir index leaf blocks (dl_triggers) — confirm against callers */
int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
			    struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
}

/* Plain journal access: no per-type triggers are installed. */
int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
			 struct buffer_head *bh, int type)
{
	return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}

/*
 * Mark a previously journal_access'd buffer as containing dirty
 * metadata.  jbd2_journal_dirty_metadata() failing here indicates a
 * caller bug (e.g. the bh was never passed through a journal_access
 * variant on this handle), hence BUG_ON rather than an error return.
 */
void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
	int status;

	mlog_entry("(bh->b_blocknr=%llu)\n",
		   (unsigned long long)bh->b_blocknr);

	status = jbd2_journal_dirty_metadata(handle, bh);
	BUG_ON(status);

	mlog_exit_void();
}

#define OCFS2_DEFAULT_COMMIT_INTERVAL	(HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)

/*
 * Push the commit-interval and barrier mount options into the live
 * jbd2 journal.  j_commit_interval and j_flags belong to jbd2, so
 * they are updated under the journal's j_state_lock (write side).
 */
void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
	journal_t *journal = osb->journal->j_journal;
	unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;

	/* osb_commit_interval == 0 means "use the jbd2 default" */
	if (osb->osb_commit_interval)
		commit_interval = osb->osb_commit_interval;

	write_lock(&journal->j_state_lock);
	journal->j_commit_interval = commit_interval;
	if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	write_unlock(&journal->j_state_lock);
}

/*
 * Look up this node's journal system-file inode, take the cluster
 * inode lock on it, and hand the inode to jbd2.
 *
 * On success, *dirty is set when the dinode carries
 * OCFS2_JOURNAL_DIRTY_FL (i.e. the journal needs replay), the journal
 * inode/bh/jbd2 handle are stashed in @journal, and the inode lock is
 * intentionally kept held (dropped later by ocfs2_journal_shutdown()).
 * Returns 0 or -errno; on error every reference and lock taken here
 * is released before returning.
 */
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
	int status = -1;
	struct inode *inode = NULL; /* the journal inode */
	journal_t *j_journal = NULL;
	struct ocfs2_dinode *di = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_super *osb;
	int inode_lock = 0;

	mlog_entry_void();

	BUG_ON(!journal);

	osb = journal->j_osb;

	/* already have the inode for our journal */
	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    osb->slot_num);
	if (inode == NULL) {
		status = -EACCES;
		mlog_errno(status);
		goto done;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto done;
	}

	SET_INODE_JOURNAL(inode);
	OCFS2_I(inode)->ip_open_count++;

	/* Skip recovery waits here - journal inode metadata never
	 * changes in a live cluster so it can be considered an
	 * exception to the rule. */
	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not get lock on journal!\n");
		goto done;
	}

	inode_lock = 1;
	di = (struct ocfs2_dinode *)bh->b_data;

	if (inode->i_size <  OCFS2_MIN_JOURNAL_SIZE) {
		mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
		     inode->i_size);
		status = -EINVAL;
		goto done;
	}

	mlog(0, "inode->i_size = %lld\n", inode->i_size);
	mlog(0, "inode->i_blocks = %llu\n",
	     (unsigned long long)inode->i_blocks);
	mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);

	/* call the kernels journal init function now */
	j_journal = jbd2_journal_init_inode(inode);
	if (j_journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EINVAL;
		goto done;
	}

	mlog(0, "Returned from jbd2_journal_init_inode\n");
	mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);

	*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
		  OCFS2_JOURNAL_DIRTY_FL);

	journal->j_journal = j_journal;
	journal->j_inode = inode;
	journal->j_bh = bh;

	ocfs2_set_journal_params(osb);

	journal->j_state = OCFS2_JOURNAL_LOADED;

	status = 0;
done:
	if (status < 0) {
		/* unwind in reverse order of acquisition; brelse(NULL)
		 * and iput() on a live ref are both safe here */
		if (inode_lock)
			ocfs2_inode_unlock(inode, 1);
		brelse(bh);
		if (inode) {
			OCFS2_I(inode)->ip_open_count--;
			iput(inode);
		}
	}

	mlog_exit(status);
	return status;
}

/* Bump the on-disk recovery generation (little-endian field in the
 * journal dinode); done whenever the journal has been replayed. */
static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
	le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
	return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}

/*
 * Set or clear OCFS2_JOURNAL_DIRTY_FL in the journal dinode and write
 * the block back synchronously.  @replayed additionally bumps the
 * recovery generation so other nodes can tell a replay happened.
 */
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
				      int dirty, int replayed)
{
	int status;
	unsigned int flags;
	struct ocfs2_journal *journal = osb->journal;
	struct buffer_head *bh = journal->j_bh;
	struct ocfs2_dinode *fe;

	mlog_entry_void();

	fe = (struct ocfs2_dinode *)bh->b_data;

	/* The journal bh on the osb always comes from ocfs2_journal_init()
	 * and was validated there inside ocfs2_inode_lock_full().  It's a
	 * code bug if we mess it up. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	if (dirty)
		flags |= OCFS2_JOURNAL_DIRTY_FL;
	else
		flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	if (replayed)
		ocfs2_bump_recovery_generation(fe);

	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
	status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
	if (status < 0)
		mlog_errno(status);

	mlog_exit(status);
	return status;
}

/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
	struct ocfs2_journal *journal = NULL;
	int status = 0;
	struct inode *inode = NULL;
	int num_running_trans = 0;

	mlog_entry_void();

	BUG_ON(!osb);

	journal = osb->journal;
	if (!journal)
		goto done;

	inode = journal->j_inode;

	if (journal->j_state != OCFS2_JOURNAL_LOADED)
		goto done;

	/* need to inc inode use count - jbd2_journal_destroy will iput. */
	if (!igrab(inode))
		BUG();

	num_running_trans = atomic_read(&(osb->journal->j_num_trans));
	if (num_running_trans > 0)
		mlog(0, "Shutting down journal: must wait on %d "
		     "running transactions!\n",
		     num_running_trans);

	/* Do a commit_cache here. It will flush our journal, *and*
	 * release any locks that are still held.
	 * set the SHUTDOWN flag and release the trans lock.
	 * the commit thread will take the trans lock for us below. */
	journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

	/* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
	 * drop the trans_lock (which we want to hold until we
	 * completely destroy the journal. */
	if (osb->commit_task) {
		/* Wait for the commit thread */
		mlog(0, "Waiting for ocfs2commit to exit....\n");
		kthread_stop(osb->commit_task);
		osb->commit_task = NULL;
	}

	BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

	/* local mounts have no commit thread, so flush by hand here */
	if (ocfs2_mount_local(osb)) {
		jbd2_journal_lock_updates(journal->j_journal);
		status = jbd2_journal_flush(journal->j_journal);
		jbd2_journal_unlock_updates(journal->j_journal);
		if (status < 0)
			mlog_errno(status);
	}

	if (status == 0) {
		/*
		 * Do not toggle if flush was unsuccessful otherwise
		 * will leave dirty metadata in a "clean" journal
		 */
		status = ocfs2_journal_toggle_dirty(osb, 0, 0);
		if (status < 0)
			mlog_errno(status);
	}

	/* Shutdown the kernel journal system; this drops the igrab'd
	 * inode reference taken above */
	jbd2_journal_destroy(journal->j_journal);
	journal->j_journal = NULL;

	OCFS2_I(inode)->ip_open_count--;

	/* unlock our journal — taken back in ocfs2_journal_init() */
	ocfs2_inode_unlock(inode, 1);

	brelse(journal->j_bh);
	journal->j_bh = NULL;

	journal->j_state = OCFS2_JOURNAL_FREE;

//	up_write(&journal->j_trans_barrier);
done:
	if (inode)
		iput(inode);
	mlog_exit_void();
}

/*
 * Report and acknowledge any error jbd2 recorded in the journal
 * superblock for @slot, then clear it so the journal is usable.
 */
static void ocfs2_clear_journal_error(struct super_block *sb,
				      journal_t *journal,
				      int slot)
{
	int olderr;

	olderr = jbd2_journal_errno(journal);
	if (olderr) {
		mlog(ML_ERROR, "File system error %d recorded in "
		     "journal %u.\n", olderr, slot);
		mlog(ML_ERROR, "File system on device %s needs checking.\n",
		     sb->s_id);

		jbd2_journal_ack_err(journal);
		jbd2_journal_clear_err(journal);
	}
}

/*
 * Load (and if needed replay) the jbd2 journal, mark it dirty on
 * disk, and launch the commit thread.  @local skips the commit
 * thread for local (non-cluster) mounts; @replayed propagates to
 * ocfs2_journal_toggle_dirty() to bump the recovery generation.
 */
int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
	int status = 0;
	struct ocfs2_super *osb;

	mlog_entry_void();

	BUG_ON(!journal);

	osb = journal->j_osb;

	status = jbd2_journal_load(journal->j_journal);
	if (status < 0) {
		mlog(ML_ERROR, "Failed to load journal!\n");
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

	status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* Launch the commit thread */
	if (!local) {
		osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
					       "ocfs2cmt");
		if (IS_ERR(osb->commit_task)) {
			status = PTR_ERR(osb->commit_task);
			osb->commit_task = NULL;
			mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
			     "error=%d", status);
			goto done;
		}
	} else
		osb->commit_task = NULL;

done:
	mlog_exit(status);
	return status;
}


/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
	int status;

	mlog_entry_void();

	BUG_ON(!journal);

	status = jbd2_journal_wipe(journal->j_journal, full);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* wiped == clean: clear the dirty flag on disk */
	status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
	if (status < 0)
		mlog_errno(status);

bail:
	mlog_exit(status);
	return status;
}

/* True when the recovery map is empty; rm_used is protected by
 * osb_lock. */
static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
	int empty;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);
	empty = (rm->rm_used == 0);
	spin_unlock(&osb->osb_lock);

	return empty;
}

/* Block until every node in the recovery map has been recovered. */
void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
	wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}

/*
 * JBD Might read a cached version of another nodes journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up to date version of those blocks then is to force
 * read them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date.
We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
	int status = 0;
	int i;
	u64 v_blkno, p_blkno, p_blocks, num_blocks;
	/* read at most this many physical blocks per pass */
#define CONCURRENT_JOURNAL_FILL 32ULL
	struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];

	mlog_entry_void();

	memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);

	num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
	v_blkno = 0;
	/* walk the journal file extent by extent, virtual block order */
	while (v_blkno < num_blocks) {
		status = ocfs2_extent_map_get_blocks(inode, v_blkno,
						     &p_blkno, &p_blocks, NULL);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		if (p_blocks > CONCURRENT_JOURNAL_FILL)
			p_blocks = CONCURRENT_JOURNAL_FILL;

		/* We are reading journal data which should not
		 * be put in the uptodate cache */
		status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
						p_blkno, p_blocks, bhs);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* we only wanted the read side effect; drop the refs
		 * and NULL the slots so the bail loop stays safe */
		for(i = 0; i < p_blocks; i++) {
			brelse(bhs[i]);
			bhs[i] = NULL;
		}

		v_blkno += p_blocks;
	}

bail:
	/* on the error path some slots may still hold refs; brelse(NULL)
	 * is a no-op for the rest */
	for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
		brelse(bhs[i]);
	mlog_exit(status);
	return status;
}

/* One queued unit of post-recovery cleanup work for a slot; linked on
 * journal->j_la_cleanups and consumed by ocfs2_complete_recovery(). */
struct ocfs2_la_recovery_item {
	struct list_head	lri_list;	/* list linkage */
	int			lri_slot;	/* slot being cleaned up */
	struct ocfs2_dinode	*lri_la_dinode;	/* local alloc copy, may be NULL */
	struct ocfs2_dinode	*lri_tl_dinode;	/* truncate log copy, may be NULL */
	struct ocfs2_quota_recovery *lri_qrec;	/* quota recovery info, may be NULL */
};

/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process.
1189ccd979bdSMark Fasheh */ 1190c4028958SDavid Howells void ocfs2_complete_recovery(struct work_struct *work) 1191ccd979bdSMark Fasheh { 1192ccd979bdSMark Fasheh int ret; 1193c4028958SDavid Howells struct ocfs2_journal *journal = 1194c4028958SDavid Howells container_of(work, struct ocfs2_journal, j_recovery_work); 1195c4028958SDavid Howells struct ocfs2_super *osb = journal->j_osb; 1196ccd979bdSMark Fasheh struct ocfs2_dinode *la_dinode, *tl_dinode; 1197800deef3SChristoph Hellwig struct ocfs2_la_recovery_item *item, *n; 11982205363dSJan Kara struct ocfs2_quota_recovery *qrec; 1199ccd979bdSMark Fasheh LIST_HEAD(tmp_la_list); 1200ccd979bdSMark Fasheh 1201ccd979bdSMark Fasheh mlog_entry_void(); 1202ccd979bdSMark Fasheh 1203ccd979bdSMark Fasheh mlog(0, "completing recovery from keventd\n"); 1204ccd979bdSMark Fasheh 1205ccd979bdSMark Fasheh spin_lock(&journal->j_lock); 1206ccd979bdSMark Fasheh list_splice_init(&journal->j_la_cleanups, &tmp_la_list); 1207ccd979bdSMark Fasheh spin_unlock(&journal->j_lock); 1208ccd979bdSMark Fasheh 1209800deef3SChristoph Hellwig list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { 1210ccd979bdSMark Fasheh list_del_init(&item->lri_list); 1211ccd979bdSMark Fasheh 1212ccd979bdSMark Fasheh mlog(0, "Complete recovery for slot %d\n", item->lri_slot); 1213ccd979bdSMark Fasheh 121419ece546SJan Kara ocfs2_wait_on_quotas(osb); 121519ece546SJan Kara 1216ccd979bdSMark Fasheh la_dinode = item->lri_la_dinode; 1217ccd979bdSMark Fasheh if (la_dinode) { 1218b0697053SMark Fasheh mlog(0, "Clean up local alloc %llu\n", 12191ca1a111SMark Fasheh (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); 1220ccd979bdSMark Fasheh 1221ccd979bdSMark Fasheh ret = ocfs2_complete_local_alloc_recovery(osb, 1222ccd979bdSMark Fasheh la_dinode); 1223ccd979bdSMark Fasheh if (ret < 0) 1224ccd979bdSMark Fasheh mlog_errno(ret); 1225ccd979bdSMark Fasheh 1226ccd979bdSMark Fasheh kfree(la_dinode); 1227ccd979bdSMark Fasheh } 1228ccd979bdSMark Fasheh 1229ccd979bdSMark Fasheh 
tl_dinode = item->lri_tl_dinode; 1230ccd979bdSMark Fasheh if (tl_dinode) { 1231b0697053SMark Fasheh mlog(0, "Clean up truncate log %llu\n", 12321ca1a111SMark Fasheh (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); 1233ccd979bdSMark Fasheh 1234ccd979bdSMark Fasheh ret = ocfs2_complete_truncate_log_recovery(osb, 1235ccd979bdSMark Fasheh tl_dinode); 1236ccd979bdSMark Fasheh if (ret < 0) 1237ccd979bdSMark Fasheh mlog_errno(ret); 1238ccd979bdSMark Fasheh 1239ccd979bdSMark Fasheh kfree(tl_dinode); 1240ccd979bdSMark Fasheh } 1241ccd979bdSMark Fasheh 1242ccd979bdSMark Fasheh ret = ocfs2_recover_orphans(osb, item->lri_slot); 1243ccd979bdSMark Fasheh if (ret < 0) 1244ccd979bdSMark Fasheh mlog_errno(ret); 1245ccd979bdSMark Fasheh 12462205363dSJan Kara qrec = item->lri_qrec; 12472205363dSJan Kara if (qrec) { 12482205363dSJan Kara mlog(0, "Recovering quota files"); 12492205363dSJan Kara ret = ocfs2_finish_quota_recovery(osb, qrec, 12502205363dSJan Kara item->lri_slot); 12512205363dSJan Kara if (ret < 0) 12522205363dSJan Kara mlog_errno(ret); 12532205363dSJan Kara /* Recovery info is already freed now */ 12542205363dSJan Kara } 12552205363dSJan Kara 1256ccd979bdSMark Fasheh kfree(item); 1257ccd979bdSMark Fasheh } 1258ccd979bdSMark Fasheh 1259ccd979bdSMark Fasheh mlog(0, "Recovery completion\n"); 1260ccd979bdSMark Fasheh mlog_exit_void(); 1261ccd979bdSMark Fasheh } 1262ccd979bdSMark Fasheh 1263ccd979bdSMark Fasheh /* NOTE: This function always eats your references to la_dinode and 1264ccd979bdSMark Fasheh * tl_dinode, either manually on error, or by passing them to 1265ccd979bdSMark Fasheh * ocfs2_complete_recovery */ 1266ccd979bdSMark Fasheh static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, 1267ccd979bdSMark Fasheh int slot_num, 1268ccd979bdSMark Fasheh struct ocfs2_dinode *la_dinode, 12692205363dSJan Kara struct ocfs2_dinode *tl_dinode, 12702205363dSJan Kara struct ocfs2_quota_recovery *qrec) 1271ccd979bdSMark Fasheh { 1272ccd979bdSMark Fasheh 
struct ocfs2_la_recovery_item *item; 1273ccd979bdSMark Fasheh 1274afae00abSSunil Mushran item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); 1275ccd979bdSMark Fasheh if (!item) { 1276ccd979bdSMark Fasheh /* Though we wish to avoid it, we are in fact safe in 1277ccd979bdSMark Fasheh * skipping local alloc cleanup as fsck.ocfs2 is more 1278ccd979bdSMark Fasheh * than capable of reclaiming unused space. */ 1279ccd979bdSMark Fasheh if (la_dinode) 1280ccd979bdSMark Fasheh kfree(la_dinode); 1281ccd979bdSMark Fasheh 1282ccd979bdSMark Fasheh if (tl_dinode) 1283ccd979bdSMark Fasheh kfree(tl_dinode); 1284ccd979bdSMark Fasheh 12852205363dSJan Kara if (qrec) 12862205363dSJan Kara ocfs2_free_quota_recovery(qrec); 12872205363dSJan Kara 1288ccd979bdSMark Fasheh mlog_errno(-ENOMEM); 1289ccd979bdSMark Fasheh return; 1290ccd979bdSMark Fasheh } 1291ccd979bdSMark Fasheh 1292ccd979bdSMark Fasheh INIT_LIST_HEAD(&item->lri_list); 1293ccd979bdSMark Fasheh item->lri_la_dinode = la_dinode; 1294ccd979bdSMark Fasheh item->lri_slot = slot_num; 1295ccd979bdSMark Fasheh item->lri_tl_dinode = tl_dinode; 12962205363dSJan Kara item->lri_qrec = qrec; 1297ccd979bdSMark Fasheh 1298ccd979bdSMark Fasheh spin_lock(&journal->j_lock); 1299ccd979bdSMark Fasheh list_add_tail(&item->lri_list, &journal->j_la_cleanups); 1300ccd979bdSMark Fasheh queue_work(ocfs2_wq, &journal->j_recovery_work); 1301ccd979bdSMark Fasheh spin_unlock(&journal->j_lock); 1302ccd979bdSMark Fasheh } 1303ccd979bdSMark Fasheh 1304ccd979bdSMark Fasheh /* Called by the mount code to queue recovery the last part of 13059140db04SSrinivas Eeda * recovery for it's own and offline slot(s). 
*/ 1306ccd979bdSMark Fasheh void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) 1307ccd979bdSMark Fasheh { 1308ccd979bdSMark Fasheh struct ocfs2_journal *journal = osb->journal; 1309ccd979bdSMark Fasheh 13109140db04SSrinivas Eeda /* No need to queue up our truncate_log as regular cleanup will catch 13119140db04SSrinivas Eeda * that */ 13129140db04SSrinivas Eeda ocfs2_queue_recovery_completion(journal, osb->slot_num, 13139140db04SSrinivas Eeda osb->local_alloc_copy, NULL, NULL); 1314ccd979bdSMark Fasheh ocfs2_schedule_truncate_log_flush(osb, 0); 1315ccd979bdSMark Fasheh 1316ccd979bdSMark Fasheh osb->local_alloc_copy = NULL; 1317ccd979bdSMark Fasheh osb->dirty = 0; 13189140db04SSrinivas Eeda 13199140db04SSrinivas Eeda /* queue to recover orphan slots for all offline slots */ 13209140db04SSrinivas Eeda ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); 13219140db04SSrinivas Eeda ocfs2_queue_replay_slots(osb); 13229140db04SSrinivas Eeda ocfs2_free_replay_slots(osb); 1323ccd979bdSMark Fasheh } 1324ccd979bdSMark Fasheh 13252205363dSJan Kara void ocfs2_complete_quota_recovery(struct ocfs2_super *osb) 13262205363dSJan Kara { 13272205363dSJan Kara if (osb->quota_rec) { 13282205363dSJan Kara ocfs2_queue_recovery_completion(osb->journal, 13292205363dSJan Kara osb->slot_num, 13302205363dSJan Kara NULL, 13312205363dSJan Kara NULL, 13322205363dSJan Kara osb->quota_rec); 13332205363dSJan Kara osb->quota_rec = NULL; 13342205363dSJan Kara } 13352205363dSJan Kara } 13362205363dSJan Kara 1337ccd979bdSMark Fasheh static int __ocfs2_recovery_thread(void *arg) 1338ccd979bdSMark Fasheh { 13392205363dSJan Kara int status, node_num, slot_num; 1340ccd979bdSMark Fasheh struct ocfs2_super *osb = arg; 1341553abd04SJoel Becker struct ocfs2_recovery_map *rm = osb->recovery_map; 13422205363dSJan Kara int *rm_quota = NULL; 13432205363dSJan Kara int rm_quota_used = 0, i; 13442205363dSJan Kara struct ocfs2_quota_recovery *qrec; 1345ccd979bdSMark Fasheh 1346ccd979bdSMark Fasheh 
mlog_entry_void(); 1347ccd979bdSMark Fasheh 1348ccd979bdSMark Fasheh status = ocfs2_wait_on_mount(osb); 1349ccd979bdSMark Fasheh if (status < 0) { 1350ccd979bdSMark Fasheh goto bail; 1351ccd979bdSMark Fasheh } 1352ccd979bdSMark Fasheh 13532205363dSJan Kara rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS); 13542205363dSJan Kara if (!rm_quota) { 13552205363dSJan Kara status = -ENOMEM; 13562205363dSJan Kara goto bail; 13572205363dSJan Kara } 1358ccd979bdSMark Fasheh restart: 1359ccd979bdSMark Fasheh status = ocfs2_super_lock(osb, 1); 1360ccd979bdSMark Fasheh if (status < 0) { 1361ccd979bdSMark Fasheh mlog_errno(status); 1362ccd979bdSMark Fasheh goto bail; 1363ccd979bdSMark Fasheh } 1364ccd979bdSMark Fasheh 13659140db04SSrinivas Eeda status = ocfs2_compute_replay_slots(osb); 13669140db04SSrinivas Eeda if (status < 0) 13679140db04SSrinivas Eeda mlog_errno(status); 13689140db04SSrinivas Eeda 13699140db04SSrinivas Eeda /* queue recovery for our own slot */ 13709140db04SSrinivas Eeda ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, 13719140db04SSrinivas Eeda NULL, NULL); 13729140db04SSrinivas Eeda 1373553abd04SJoel Becker spin_lock(&osb->osb_lock); 1374553abd04SJoel Becker while (rm->rm_used) { 1375553abd04SJoel Becker /* It's always safe to remove entry zero, as we won't 1376553abd04SJoel Becker * clear it until ocfs2_recover_node() has succeeded. 
*/ 1377553abd04SJoel Becker node_num = rm->rm_entries[0]; 1378553abd04SJoel Becker spin_unlock(&osb->osb_lock); 13792205363dSJan Kara mlog(0, "checking node %d\n", node_num); 13802205363dSJan Kara slot_num = ocfs2_node_num_to_slot(osb, node_num); 13812205363dSJan Kara if (slot_num == -ENOENT) { 13822205363dSJan Kara status = 0; 13832205363dSJan Kara mlog(0, "no slot for this node, so no recovery" 13842205363dSJan Kara "required.\n"); 13852205363dSJan Kara goto skip_recovery; 13862205363dSJan Kara } 13872205363dSJan Kara mlog(0, "node %d was using slot %d\n", node_num, slot_num); 1388ccd979bdSMark Fasheh 13892205363dSJan Kara /* It is a bit subtle with quota recovery. We cannot do it 13902205363dSJan Kara * immediately because we have to obtain cluster locks from 13912205363dSJan Kara * quota files and we also don't want to just skip it because 13922205363dSJan Kara * then quota usage would be out of sync until some node takes 13932205363dSJan Kara * the slot. So we remember which nodes need quota recovery 13942205363dSJan Kara * and when everything else is done, we recover quotas. 
*/ 13952205363dSJan Kara for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++); 13962205363dSJan Kara if (i == rm_quota_used) 13972205363dSJan Kara rm_quota[rm_quota_used++] = slot_num; 13982205363dSJan Kara 13992205363dSJan Kara status = ocfs2_recover_node(osb, node_num, slot_num); 14002205363dSJan Kara skip_recovery: 1401553abd04SJoel Becker if (!status) { 1402553abd04SJoel Becker ocfs2_recovery_map_clear(osb, node_num); 1403553abd04SJoel Becker } else { 1404ccd979bdSMark Fasheh mlog(ML_ERROR, 1405ccd979bdSMark Fasheh "Error %d recovering node %d on device (%u,%u)!\n", 1406ccd979bdSMark Fasheh status, node_num, 1407ccd979bdSMark Fasheh MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); 1408ccd979bdSMark Fasheh mlog(ML_ERROR, "Volume requires unmount.\n"); 1409ccd979bdSMark Fasheh } 1410ccd979bdSMark Fasheh 1411553abd04SJoel Becker spin_lock(&osb->osb_lock); 1412ccd979bdSMark Fasheh } 1413553abd04SJoel Becker spin_unlock(&osb->osb_lock); 1414553abd04SJoel Becker mlog(0, "All nodes recovered\n"); 1415553abd04SJoel Becker 1416539d8264SSunil Mushran /* Refresh all journal recovery generations from disk */ 1417539d8264SSunil Mushran status = ocfs2_check_journals_nolocks(osb); 1418539d8264SSunil Mushran status = (status == -EROFS) ? 0 : status; 1419539d8264SSunil Mushran if (status < 0) 1420539d8264SSunil Mushran mlog_errno(status); 1421539d8264SSunil Mushran 14222205363dSJan Kara /* Now it is right time to recover quotas... 
We have to do this under 14232205363dSJan Kara * superblock lock so that noone can start using the slot (and crash) 14242205363dSJan Kara * before we recover it */ 14252205363dSJan Kara for (i = 0; i < rm_quota_used; i++) { 14262205363dSJan Kara qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); 14272205363dSJan Kara if (IS_ERR(qrec)) { 14282205363dSJan Kara status = PTR_ERR(qrec); 14292205363dSJan Kara mlog_errno(status); 14302205363dSJan Kara continue; 14312205363dSJan Kara } 14322205363dSJan Kara ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], 14332205363dSJan Kara NULL, NULL, qrec); 14342205363dSJan Kara } 14352205363dSJan Kara 1436ccd979bdSMark Fasheh ocfs2_super_unlock(osb, 1); 1437ccd979bdSMark Fasheh 14389140db04SSrinivas Eeda /* queue recovery for offline slots */ 14399140db04SSrinivas Eeda ocfs2_queue_replay_slots(osb); 1440ccd979bdSMark Fasheh 1441ccd979bdSMark Fasheh bail: 1442c74ec2f7SArjan van de Ven mutex_lock(&osb->recovery_lock); 1443553abd04SJoel Becker if (!status && !ocfs2_recovery_completed(osb)) { 1444c74ec2f7SArjan van de Ven mutex_unlock(&osb->recovery_lock); 1445ccd979bdSMark Fasheh goto restart; 1446ccd979bdSMark Fasheh } 1447ccd979bdSMark Fasheh 14489140db04SSrinivas Eeda ocfs2_free_replay_slots(osb); 1449ccd979bdSMark Fasheh osb->recovery_thread_task = NULL; 1450ccd979bdSMark Fasheh mb(); /* sync with ocfs2_recovery_thread_running */ 1451ccd979bdSMark Fasheh wake_up(&osb->recovery_event); 1452ccd979bdSMark Fasheh 1453c74ec2f7SArjan van de Ven mutex_unlock(&osb->recovery_lock); 1454ccd979bdSMark Fasheh 14552205363dSJan Kara if (rm_quota) 14562205363dSJan Kara kfree(rm_quota); 14572205363dSJan Kara 1458ccd979bdSMark Fasheh mlog_exit(status); 1459ccd979bdSMark Fasheh /* no one is callint kthread_stop() for us so the kthread() api 1460ccd979bdSMark Fasheh * requires that we call do_exit(). And it isn't exported, but 1461ccd979bdSMark Fasheh * complete_and_exit() seems to be a minimal wrapper around it. 
*/ 1462ccd979bdSMark Fasheh complete_and_exit(NULL, status); 1463ccd979bdSMark Fasheh return status; 1464ccd979bdSMark Fasheh } 1465ccd979bdSMark Fasheh 1466ccd979bdSMark Fasheh void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) 1467ccd979bdSMark Fasheh { 1468ccd979bdSMark Fasheh mlog_entry("(node_num=%d, osb->node_num = %d)\n", 1469ccd979bdSMark Fasheh node_num, osb->node_num); 1470ccd979bdSMark Fasheh 1471c74ec2f7SArjan van de Ven mutex_lock(&osb->recovery_lock); 1472ccd979bdSMark Fasheh if (osb->disable_recovery) 1473ccd979bdSMark Fasheh goto out; 1474ccd979bdSMark Fasheh 1475ccd979bdSMark Fasheh /* People waiting on recovery will wait on 1476ccd979bdSMark Fasheh * the recovery map to empty. */ 1477553abd04SJoel Becker if (ocfs2_recovery_map_set(osb, node_num)) 1478553abd04SJoel Becker mlog(0, "node %d already in recovery map.\n", node_num); 1479ccd979bdSMark Fasheh 1480ccd979bdSMark Fasheh mlog(0, "starting recovery thread...\n"); 1481ccd979bdSMark Fasheh 1482ccd979bdSMark Fasheh if (osb->recovery_thread_task) 1483ccd979bdSMark Fasheh goto out; 1484ccd979bdSMark Fasheh 1485ccd979bdSMark Fasheh osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, 148678427043SMark Fasheh "ocfs2rec"); 1487ccd979bdSMark Fasheh if (IS_ERR(osb->recovery_thread_task)) { 1488ccd979bdSMark Fasheh mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); 1489ccd979bdSMark Fasheh osb->recovery_thread_task = NULL; 1490ccd979bdSMark Fasheh } 1491ccd979bdSMark Fasheh 1492ccd979bdSMark Fasheh out: 1493c74ec2f7SArjan van de Ven mutex_unlock(&osb->recovery_lock); 1494ccd979bdSMark Fasheh wake_up(&osb->recovery_event); 1495ccd979bdSMark Fasheh 1496ccd979bdSMark Fasheh mlog_exit_void(); 1497ccd979bdSMark Fasheh } 1498ccd979bdSMark Fasheh 1499539d8264SSunil Mushran static int ocfs2_read_journal_inode(struct ocfs2_super *osb, 1500539d8264SSunil Mushran int slot_num, 1501539d8264SSunil Mushran struct buffer_head **bh, 1502539d8264SSunil Mushran struct inode **ret_inode) 
{
	int status = -EACCES;
	struct inode *inode = NULL;

	BUG_ON(slot_num >= osb->max_slots);

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (!inode || is_bad_inode(inode)) {
		mlog_errno(status);
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	/* OCFS2_BH_IGNORE_CACHE forces a fresh read from disk -- callers
	 * want the on-disk recovery generation, not a cached copy. */
	status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = 0;

bail:
	/* On success, hand the inode reference to the caller only if it
	 * asked for it (ret_inode != NULL); otherwise drop it here. */
	if (inode) {
		if (status || !ret_inode)
			iput(inode);
		else
			*ret_inode = inode;
	}
	return status;
}

/* Does the actual journal replay and marks the journal inode as
 * clean. Will only replay if the journal inode is marked dirty.
 * Returns -EBUSY if another node already recovered this slot. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
				int node_num,
				int slot_num)
{
	int status;
	int got_lock = 0;
	unsigned int flags;
	struct inode *inode = NULL;
	struct ocfs2_dinode *fe;
	journal_t *journal = NULL;
	struct buffer_head *bh = NULL;
	u32 slot_reco_gen;

	status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
	if (status) {
		mlog_errno(status);
		goto done;
	}

	fe = (struct ocfs2_dinode *)bh->b_data;
	slot_reco_gen = ocfs2_get_recovery_generation(fe);
	brelse(bh);
	bh = NULL;

	/*
	 * As the fs recovery is asynchronous, there is a small chance that
	 * another node mounted (and recovered) the slot before the recovery
	 * thread could get the lock. To handle that, we dirty read the journal
	 * inode for that slot to get the recovery generation. If it is
	 * different than what we expected, the slot has been recovered.
	 * If not, it needs recovery.
	 */
	if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
		mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
		     osb->slot_recovery_generations[slot_num], slot_reco_gen);
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		status = -EBUSY;
		goto done;
	}

	/* Continue with recovery as the journal has not yet been recovered */

	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not lock journal!\n");
		goto done;
	}
	got_lock = 1;

	/* Re-read the dinode now that we hold the cluster lock; the dirty
	 * read above may be stale. */
	fe = (struct ocfs2_dinode *) bh->b_data;

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	slot_reco_gen = ocfs2_get_recovery_generation(fe);

	if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
		mlog(0, "No recovery required for node %d\n", node_num);
		/* Refresh recovery generation for the slot */
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		goto done;
	}

	/* we need to run complete recovery for offline orphan slots */
	ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);

	mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
	     node_num, slot_num,
	     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

	status = ocfs2_force_read_journal(inode);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	mlog(0, "calling journal_init_inode\n");
	journal = jbd2_journal_init_inode(inode);
	if (journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EIO;
		goto done;
	}

	status = jbd2_journal_load(journal);
	if (status < 0) {
		mlog_errno(status);
		/* NOTE(review): jbd2_journal_destroy presumably drops an
		 * inode reference, hence the igrab here -- confirm against
		 * jbd2 docs before changing this pattern. */
		if (!igrab(inode))
			BUG();
		jbd2_journal_destroy(journal);
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal, slot_num);

	/* wipe the journal */
	mlog(0, "flushing the journal.\n");
	jbd2_journal_lock_updates(journal);
	status = jbd2_journal_flush(journal);
	jbd2_journal_unlock_updates(journal);
	if (status < 0)
		mlog_errno(status);

	/* This will mark the node clean */
	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	/* Increment recovery generation to indicate successful recovery */
	ocfs2_bump_recovery_generation(fe);
	osb->slot_recovery_generations[slot_num] =
		ocfs2_get_recovery_generation(fe);

	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
	status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

	if (!igrab(inode))
		BUG();

	jbd2_journal_destroy(journal);

done:
	/* drop the lock on this nodes journal */
	if (got_lock)
		ocfs2_inode_unlock(inode, 1);

	if (inode)
		iput(inode);

	brelse(bh);

	mlog_exit(status);
	return status;
}

/*
 * Do the most important parts of node recovery:
 *  - Replay it's journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered.
 * As a result, failure during the
 * second part of a nodes recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num)
{
	int status = 0;
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;

	mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
		   node_num, slot_num, osb->node_num);

	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
	BUG_ON(osb->node_num == node_num);

	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		/* -EBUSY means another node beat us to the recovery;
		 * that is not an error for us. */
		if (status == -EBUSY) {
			mlog(0, "Skipping recovery for slot %u (node %u) "
			     "as another node has recovered it\n", slot_num,
			     node_num);
			status = 0;
			goto done;
		}
		mlog_errno(status);
		goto done;
	}

	/* Stamp a clean local alloc file AFTER recovering the journal... */
	status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* An error from begin_truncate_log_recovery is not
	 * serious enough to warrant halting the rest of
	 * recovery. */
	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0)
		mlog_errno(status);

	/* Likewise, this would be a strange but ultimately not so
	 * harmful place to get an error... */
	status = ocfs2_clear_slot(osb, slot_num);
	if (status < 0)
		mlog_errno(status);

	/* This will kfree the memory pointed to by la_copy and tl_copy */
	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
					tl_copy, NULL);

	status = 0;
done:

	mlog_exit(status);
	return status;
}

/* Test node liveness by trylocking his journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if node is
 * still alive (we couldn't get the lock) and < 0 on error.
*/ 1749ccd979bdSMark Fasheh static int ocfs2_trylock_journal(struct ocfs2_super *osb, 1750ccd979bdSMark Fasheh int slot_num) 1751ccd979bdSMark Fasheh { 1752ccd979bdSMark Fasheh int status, flags; 1753ccd979bdSMark Fasheh struct inode *inode = NULL; 1754ccd979bdSMark Fasheh 1755ccd979bdSMark Fasheh inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, 1756ccd979bdSMark Fasheh slot_num); 1757ccd979bdSMark Fasheh if (inode == NULL) { 1758ccd979bdSMark Fasheh mlog(ML_ERROR, "access error\n"); 1759ccd979bdSMark Fasheh status = -EACCES; 1760ccd979bdSMark Fasheh goto bail; 1761ccd979bdSMark Fasheh } 1762ccd979bdSMark Fasheh if (is_bad_inode(inode)) { 1763ccd979bdSMark Fasheh mlog(ML_ERROR, "access error (bad inode)\n"); 1764ccd979bdSMark Fasheh iput(inode); 1765ccd979bdSMark Fasheh inode = NULL; 1766ccd979bdSMark Fasheh status = -EACCES; 1767ccd979bdSMark Fasheh goto bail; 1768ccd979bdSMark Fasheh } 1769ccd979bdSMark Fasheh SET_INODE_JOURNAL(inode); 1770ccd979bdSMark Fasheh 1771ccd979bdSMark Fasheh flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE; 1772e63aecb6SMark Fasheh status = ocfs2_inode_lock_full(inode, NULL, 1, flags); 1773ccd979bdSMark Fasheh if (status < 0) { 1774ccd979bdSMark Fasheh if (status != -EAGAIN) 1775ccd979bdSMark Fasheh mlog_errno(status); 1776ccd979bdSMark Fasheh goto bail; 1777ccd979bdSMark Fasheh } 1778ccd979bdSMark Fasheh 1779e63aecb6SMark Fasheh ocfs2_inode_unlock(inode, 1); 1780ccd979bdSMark Fasheh bail: 1781ccd979bdSMark Fasheh if (inode) 1782ccd979bdSMark Fasheh iput(inode); 1783ccd979bdSMark Fasheh 1784ccd979bdSMark Fasheh return status; 1785ccd979bdSMark Fasheh } 1786ccd979bdSMark Fasheh 1787ccd979bdSMark Fasheh /* Call this underneath ocfs2_super_lock. It also assumes that the 1788ccd979bdSMark Fasheh * slot info struct has been updated from disk. 
 */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
	unsigned int node_num;
	int status, i;
	u32 gen;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *di;

	/* This is called with the super block cluster lock, so we
	 * know that the slot map can't change underneath us. */

	for (i = 0; i < osb->max_slots; i++) {
		/* Read journal inode to get the recovery generation */
		status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		di = (struct ocfs2_dinode *)bh->b_data;
		gen = ocfs2_get_recovery_generation(di);
		brelse(bh);
		bh = NULL;

		/* osb_lock is held from here until just before the
		 * trylock below; each early-continue path must drop it. */
		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;

		mlog(0, "Slot %u recovery generation is %u\n", i,
		     osb->slot_recovery_generations[i]);

		/* Skip our own slot */
		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		/* Skip unoccupied slots */
		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
		if (status == -ENOENT) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		/* Skip nodes already queued for recovery */
		if (__ocfs2_recovery_map_test(osb, node_num)) {
			spin_unlock(&osb->osb_lock);
			continue;
		}
		spin_unlock(&osb->osb_lock);

		/* Ok, we have a slot occupied by another node which
		 * is not in the recovery map. We trylock his journal
		 * file here to test if he's alive. */
		status = ocfs2_trylock_journal(osb, i);
		if (!status) {
			/* Since we're called from mount, we know that
			 * the recovery thread can't race us on
			 * setting / checking the recovery bits. */
			ocfs2_recovery_thread(osb, node_num);
		} else if ((status < 0) && (status != -EAGAIN)) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	mlog_exit(status);
	return status;
}

/*
 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
 * randomness to the timeout to minimize multple nodes firing the timer at the
 * same time.
 */
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
	unsigned long time;

	/* Jitter the base timeout by up to 5000ms so that cluster nodes
	 * don't all fire their scan timers simultaneously. */
	get_random_bytes(&time, sizeof(time));
	time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
	return msecs_to_jiffies(time);
}

/*
 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
 * is done to catch any orphans that are left over in orphan directories.
 *
 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * seconds.  It gets an EX lock on os_lockres and checks sequence number
 * stored in LVB. If the sequence number has changed, it means some other
 * node has done the scan.  This node skips the scan and tracks the
 * sequence number.  If the sequence number didn't change, it means a scan
 * hasn't happened.  The node queues a scan and increments the
 * sequence number in the LVB.
 */
void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;
	int status, i;
	u32 seqno = 0;

	os = &osb->osb_orphan_scan;

	/* Bail out early if the scan has been deactivated (e.g. umount) */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto out;

	status = ocfs2_orphan_scan_lock(osb, &seqno);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto out;
	}

	/* Do not queue the tasks if the volume is being umounted */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto unlock;

	/* Sequence number changed: another node already scanned; just
	 * record the new sequence number and skip our scan. */
	if (os->os_seqno != seqno) {
		os->os_seqno = seqno;
		goto unlock;
	}

	for (i = 0; i < osb->max_slots; i++)
		ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
						NULL);
	/*
	 * We queued a recovery on orphan slots, increment the sequence
	 * number and update LVB so other node will skip the scan for a while
	 */
	seqno++;
	os->os_count++;
	os->os_scantime = CURRENT_TIME;
unlock:
	ocfs2_orphan_scan_unlock(osb, seqno);
out:
	return;
}

/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT millsec */
void ocfs2_orphan_scan_work(struct work_struct *work)
{
	struct ocfs2_orphan_scan *os;
	struct ocfs2_super *osb;

	os = container_of(work, struct ocfs2_orphan_scan,
			  os_orphan_scan_work.work);
	osb = os->os_osb;

	/* os_lock serializes the scan against ocfs2_orphan_scan_stop */
	mutex_lock(&os->os_lock);
	ocfs2_queue_orphan_scan(osb);
	/* Only re-arm the timer while the scan is still active */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
		schedule_delayed_work(&os->os_orphan_scan_work,
				      ocfs2_orphan_scan_timeout());
	mutex_unlock(&os->os_lock);
}

/* Deactivate the orphan scan and cancel any pending delayed work.
 * Setting os_state before taking os_lock prevents the worker from
 * re-arming the timer after we cancel it. */
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
		mutex_lock(&os->os_lock);
		cancel_delayed_work(&os->os_orphan_scan_work);
		mutex_unlock(&os->os_lock);
	}
}

void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
196283273932SSrinivas Eeda os->os_osb = osb; 196315633a22SSrinivas Eeda os->os_count = 0; 19643211949fSSunil Mushran os->os_seqno = 0; 196583273932SSrinivas Eeda mutex_init(&os->os_lock); 1966df152c24SSunil Mushran INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); 19678b712cd5SJeff Mahoney } 196883273932SSrinivas Eeda 19698b712cd5SJeff Mahoney void ocfs2_orphan_scan_start(struct ocfs2_super *osb) 19708b712cd5SJeff Mahoney { 19718b712cd5SJeff Mahoney struct ocfs2_orphan_scan *os; 19728b712cd5SJeff Mahoney 19738b712cd5SJeff Mahoney os = &osb->osb_orphan_scan; 19748b712cd5SJeff Mahoney os->os_scantime = CURRENT_TIME; 1975df152c24SSunil Mushran if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) 1976df152c24SSunil Mushran atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); 1977df152c24SSunil Mushran else { 1978df152c24SSunil Mushran atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); 197983273932SSrinivas Eeda schedule_delayed_work(&os->os_orphan_scan_work, 198083273932SSrinivas Eeda ocfs2_orphan_scan_timeout()); 1981df152c24SSunil Mushran } 198283273932SSrinivas Eeda } 198383273932SSrinivas Eeda 19845eae5b96SMark Fasheh struct ocfs2_orphan_filldir_priv { 19855eae5b96SMark Fasheh struct inode *head; 19865eae5b96SMark Fasheh struct ocfs2_super *osb; 19875eae5b96SMark Fasheh }; 19885eae5b96SMark Fasheh 19895eae5b96SMark Fasheh static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len, 19905eae5b96SMark Fasheh loff_t pos, u64 ino, unsigned type) 19915eae5b96SMark Fasheh { 19925eae5b96SMark Fasheh struct ocfs2_orphan_filldir_priv *p = priv; 19935eae5b96SMark Fasheh struct inode *iter; 19945eae5b96SMark Fasheh 19955eae5b96SMark Fasheh if (name_len == 1 && !strncmp(".", name, 1)) 19965eae5b96SMark Fasheh return 0; 19975eae5b96SMark Fasheh if (name_len == 2 && !strncmp("..", name, 2)) 19985eae5b96SMark Fasheh return 0; 19995eae5b96SMark Fasheh 20005eae5b96SMark Fasheh /* Skip bad inodes so that recovery can continue */ 20015eae5b96SMark 
Fasheh iter = ocfs2_iget(p->osb, ino, 20025fa0613eSJan Kara OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0); 20035eae5b96SMark Fasheh if (IS_ERR(iter)) 20045eae5b96SMark Fasheh return 0; 20055eae5b96SMark Fasheh 20065eae5b96SMark Fasheh mlog(0, "queue orphan %llu\n", 20075eae5b96SMark Fasheh (unsigned long long)OCFS2_I(iter)->ip_blkno); 20085eae5b96SMark Fasheh /* No locking is required for the next_orphan queue as there 20095eae5b96SMark Fasheh * is only ever a single process doing orphan recovery. */ 20105eae5b96SMark Fasheh OCFS2_I(iter)->ip_next_orphan = p->head; 20115eae5b96SMark Fasheh p->head = iter; 20125eae5b96SMark Fasheh 20135eae5b96SMark Fasheh return 0; 20145eae5b96SMark Fasheh } 20155eae5b96SMark Fasheh 2016b4df6ed8SMark Fasheh static int ocfs2_queue_orphans(struct ocfs2_super *osb, 2017b4df6ed8SMark Fasheh int slot, 2018b4df6ed8SMark Fasheh struct inode **head) 2019ccd979bdSMark Fasheh { 2020b4df6ed8SMark Fasheh int status; 2021ccd979bdSMark Fasheh struct inode *orphan_dir_inode = NULL; 20225eae5b96SMark Fasheh struct ocfs2_orphan_filldir_priv priv; 20235eae5b96SMark Fasheh loff_t pos = 0; 20245eae5b96SMark Fasheh 20255eae5b96SMark Fasheh priv.osb = osb; 20265eae5b96SMark Fasheh priv.head = *head; 2027ccd979bdSMark Fasheh 2028ccd979bdSMark Fasheh orphan_dir_inode = ocfs2_get_system_file_inode(osb, 2029ccd979bdSMark Fasheh ORPHAN_DIR_SYSTEM_INODE, 2030ccd979bdSMark Fasheh slot); 2031ccd979bdSMark Fasheh if (!orphan_dir_inode) { 2032ccd979bdSMark Fasheh status = -ENOENT; 2033ccd979bdSMark Fasheh mlog_errno(status); 2034b4df6ed8SMark Fasheh return status; 2035ccd979bdSMark Fasheh } 2036ccd979bdSMark Fasheh 20371b1dcc1bSJes Sorensen mutex_lock(&orphan_dir_inode->i_mutex); 2038e63aecb6SMark Fasheh status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); 2039ccd979bdSMark Fasheh if (status < 0) { 2040ccd979bdSMark Fasheh mlog_errno(status); 2041ccd979bdSMark Fasheh goto out; 2042ccd979bdSMark Fasheh } 2043ccd979bdSMark Fasheh 20445eae5b96SMark Fasheh status = 
ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, 20455eae5b96SMark Fasheh ocfs2_orphan_filldir); 20465eae5b96SMark Fasheh if (status) { 2047ccd979bdSMark Fasheh mlog_errno(status); 2048a86370fbSMark Fasheh goto out_cluster; 2049ccd979bdSMark Fasheh } 2050ccd979bdSMark Fasheh 20515eae5b96SMark Fasheh *head = priv.head; 2052ccd979bdSMark Fasheh 2053a86370fbSMark Fasheh out_cluster: 2054e63aecb6SMark Fasheh ocfs2_inode_unlock(orphan_dir_inode, 0); 2055b4df6ed8SMark Fasheh out: 2056b4df6ed8SMark Fasheh mutex_unlock(&orphan_dir_inode->i_mutex); 2057ccd979bdSMark Fasheh iput(orphan_dir_inode); 2058b4df6ed8SMark Fasheh return status; 2059b4df6ed8SMark Fasheh } 2060b4df6ed8SMark Fasheh 2061b4df6ed8SMark Fasheh static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb, 2062b4df6ed8SMark Fasheh int slot) 2063b4df6ed8SMark Fasheh { 2064b4df6ed8SMark Fasheh int ret; 2065b4df6ed8SMark Fasheh 2066b4df6ed8SMark Fasheh spin_lock(&osb->osb_lock); 2067b4df6ed8SMark Fasheh ret = !osb->osb_orphan_wipes[slot]; 2068b4df6ed8SMark Fasheh spin_unlock(&osb->osb_lock); 2069b4df6ed8SMark Fasheh return ret; 2070b4df6ed8SMark Fasheh } 2071b4df6ed8SMark Fasheh 2072b4df6ed8SMark Fasheh static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb, 2073b4df6ed8SMark Fasheh int slot) 2074b4df6ed8SMark Fasheh { 2075b4df6ed8SMark Fasheh spin_lock(&osb->osb_lock); 2076b4df6ed8SMark Fasheh /* Mark ourselves such that new processes in delete_inode() 2077b4df6ed8SMark Fasheh * know to quit early. */ 2078b4df6ed8SMark Fasheh ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot); 2079b4df6ed8SMark Fasheh while (osb->osb_orphan_wipes[slot]) { 2080b4df6ed8SMark Fasheh /* If any processes are already in the middle of an 2081b4df6ed8SMark Fasheh * orphan wipe on this dir, then we need to wait for 2082b4df6ed8SMark Fasheh * them. 
*/ 2083b4df6ed8SMark Fasheh spin_unlock(&osb->osb_lock); 2084b4df6ed8SMark Fasheh wait_event_interruptible(osb->osb_wipe_event, 2085b4df6ed8SMark Fasheh ocfs2_orphan_recovery_can_continue(osb, slot)); 2086b4df6ed8SMark Fasheh spin_lock(&osb->osb_lock); 2087b4df6ed8SMark Fasheh } 2088b4df6ed8SMark Fasheh spin_unlock(&osb->osb_lock); 2089b4df6ed8SMark Fasheh } 2090b4df6ed8SMark Fasheh 2091b4df6ed8SMark Fasheh static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb, 2092b4df6ed8SMark Fasheh int slot) 2093b4df6ed8SMark Fasheh { 2094b4df6ed8SMark Fasheh ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot); 2095b4df6ed8SMark Fasheh } 2096b4df6ed8SMark Fasheh 2097b4df6ed8SMark Fasheh /* 2098b4df6ed8SMark Fasheh * Orphan recovery. Each mounted node has it's own orphan dir which we 2099b4df6ed8SMark Fasheh * must run during recovery. Our strategy here is to build a list of 2100b4df6ed8SMark Fasheh * the inodes in the orphan dir and iget/iput them. The VFS does 2101b4df6ed8SMark Fasheh * (most) of the rest of the work. 2102b4df6ed8SMark Fasheh * 2103b4df6ed8SMark Fasheh * Orphan recovery can happen at any time, not just mount so we have a 2104b4df6ed8SMark Fasheh * couple of extra considerations. 2105b4df6ed8SMark Fasheh * 2106b4df6ed8SMark Fasheh * - We grab as many inodes as we can under the orphan dir lock - 2107b4df6ed8SMark Fasheh * doing iget() outside the orphan dir risks getting a reference on 2108b4df6ed8SMark Fasheh * an invalid inode. 2109b4df6ed8SMark Fasheh * - We must be sure not to deadlock with other processes on the 2110b4df6ed8SMark Fasheh * system wanting to run delete_inode(). This can happen when they go 2111b4df6ed8SMark Fasheh * to lock the orphan dir and the orphan recovery process attempts to 2112b4df6ed8SMark Fasheh * iget() inside the orphan dir lock. This can be avoided by 2113b4df6ed8SMark Fasheh * advertising our state to ocfs2_delete_inode(). 
2114b4df6ed8SMark Fasheh */ 2115b4df6ed8SMark Fasheh static int ocfs2_recover_orphans(struct ocfs2_super *osb, 2116b4df6ed8SMark Fasheh int slot) 2117b4df6ed8SMark Fasheh { 2118b4df6ed8SMark Fasheh int ret = 0; 2119b4df6ed8SMark Fasheh struct inode *inode = NULL; 2120b4df6ed8SMark Fasheh struct inode *iter; 2121b4df6ed8SMark Fasheh struct ocfs2_inode_info *oi; 2122b4df6ed8SMark Fasheh 2123b4df6ed8SMark Fasheh mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); 2124b4df6ed8SMark Fasheh 2125b4df6ed8SMark Fasheh ocfs2_mark_recovering_orphan_dir(osb, slot); 2126b4df6ed8SMark Fasheh ret = ocfs2_queue_orphans(osb, slot, &inode); 2127b4df6ed8SMark Fasheh ocfs2_clear_recovering_orphan_dir(osb, slot); 2128b4df6ed8SMark Fasheh 2129b4df6ed8SMark Fasheh /* Error here should be noted, but we want to continue with as 2130b4df6ed8SMark Fasheh * many queued inodes as we've got. */ 2131b4df6ed8SMark Fasheh if (ret) 2132b4df6ed8SMark Fasheh mlog_errno(ret); 2133ccd979bdSMark Fasheh 2134ccd979bdSMark Fasheh while (inode) { 2135ccd979bdSMark Fasheh oi = OCFS2_I(inode); 2136b0697053SMark Fasheh mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); 2137ccd979bdSMark Fasheh 2138ccd979bdSMark Fasheh iter = oi->ip_next_orphan; 2139ccd979bdSMark Fasheh 2140ccd979bdSMark Fasheh spin_lock(&oi->ip_lock); 214134d024f8SMark Fasheh /* The remote delete code may have set these on the 214234d024f8SMark Fasheh * assumption that the other node would wipe them 214334d024f8SMark Fasheh * successfully. If they are still in the node's 214434d024f8SMark Fasheh * orphan dir, we need to reset that state. */ 2145ccd979bdSMark Fasheh oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE); 2146ccd979bdSMark Fasheh 2147ccd979bdSMark Fasheh /* Set the proper information to get us going into 2148ccd979bdSMark Fasheh * ocfs2_delete_inode. 
*/ 2149ccd979bdSMark Fasheh oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; 2150ccd979bdSMark Fasheh spin_unlock(&oi->ip_lock); 2151ccd979bdSMark Fasheh 2152ccd979bdSMark Fasheh iput(inode); 2153ccd979bdSMark Fasheh 2154ccd979bdSMark Fasheh inode = iter; 2155ccd979bdSMark Fasheh } 2156ccd979bdSMark Fasheh 2157b4df6ed8SMark Fasheh return ret; 2158ccd979bdSMark Fasheh } 2159ccd979bdSMark Fasheh 216019ece546SJan Kara static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota) 2161ccd979bdSMark Fasheh { 2162ccd979bdSMark Fasheh /* This check is good because ocfs2 will wait on our recovery 2163ccd979bdSMark Fasheh * thread before changing it to something other than MOUNTED 2164ccd979bdSMark Fasheh * or DISABLED. */ 2165ccd979bdSMark Fasheh wait_event(osb->osb_mount_event, 216619ece546SJan Kara (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) || 216719ece546SJan Kara atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS || 2168ccd979bdSMark Fasheh atomic_read(&osb->vol_state) == VOLUME_DISABLED); 2169ccd979bdSMark Fasheh 2170ccd979bdSMark Fasheh /* If there's an error on mount, then we may never get to the 2171ccd979bdSMark Fasheh * MOUNTED flag, but this is set right before 2172ccd979bdSMark Fasheh * dismount_volume() so we can trust it. 
*/ 2173ccd979bdSMark Fasheh if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { 2174ccd979bdSMark Fasheh mlog(0, "mount error, exiting!\n"); 2175ccd979bdSMark Fasheh return -EBUSY; 2176ccd979bdSMark Fasheh } 2177ccd979bdSMark Fasheh 2178ccd979bdSMark Fasheh return 0; 2179ccd979bdSMark Fasheh } 2180ccd979bdSMark Fasheh 2181ccd979bdSMark Fasheh static int ocfs2_commit_thread(void *arg) 2182ccd979bdSMark Fasheh { 2183ccd979bdSMark Fasheh int status; 2184ccd979bdSMark Fasheh struct ocfs2_super *osb = arg; 2185ccd979bdSMark Fasheh struct ocfs2_journal *journal = osb->journal; 2186ccd979bdSMark Fasheh 2187ccd979bdSMark Fasheh /* we can trust j_num_trans here because _should_stop() is only set in 2188ccd979bdSMark Fasheh * shutdown and nobody other than ourselves should be able to start 2189ccd979bdSMark Fasheh * transactions. committing on shutdown might take a few iterations 2190ccd979bdSMark Fasheh * as final transactions put deleted inodes on the list */ 2191ccd979bdSMark Fasheh while (!(kthread_should_stop() && 2192ccd979bdSMark Fasheh atomic_read(&journal->j_num_trans) == 0)) { 2193ccd979bdSMark Fasheh 2194745ae8baSMark Fasheh wait_event_interruptible(osb->checkpoint_event, 2195ccd979bdSMark Fasheh atomic_read(&journal->j_num_trans) 2196745ae8baSMark Fasheh || kthread_should_stop()); 2197ccd979bdSMark Fasheh 2198ccd979bdSMark Fasheh status = ocfs2_commit_cache(osb); 2199ccd979bdSMark Fasheh if (status < 0) 2200ccd979bdSMark Fasheh mlog_errno(status); 2201ccd979bdSMark Fasheh 2202ccd979bdSMark Fasheh if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){ 2203ccd979bdSMark Fasheh mlog(ML_KTHREAD, 2204ccd979bdSMark Fasheh "commit_thread: %u transactions pending on " 2205ccd979bdSMark Fasheh "shutdown\n", 2206ccd979bdSMark Fasheh atomic_read(&journal->j_num_trans)); 2207ccd979bdSMark Fasheh } 2208ccd979bdSMark Fasheh } 2209ccd979bdSMark Fasheh 2210ccd979bdSMark Fasheh return 0; 2211ccd979bdSMark Fasheh } 2212ccd979bdSMark Fasheh 2213539d8264SSunil Mushran 
/* Reads all the journal inodes without taking any cluster locks. Used 2214539d8264SSunil Mushran * for hard readonly access to determine whether any journal requires 2215539d8264SSunil Mushran * recovery. Also used to refresh the recovery generation numbers after 2216539d8264SSunil Mushran * a journal has been recovered by another node. 2217539d8264SSunil Mushran */ 2218ccd979bdSMark Fasheh int ocfs2_check_journals_nolocks(struct ocfs2_super *osb) 2219ccd979bdSMark Fasheh { 2220ccd979bdSMark Fasheh int ret = 0; 2221ccd979bdSMark Fasheh unsigned int slot; 2222539d8264SSunil Mushran struct buffer_head *di_bh = NULL; 2223ccd979bdSMark Fasheh struct ocfs2_dinode *di; 2224539d8264SSunil Mushran int journal_dirty = 0; 2225ccd979bdSMark Fasheh 2226ccd979bdSMark Fasheh for(slot = 0; slot < osb->max_slots; slot++) { 2227539d8264SSunil Mushran ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL); 2228539d8264SSunil Mushran if (ret) { 2229ccd979bdSMark Fasheh mlog_errno(ret); 2230ccd979bdSMark Fasheh goto out; 2231ccd979bdSMark Fasheh } 2232ccd979bdSMark Fasheh 2233ccd979bdSMark Fasheh di = (struct ocfs2_dinode *) di_bh->b_data; 2234ccd979bdSMark Fasheh 2235539d8264SSunil Mushran osb->slot_recovery_generations[slot] = 2236539d8264SSunil Mushran ocfs2_get_recovery_generation(di); 2237539d8264SSunil Mushran 2238ccd979bdSMark Fasheh if (le32_to_cpu(di->id1.journal1.ij_flags) & 2239ccd979bdSMark Fasheh OCFS2_JOURNAL_DIRTY_FL) 2240539d8264SSunil Mushran journal_dirty = 1; 2241ccd979bdSMark Fasheh 2242ccd979bdSMark Fasheh brelse(di_bh); 2243539d8264SSunil Mushran di_bh = NULL; 2244ccd979bdSMark Fasheh } 2245ccd979bdSMark Fasheh 2246ccd979bdSMark Fasheh out: 2247539d8264SSunil Mushran if (journal_dirty) 2248539d8264SSunil Mushran ret = -EROFS; 2249ccd979bdSMark Fasheh return ret; 2250ccd979bdSMark Fasheh } 2251