// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * journal.c
 *
 * Defines functions of the journalling API
 *
 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/writeback.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "quota.h"
#include "file.h"
#include "namei.h"

#include "buffer_head_io.h"
#include "ocfs2_trace.h"

DEFINE_SPINLOCK(trans_inc_lock);

#define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000

static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot,
                                 enum ocfs2_orphan_reco_type orphan_reco_type);
static int ocfs2_commit_thread(void *arg);
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec,
                                            enum ocfs2_orphan_reco_type orphan_reco_type);

static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 0);
}

static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 1);
}

/*
 * The replay_map tracks online/offline slots, so we can recover
 * offline slots during recovery and mount.
 */

enum ocfs2_replay_state {
        REPLAY_UNNEEDED = 0,    /* Replay is not needed, so ignore this map */
        REPLAY_NEEDED,          /* Replay slots marked in rm_replay_slots */
        REPLAY_DONE             /* Replay was already queued */
};

struct ocfs2_replay_map {
        unsigned int rm_slots;
        enum ocfs2_replay_state rm_state;
        unsigned char rm_replay_slots[];
};
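
/*
 * Typical lifecycle of the map (an illustrative sketch assembled from the
 * helpers below and their callers in this file, not additional API):
 *
 *	ocfs2_compute_replay_slots(osb);                  (mark offline slots)
 *	ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);   (a dirty journal was seen)
 *	ocfs2_queue_replay_slots(osb, reco_type);         (queue completions, -> REPLAY_DONE)
 *	ocfs2_free_replay_slots(osb);
 */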

static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
{
        if (!osb->replay_map)
                return;

        /* If we've already queued the replay, we don't have any more to do */
        if (osb->replay_map->rm_state == REPLAY_DONE)
                return;

        osb->replay_map->rm_state = state;
}

int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
{
        struct ocfs2_replay_map *replay_map;
        int i, node_num;

        /* If replay map is already set, we don't do it again */
        if (osb->replay_map)
                return 0;

        replay_map = kzalloc(struct_size(replay_map, rm_replay_slots,
                                         osb->max_slots),
                             GFP_KERNEL);
        if (!replay_map) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        spin_lock(&osb->osb_lock);

        replay_map->rm_slots = osb->max_slots;
        replay_map->rm_state = REPLAY_UNNEEDED;

        /* set rm_replay_slots for offline slot(s) */
        for (i = 0; i < replay_map->rm_slots; i++) {
                if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
                        replay_map->rm_replay_slots[i] = 1;
        }

        osb->replay_map = replay_map;
        spin_unlock(&osb->osb_lock);
        return 0;
}

static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
                                     enum ocfs2_orphan_reco_type orphan_reco_type)
{
        struct ocfs2_replay_map *replay_map = osb->replay_map;
        int i;

        if (!replay_map)
                return;

        if (replay_map->rm_state != REPLAY_NEEDED)
                return;

        for (i = 0; i < replay_map->rm_slots; i++)
                if (replay_map->rm_replay_slots[i])
                        ocfs2_queue_recovery_completion(osb->journal, i, NULL,
                                                        NULL, NULL,
                                                        orphan_reco_type);
        replay_map->rm_state = REPLAY_DONE;
}

void ocfs2_free_replay_slots(struct ocfs2_super *osb)
{
        struct ocfs2_replay_map *replay_map = osb->replay_map;

        if (!osb->replay_map)
                return;

        kfree(replay_map);
        osb->replay_map = NULL;
}

int ocfs2_recovery_init(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        mutex_init(&osb->recovery_lock);
        osb->disable_recovery = 0;
        osb->recovery_thread_task = NULL;
        init_waitqueue_head(&osb->recovery_event);

        rm = kzalloc(struct_size(rm, rm_entries, osb->max_slots),
                     GFP_KERNEL);
        if (!rm) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        osb->recovery_map = rm;

        return 0;
}

/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
        mb();
        return osb->recovery_thread_task != NULL;
}

void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        /* disable any new recovery threads and wait for any currently
         * running ones to exit. Do this before setting the vol_state. */
        mutex_lock(&osb->recovery_lock);
        osb->disable_recovery = 1;
        mutex_unlock(&osb->recovery_lock);
        wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
        if (osb->ocfs2_wq)
                flush_workqueue(osb->ocfs2_wq);

        /*
         * Now that recovery is shut down, and the osb is about to be
         * freed, the osb_lock is not taken here.
         */
        rm = osb->recovery_map;
        /* XXX: Should we bug if there are dirty entries? */

        kfree(rm);
}

static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        assert_spin_locked(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        return 1;
        }

        return 0;
}

/* Behaves like test-and-set. Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
                                  unsigned int node_num)
{
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        if (__ocfs2_recovery_map_test(osb, node_num)) {
                spin_unlock(&osb->osb_lock);
                return 1;
        }

        /* XXX: Can this be exploited? Not from o2dlm... */
        BUG_ON(rm->rm_used >= osb->max_slots);

        rm->rm_entries[rm->rm_used] = node_num;
        rm->rm_used++;
        spin_unlock(&osb->osb_lock);

        return 0;
}
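
/*
 * Illustrative sketch (not code from this file): because the helper above
 * behaves like test-and-set, a caller can use the return value to make
 * sure a node is only entered into the map once:
 *
 *	if (!ocfs2_recovery_map_set(osb, node_num))
 *		(we were the first to mark node_num for recovery)
 *
 * ocfs2_recovery_thread() below calls it this way to avoid duplicate
 * recovery map entries.
 */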

static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        break;
        }

        if (i < rm->rm_used) {
                /* XXX: be careful with the pointer math */
                memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
                        (rm->rm_used - i - 1) * sizeof(unsigned int));
                rm->rm_used--;
        }

        spin_unlock(&osb->osb_lock);
}

static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
        int status = 0;
        unsigned int flushed;
        struct ocfs2_journal *journal = NULL;

        journal = osb->journal;

        /* Flush all pending commits and checkpoint the journal. */
        down_write(&journal->j_trans_barrier);

        flushed = atomic_read(&journal->j_num_trans);
        trace_ocfs2_commit_cache_begin(flushed);
        if (flushed == 0) {
                up_write(&journal->j_trans_barrier);
                goto finally;
        }

        jbd2_journal_lock_updates(journal->j_journal);
        status = jbd2_journal_flush(journal->j_journal, 0);
        jbd2_journal_unlock_updates(journal->j_journal);
        if (status < 0) {
                up_write(&journal->j_trans_barrier);
                mlog_errno(status);
                goto finally;
        }

        ocfs2_inc_trans_id(journal);

        flushed = atomic_read(&journal->j_num_trans);
        atomic_set(&journal->j_num_trans, 0);
        up_write(&journal->j_trans_barrier);

        trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);

        ocfs2_wake_downconvert_thread(osb);
        wake_up(&journal->j_checkpointed);
finally:
        return status;
}

handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
        journal_t *journal = osb->journal->j_journal;
        handle_t *handle;

        BUG_ON(!osb || !osb->journal->j_journal);

        if (ocfs2_is_hard_readonly(osb))
                return ERR_PTR(-EROFS);

        BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
        BUG_ON(max_buffs <= 0);

        /* Nested transaction? Just return the handle... */
        if (journal_current_handle())
                return jbd2_journal_start(journal, max_buffs);

        sb_start_intwrite(osb->sb);

        down_read(&osb->journal->j_trans_barrier);

        handle = jbd2_journal_start(journal, max_buffs);
        if (IS_ERR(handle)) {
                up_read(&osb->journal->j_trans_barrier);
                sb_end_intwrite(osb->sb);

                mlog_errno(PTR_ERR(handle));

                if (is_journal_aborted(journal)) {
                        ocfs2_abort(osb->sb, "Detected aborted journal\n");
                        handle = ERR_PTR(-EROFS);
                }
        } else {
                if (!ocfs2_mount_local(osb))
                        atomic_inc(&(osb->journal->j_num_trans));
        }

        return handle;
}

int ocfs2_commit_trans(struct ocfs2_super *osb,
                       handle_t *handle)
{
        int ret, nested;
        struct ocfs2_journal *journal = osb->journal;

        BUG_ON(!handle);

        nested = handle->h_ref > 1;
        ret = jbd2_journal_stop(handle);
        if (ret < 0)
                mlog_errno(ret);

        if (!nested) {
                up_read(&journal->j_trans_barrier);
                sb_end_intwrite(osb->sb);
        }

        return ret;
}

/*
 * 'nblocks' is what you want to add to the current transaction.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
        int status, old_nblocks;

        BUG_ON(!handle);
        BUG_ON(nblocks < 0);

        if (!nblocks)
                return 0;

        old_nblocks = jbd2_handle_buffer_credits(handle);

        trace_ocfs2_extend_trans(old_nblocks, nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
        status = 1;
#else
        status = jbd2_journal_extend(handle, nblocks, 0);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
#endif

        if (status > 0) {
                trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
                status = jbd2_journal_restart(handle,
                                              old_nblocks + nblocks);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}
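
/*
 * A typical caller pattern (illustrative sketch, not code from this file;
 * 'bh' and 'blocks_needed' are hypothetical). Per the comment above:
 * dirty what you already changed before extending, then re-run
 * journal_access on anything you still intend to modify, since
 * jbd2_journal_restart() may have committed the handle:
 *
 *	ocfs2_journal_dirty(handle, bh);
 *	status = ocfs2_extend_trans(handle, blocks_needed);
 *	if (status < 0)
 *		goto bail;
 *	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 *					 OCFS2_JOURNAL_ACCESS_WRITE);
 */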

/*
 * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
 * If that fails, restart the transaction & regain write access for the
 * buffer head which is used for metadata modifications.
 * Taken from Ext4: extend_or_restart_transaction()
 */
int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
{
        int status, old_nblks;

        BUG_ON(!handle);

        old_nblks = jbd2_handle_buffer_credits(handle);
        trace_ocfs2_allocate_extend_trans(old_nblks, thresh);

        if (old_nblks < thresh)
                return 0;

        status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA, 0);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        if (status > 0) {
                status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
                if (status < 0)
                        mlog_errno(status);
        }

bail:
        return status;
}

static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
        return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}

static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                 struct buffer_head *bh,
                                 void *data, size_t size)
{
        struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, data + ot->ot_offset);
}

/*
 * Quota blocks have their own trigger because the struct ocfs2_block_check
 * offset depends on the blocksize.
 */
static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                    struct buffer_head *bh,
                                    void *data, size_t size)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(size, data);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, &dqt->dq_check);
}

/*
 * Directory blocks also have their own trigger because the
 * struct ocfs2_block_check offset depends on the blocksize.
 */
static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
                                    struct buffer_head *bh,
                                    void *data, size_t size)
{
        struct ocfs2_dir_block_trailer *trailer =
                ocfs2_dir_trailer_from_size(size, data);

        /*
         * We aren't guaranteed to have the superblock here, so we
         * must unconditionally compute the ecc data.
         * __ocfs2_journal_access() will only set the triggers if
         * metaecc is enabled.
         */
        ocfs2_block_check_compute(data, size, &trailer->db_check);
}

static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
                                struct buffer_head *bh)
{
        struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);

        mlog(ML_ERROR,
             "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
             "bh->b_blocknr = %llu\n",
             (unsigned long)bh,
             (unsigned long long)bh->b_blocknr);

        ocfs2_error(ot->sb,
                    "JBD2 has aborted our journal, ocfs2 cannot continue\n");
}

static void ocfs2_setup_csum_triggers(struct super_block *sb,
                                      enum ocfs2_journal_trigger_type type,
                                      struct ocfs2_triggers *ot)
{
        BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);

        switch (type) {
        case OCFS2_JTR_DI:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
                break;
        case OCFS2_JTR_EB:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
                break;
        case OCFS2_JTR_RB:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
                break;
        case OCFS2_JTR_GD:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
                break;
        case OCFS2_JTR_DB:
                ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
                break;
        case OCFS2_JTR_XB:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
                break;
        case OCFS2_JTR_DQ:
                ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
                break;
        case OCFS2_JTR_DR:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
                break;
        case OCFS2_JTR_DL:
                ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
                ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
                break;
        case OCFS2_JTR_NONE:
                /* To make compiler happy... */
                return;
        }

        ot->ot_triggers.t_abort = ocfs2_abort_trigger;
        ot->sb = sb;
}

void ocfs2_initialize_journal_triggers(struct super_block *sb,
                                       struct ocfs2_triggers triggers[])
{
        enum ocfs2_journal_trigger_type type;

        for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
                ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
}

static int __ocfs2_journal_access(handle_t *handle,
                                  struct ocfs2_caching_info *ci,
                                  struct buffer_head *bh,
                                  struct ocfs2_triggers *triggers,
                                  int type)
{
        int status;
        struct ocfs2_super *osb =
                OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        BUG_ON(!ci || !ci->ci_ops);
        BUG_ON(!handle);
        BUG_ON(!bh);

        trace_ocfs2_journal_access(
                (unsigned long long)ocfs2_metadata_cache_owner(ci),
                (unsigned long long)bh->b_blocknr, type, bh->b_size);

        /* we can safely remove this assertion after testing. */
        if (!buffer_uptodate(bh)) {
                mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
                mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
                     (unsigned long long)bh->b_blocknr, bh->b_state);

                lock_buffer(bh);
                /*
                 * A previous transaction with a couple of buffer heads
                 * failed to checkpoint, so all of its bhs were marked
                 * BH_Write_EIO. For the current transaction, this bh is
                 * just one of those error bhs handled by the previous
                 * transaction. We can't simply clear its BH_Write_EIO and
                 * reuse it, since the other bhs have not been written to
                 * disk yet and that would cause metadata inconsistency.
                 * So set the fs read-only to avoid further damage.
                 */
                if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
                        unlock_buffer(bh);
                        return ocfs2_error(osb->sb, "A previous attempt to "
                                           "write this buffer head failed\n");
                }
                unlock_buffer(bh);
        }

        /* Set the current transaction information on the ci so
         * that the locking code knows whether it can drop its locks
         * on this ci or not. We're protected from the commit
         * thread updating the current transaction id until
         * ocfs2_commit_trans() because ocfs2_start_trans() took
         * j_trans_barrier for us. */
        ocfs2_set_ci_lock_trans(osb->journal, ci);

        ocfs2_metadata_cache_io_lock(ci);
        switch (type) {
        case OCFS2_JOURNAL_ACCESS_CREATE:
        case OCFS2_JOURNAL_ACCESS_WRITE:
                status = jbd2_journal_get_write_access(handle, bh);
                break;

        case OCFS2_JOURNAL_ACCESS_UNDO:
                status = jbd2_journal_get_undo_access(handle, bh);
                break;

        default:
                status = -EINVAL;
                mlog(ML_ERROR, "Unknown access type!\n");
        }
        if (!status && ocfs2_meta_ecc(osb) && triggers)
                jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
        ocfs2_metadata_cache_io_unlock(ci);

        if (status < 0)
                mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
                     status, type);

        return status;
}

int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_DI],
                                      type);
}

int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_EB],
                                      type);
}

int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_RB],
                                      type);
}

int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_GD],
                                      type);
}

int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_DB],
                                      type);
}

int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_XB],
                                      type);
}

int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_DQ],
                                      type);
}

int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_DR],
                                      type);
}

int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
                            struct buffer_head *bh, int type)
{
        struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));

        return __ocfs2_journal_access(handle, ci, bh,
                                      &osb->s_journal_triggers[OCFS2_JTR_DL],
                                      type);
}

int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
                         struct buffer_head *bh, int type)
{
        return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}

void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
{
        int status;

        trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);

        status = jbd2_journal_dirty_metadata(handle, bh);
        if (status) {
                mlog_errno(status);
                if (!is_handle_aborted(handle)) {
                        journal_t *journal = handle->h_transaction->t_journal;

                        mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
                             "handle type %u started at line %u, credits %u/%u "
                             "errcode %d. Aborting transaction and journal.\n",
                             handle->h_type, handle->h_line_no,
                             handle->h_requested_credits,
                             jbd2_handle_buffer_credits(handle), status);
                        handle->h_err = status;
                        jbd2_journal_abort_handle(handle);
                        jbd2_journal_abort(journal, status);
                }
        }
}

#define OCFS2_DEFAULT_COMMIT_INTERVAL   (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)

void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
        journal_t *journal = osb->journal->j_journal;
        unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;

        if (osb->osb_commit_interval)
                commit_interval = osb->osb_commit_interval;

        write_lock(&journal->j_state_lock);
        journal->j_commit_interval = commit_interval;
        if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
                journal->j_flags |= JBD2_BARRIER;
        else
                journal->j_flags &= ~JBD2_BARRIER;
        write_unlock(&journal->j_state_lock);
}
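
/*
 * For reference (a hedged note, not code from this file): both knobs
 * above are normally driven by mount options, e.g.
 *
 *	mount -t ocfs2 -o commit=15,barrier=1 /dev/sdX /mnt
 *
 * which populate osb->osb_commit_interval and OCFS2_MOUNT_BARRIER.
 */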

/*
 * Allocate and initialize the skeleton of the journal structure.
 * ocfs2_journal_init() then gives the fs its journalling ability.
 */
int ocfs2_journal_alloc(struct ocfs2_super *osb)
{
        int status = 0;
        struct ocfs2_journal *journal;

        journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL);
        if (!journal) {
                mlog(ML_ERROR, "unable to alloc journal\n");
                status = -ENOMEM;
                goto bail;
        }
        osb->journal = journal;
        journal->j_osb = osb;

        atomic_set(&journal->j_num_trans, 0);
        init_rwsem(&journal->j_trans_barrier);
        init_waitqueue_head(&journal->j_checkpointed);
        spin_lock_init(&journal->j_lock);
        journal->j_trans_id = 1UL;
        INIT_LIST_HEAD(&journal->j_la_cleanups);
        INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
        journal->j_state = OCFS2_JOURNAL_FREE;

bail:
        return status;
}

static int ocfs2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
        struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = jinode->i_dirty_start,
                .range_end = jinode->i_dirty_end,
        };

        return filemap_fdatawrite_wbc(mapping, &wbc);
}

int ocfs2_journal_init(struct ocfs2_super *osb, int *dirty)
{
        int status = -1;
        struct inode *inode = NULL; /* the journal inode */
        journal_t *j_journal = NULL;
        struct ocfs2_journal *journal = osb->journal;
        struct ocfs2_dinode *di = NULL;
        struct buffer_head *bh = NULL;
        int inode_lock = 0;

        BUG_ON(!journal);
        /* already have the inode for our journal */
        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            osb->slot_num);
        if (inode == NULL) {
                status = -EACCES;
                mlog_errno(status);
                goto done;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto done;
        }

        SET_INODE_JOURNAL(inode);
        OCFS2_I(inode)->ip_open_count++;

        /* Skip recovery waits here - journal inode metadata never
         * changes in a live cluster so it can be considered an
         * exception to the rule. */
        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not get lock on journal!\n");
                goto done;
        }

        inode_lock = 1;
        di = (struct ocfs2_dinode *)bh->b_data;

        if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) {
                mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
                     i_size_read(inode));
                status = -EINVAL;
                goto done;
        }

        trace_ocfs2_journal_init(i_size_read(inode),
                                 (unsigned long long)inode->i_blocks,
                                 OCFS2_I(inode)->ip_clusters);

        /* call the kernel's journal init function now */
        j_journal = jbd2_journal_init_inode(inode);
        if (IS_ERR(j_journal)) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = PTR_ERR(j_journal);
                goto done;
        }

        trace_ocfs2_journal_init_maxlen(j_journal->j_total_len);

        *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
                  OCFS2_JOURNAL_DIRTY_FL);

        journal->j_journal = j_journal;
        journal->j_journal->j_submit_inode_data_buffers =
                ocfs2_journal_submit_inode_data_buffers;
        journal->j_journal->j_finish_inode_data_buffers =
                jbd2_journal_finish_inode_data_buffers;
        journal->j_inode = inode;
        journal->j_bh = bh;

        ocfs2_set_journal_params(osb);

        journal->j_state = OCFS2_JOURNAL_LOADED;

        status = 0;
done:
        if (status < 0) {
                if (inode_lock)
                        ocfs2_inode_unlock(inode, 1);
                brelse(bh);
                if (inode) {
                        OCFS2_I(inode)->ip_open_count--;
                        iput(inode);
                }
        }

        return status;
}

static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
        le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
        return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}

static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed)
{
        int status;
        unsigned int flags;
        struct ocfs2_journal *journal = osb->journal;
        struct buffer_head *bh = journal->j_bh;
        struct ocfs2_dinode *fe;

        fe = (struct ocfs2_dinode *)bh->b_data;

        /* The journal bh on the osb always comes from ocfs2_journal_init()
         * and was validated there inside ocfs2_inode_lock_full(). It's a
         * code bug if we mess it up. */
        BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        if (dirty)
                flags |= OCFS2_JOURNAL_DIRTY_FL;
        else
                flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        if (replayed)
                ocfs2_bump_recovery_generation(fe);

        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
        status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
        if (status < 0)
                mlog_errno(status);

        return status;
}

/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = NULL;
        int status = 0;
        struct inode *inode = NULL;
        int num_running_trans = 0;

        BUG_ON(!osb);

        journal = osb->journal;
        if (!journal)
                goto done;

        inode = journal->j_inode;

        if (journal->j_state != OCFS2_JOURNAL_LOADED)
                goto done;

        /* need to inc inode use count - jbd2_journal_destroy will iput. */
        if (!igrab(inode))
                BUG();

        num_running_trans = atomic_read(&(osb->journal->j_num_trans));
        trace_ocfs2_journal_shutdown(num_running_trans);

        /* Do a commit_cache here. It will flush our journal, *and*
         * release any locks that are still held.
         * Set the SHUTDOWN flag and release the trans lock;
         * the commit thread will take the trans lock for us below. */
        journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

        /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
         * drop the trans_lock (which we want to hold until we
         * completely destroy the journal). */
        if (osb->commit_task) {
                /* Wait for the commit thread */
                trace_ocfs2_journal_shutdown_wait(osb->commit_task);
                kthread_stop(osb->commit_task);
                osb->commit_task = NULL;
        }

        BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

        if (ocfs2_mount_local(osb)) {
                jbd2_journal_lock_updates(journal->j_journal);
                status = jbd2_journal_flush(journal->j_journal, 0);
                jbd2_journal_unlock_updates(journal->j_journal);
                if (status < 0)
                        mlog_errno(status);
        }

        /* Shutdown the kernel journal system */
        if (!jbd2_journal_destroy(journal->j_journal) && !status) {
                /*
                 * Do not toggle if flush was unsuccessful otherwise
                 * will leave dirty metadata in a "clean" journal
                 */
                status = ocfs2_journal_toggle_dirty(osb, 0, 0);
                if (status < 0)
                        mlog_errno(status);
        }
        journal->j_journal = NULL;

        OCFS2_I(inode)->ip_open_count--;

        /* unlock our journal */
        ocfs2_inode_unlock(inode, 1);

        brelse(journal->j_bh);
        journal->j_bh = NULL;

        journal->j_state = OCFS2_JOURNAL_FREE;

done:
        iput(inode);
        kfree(journal);
        osb->journal = NULL;
}

static void ocfs2_clear_journal_error(struct super_block *sb,
                                      journal_t *journal,
                                      int slot)
{
        int olderr;

        olderr = jbd2_journal_errno(journal);
        if (olderr) {
                mlog(ML_ERROR, "File system error %d recorded in "
                     "journal %u.\n", olderr, slot);
                mlog(ML_ERROR, "File system on device %s needs checking.\n",
                     sb->s_id);

                jbd2_journal_ack_err(journal);
                jbd2_journal_clear_err(journal);
        }
}

int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
        int status = 0;
        struct ocfs2_super *osb;

        BUG_ON(!journal);

        osb = journal->j_osb;

        status = jbd2_journal_load(journal->j_journal);
        if (status < 0) {
                mlog(ML_ERROR, "Failed to load journal!\n");
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

        if (replayed) {
                jbd2_journal_lock_updates(journal->j_journal);
                status = jbd2_journal_flush(journal->j_journal, 0);
                jbd2_journal_unlock_updates(journal->j_journal);
                if (status < 0)
                        mlog_errno(status);
        }

        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* Launch the commit thread */
        if (!local) {
                osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
                                               "ocfs2cmt-%s", osb->uuid_str);
                if (IS_ERR(osb->commit_task)) {
                        status = PTR_ERR(osb->commit_task);
                        osb->commit_task = NULL;
                        mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
                             "error=%d", status);
                        goto done;
                }
        } else
                osb->commit_task = NULL;

done:
        return status;
}


/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
        int status;

        BUG_ON(!journal);

        status = jbd2_journal_wipe(journal->j_journal, full);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
        if (status < 0)
                mlog_errno(status);

bail:
        return status;
}
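
/*
 * Illustrative usage (a sketch, not call sites from this file):
 *
 *	ocfs2_journal_wipe(journal, 1);		(discard all journal blocks)
 *	ocfs2_journal_wipe(journal, 0);		(just mark the journal clean)
 */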

static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
        int empty;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        empty = (rm->rm_used == 0);
        spin_unlock(&osb->osb_lock);

        return empty;
}

void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
        wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}

/*
 * JBD2 might read a cached version of another node's journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up to date version of those blocks then is to force
 * read them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date. We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
        int status = 0;
        int i;
        u64 v_blkno, p_blkno, p_blocks, num_blocks;
        struct buffer_head *bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
        v_blkno = 0;
        while (v_blkno < num_blocks) {
                status = ocfs2_extent_map_get_blocks(inode, v_blkno,
                                                     &p_blkno, &p_blocks, NULL);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                for (i = 0; i < p_blocks; i++, p_blkno++) {
                        bh = __find_get_block(osb->sb->s_bdev, p_blkno,
                                              osb->sb->s_blocksize);
                        /* block not cached. */
                        if (!bh)
                                continue;

                        brelse(bh);
                        bh = NULL;
                        /* We are reading journal data which should not
                         * be put in the uptodate cache.
                         */
                        status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh);
                        if (status < 0) {
                                mlog_errno(status);
                                goto bail;
                        }

                        brelse(bh);
                        bh = NULL;
                }

                v_blkno += p_blocks;
        }

bail:
        return status;
}

struct ocfs2_la_recovery_item {
        struct list_head        lri_list;
        int                     lri_slot;
        struct ocfs2_dinode     *lri_la_dinode;
        struct ocfs2_dinode     *lri_tl_dinode;
        struct ocfs2_quota_recovery *lri_qrec;
        enum ocfs2_orphan_reco_type  lri_orphan_reco_type;
};

/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process.
 */
void ocfs2_complete_recovery(struct work_struct *work)
{
        int ret = 0;
        struct ocfs2_journal *journal =
                container_of(work, struct ocfs2_journal, j_recovery_work);
        struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item, *n;
        struct ocfs2_quota_recovery *qrec;
        enum ocfs2_orphan_reco_type orphan_reco_type;
        LIST_HEAD(tmp_la_list);

        trace_ocfs2_complete_recovery(
                (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);

        spin_lock(&journal->j_lock);
        list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
        spin_unlock(&journal->j_lock);

        list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
                list_del_init(&item->lri_list);

                ocfs2_wait_on_quotas(osb);

                la_dinode = item->lri_la_dinode;
                tl_dinode = item->lri_tl_dinode;
                qrec = item->lri_qrec;
                orphan_reco_type = item->lri_orphan_reco_type;

                trace_ocfs2_complete_recovery_slot(item->lri_slot,
                        la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
                        tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
                        qrec);

                if (la_dinode) {
                        ret = ocfs2_complete_local_alloc_recovery(osb,
                                                                  la_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(la_dinode);
                }

                if (tl_dinode) {
                        ret = ocfs2_complete_truncate_log_recovery(osb,
                                                                   tl_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(tl_dinode);
                }

                ret = ocfs2_recover_orphans(osb, item->lri_slot,
                                            orphan_reco_type);
                if (ret < 0)
                        mlog_errno(ret);

                if (qrec) {
                        ret = ocfs2_finish_quota_recovery(osb, qrec,
                                                          item->lri_slot);
                        if (ret < 0)
                                mlog_errno(ret);
                        /* Recovery info is already freed now */
                }

                kfree(item);
        }

        trace_ocfs2_complete_recovery_end(ret);
}

/* NOTE: This function always eats your references to la_dinode and
 * tl_dinode, either manually on error, or by passing them to
 * ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec,
                                            enum ocfs2_orphan_reco_type orphan_reco_type)
{
        struct ocfs2_la_recovery_item *item;

        item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
        if (!item) {
                /* Though we wish to avoid it, we are in fact safe in
                 * skipping local alloc cleanup as fsck.ocfs2 is more
                 * than capable of reclaiming unused space. */
                kfree(la_dinode);
                kfree(tl_dinode);

                if (qrec)
                        ocfs2_free_quota_recovery(qrec);

                mlog_errno(-ENOMEM);
                return;
        }

        INIT_LIST_HEAD(&item->lri_list);
        item->lri_la_dinode = la_dinode;
        item->lri_slot = slot_num;
        item->lri_tl_dinode = tl_dinode;
        item->lri_qrec = qrec;
        item->lri_orphan_reco_type = orphan_reco_type;

        spin_lock(&journal->j_lock);
        list_add_tail(&item->lri_list, &journal->j_la_cleanups);
        queue_work(journal->j_osb->ocfs2_wq, &journal->j_recovery_work);
        spin_unlock(&journal->j_lock);
}

/* Called by the mount code to queue the last part of recovery for its
 * own and offline slot(s). */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = osb->journal;

        if (ocfs2_is_hard_readonly(osb))
                return;

        /* No need to queue up our truncate_log as regular cleanup will catch
         * that */
        ocfs2_queue_recovery_completion(journal, osb->slot_num,
                                        osb->local_alloc_copy, NULL, NULL,
                                        ORPHAN_NEED_TRUNCATE);
        ocfs2_schedule_truncate_log_flush(osb, 0);

        osb->local_alloc_copy = NULL;

        /* queue to recover orphan slots for all offline slots */
        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
        ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
        ocfs2_free_replay_slots(osb);
}

void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
        if (osb->quota_rec) {
                ocfs2_queue_recovery_completion(osb->journal,
                                                osb->slot_num,
                                                NULL,
                                                NULL,
                                                osb->quota_rec,
                                                ORPHAN_NEED_TRUNCATE);
                osb->quota_rec = NULL;
        }
}

static int __ocfs2_recovery_thread(void *arg)
{
        int status, node_num, slot_num;
        struct ocfs2_super *osb = arg;
        struct ocfs2_recovery_map *rm = osb->recovery_map;
        int *rm_quota = NULL;
        int rm_quota_used = 0, i;
        struct ocfs2_quota_recovery *qrec;

        /* Whether quota is supported. */
        int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
                        OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
                || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
                        OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);

        status = ocfs2_wait_on_mount(osb);
        if (status < 0) {
                goto bail;
        }

        if (quota_enabled) {
                rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
                if (!rm_quota) {
                        status = -ENOMEM;
                        goto bail;
                }
        }
restart:
        status = ocfs2_super_lock(osb, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_compute_replay_slots(osb);
        if (status < 0)
                mlog_errno(status);

        /* queue recovery for our own slot */
        ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
                                        NULL, NULL, ORPHAN_NO_NEED_TRUNCATE);

        spin_lock(&osb->osb_lock);
        while (rm->rm_used) {
                /* It's always safe to remove entry zero, as we won't
                 * clear it until ocfs2_recover_node() has succeeded. */
                node_num = rm->rm_entries[0];
                spin_unlock(&osb->osb_lock);
                slot_num = ocfs2_node_num_to_slot(osb, node_num);
                trace_ocfs2_recovery_thread_node(node_num, slot_num);
                if (slot_num == -ENOENT) {
                        status = 0;
                        goto skip_recovery;
                }

                /* It is a bit subtle with quota recovery. We cannot do it
                 * immediately because we have to obtain cluster locks from
                 * quota files and we also don't want to just skip it because
                 * then quota usage would be out of sync until some node takes
                 * the slot. So we remember which nodes need quota recovery
                 * and when everything else is done, we recover quotas. */
                if (quota_enabled) {
                        for (i = 0; i < rm_quota_used
                                        && rm_quota[i] != slot_num; i++)
                                ;

                        if (i == rm_quota_used)
                                rm_quota[rm_quota_used++] = slot_num;
                }

                status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
                if (!status) {
                        ocfs2_recovery_map_clear(osb, node_num);
                } else {
                        mlog(ML_ERROR,
                             "Error %d recovering node %d on device (%u,%u)!\n",
                             status, node_num,
                             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
                        mlog(ML_ERROR, "Volume requires unmount.\n");
                }

                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
        trace_ocfs2_recovery_thread_end(status);

        /* Refresh all journal recovery generations from disk */
        status = ocfs2_check_journals_nolocks(osb);
        status = (status == -EROFS) ? 0 : status;
        if (status < 0)
                mlog_errno(status);

        /* Now it is right time to recover quotas... We have to do this under
         * superblock lock so that no one can start using the slot (and crash)
         * before we recover it */
        if (quota_enabled) {
                for (i = 0; i < rm_quota_used; i++) {
                        qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
                        if (IS_ERR(qrec)) {
                                status = PTR_ERR(qrec);
                                mlog_errno(status);
                                continue;
                        }
                        ocfs2_queue_recovery_completion(osb->journal,
                                                        rm_quota[i],
                                                        NULL, NULL, qrec,
                                                        ORPHAN_NEED_TRUNCATE);
                }
        }

        ocfs2_super_unlock(osb, 1);

        /* queue recovery for offline slots */
        ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);

bail:
        mutex_lock(&osb->recovery_lock);
        if (!status && !ocfs2_recovery_completed(osb)) {
                mutex_unlock(&osb->recovery_lock);
                goto restart;
        }

        ocfs2_free_replay_slots(osb);
        osb->recovery_thread_task = NULL;
        mb(); /* sync with ocfs2_recovery_thread_running */
        wake_up(&osb->recovery_event);

        mutex_unlock(&osb->recovery_lock);

        if (quota_enabled)
                kfree(rm_quota);

        return status;
}

void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
        mutex_lock(&osb->recovery_lock);

        trace_ocfs2_recovery_thread(node_num, osb->node_num,
                osb->disable_recovery, osb->recovery_thread_task,
                osb->disable_recovery ?
                -1 : ocfs2_recovery_map_set(osb, node_num));

        if (osb->disable_recovery)
                goto out;

        if (osb->recovery_thread_task)
                goto out;

        osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
                                                "ocfs2rec-%s", osb->uuid_str);
        if (IS_ERR(osb->recovery_thread_task)) {
                mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
                osb->recovery_thread_task = NULL;
        }

out:
        mutex_unlock(&osb->recovery_lock);
        wake_up(&osb->recovery_event);
}

static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
                                    int slot_num,
                                    struct buffer_head **bh,
                                    struct inode **ret_inode)
{
        int status = -EACCES;
        struct inode *inode = NULL;

        BUG_ON(slot_num >= osb->max_slots);

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (!inode || is_bad_inode(inode)) {
                mlog_errno(status);
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = 0;

bail:
        if (inode) {
                if (status || !ret_inode)
                        iput(inode);
                else
                        *ret_inode = inode;
        }
        return status;
}

/* Does the actual journal replay and marks the journal inode as
 * clean. Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
                                int node_num,
                                int slot_num)
{
        int status;
        int got_lock = 0;
        unsigned int flags;
        struct inode *inode = NULL;
        struct ocfs2_dinode *fe;
        journal_t *journal = NULL;
        struct buffer_head *bh = NULL;
        u32 slot_reco_gen;

        status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
        if (status) {
                mlog_errno(status);
                goto done;
        }

        fe = (struct ocfs2_dinode *)bh->b_data;
        slot_reco_gen = ocfs2_get_recovery_generation(fe);
        brelse(bh);
        bh = NULL;

        /*
         * As the fs recovery is asynchronous, there is a small chance that
         * another node mounted (and recovered) the slot before the recovery
         * thread could get the lock. To handle that, we dirty read the journal
         * inode for that slot to get the recovery generation. If it is
         * different than what we expected, the slot has been recovered.
         * If not, it needs recovery.
         */
        if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
                trace_ocfs2_replay_journal_recovered(slot_num,
                     osb->slot_recovery_generations[slot_num], slot_reco_gen);
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                status = -EBUSY;
                goto done;
        }

        /* Continue with recovery as the journal has not yet been recovered */

        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                trace_ocfs2_replay_journal_lock_err(status);
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not lock journal!\n");
                goto done;
        }
        got_lock = 1;

        fe = (struct ocfs2_dinode *)bh->b_data;

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        slot_reco_gen = ocfs2_get_recovery_generation(fe);

        if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
                trace_ocfs2_replay_journal_skip(node_num);
                /* Refresh recovery generation for the slot */
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                goto done;
        }

        /* we need to run complete recovery for offline orphan slots */
        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);

        printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
               "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
               MINOR(osb->sb->s_dev));

        OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

        status = ocfs2_force_read_journal(inode);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        journal = jbd2_journal_init_inode(inode);
        if (IS_ERR(journal)) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = PTR_ERR(journal);
                goto done;
        }

        status = jbd2_journal_load(journal);
        if (status < 0) {
                mlog_errno(status);
                BUG_ON(!igrab(inode));
                jbd2_journal_destroy(journal);
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal, slot_num);

        /* wipe the journal */
        jbd2_journal_lock_updates(journal);
        status = jbd2_journal_flush(journal, 0);
        jbd2_journal_unlock_updates(journal);
        if (status < 0)
                mlog_errno(status);

        /* This will mark the node clean */
        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        /* Increment recovery generation to indicate successful recovery */
        ocfs2_bump_recovery_generation(fe);
        osb->slot_recovery_generations[slot_num] =
                                        ocfs2_get_recovery_generation(fe);

        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
        status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
        if (status < 0)
                mlog_errno(status);

        BUG_ON(!igrab(inode));

        jbd2_journal_destroy(journal);

        printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
               "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
               MINOR(osb->sb->s_dev));
done:
        /* drop the lock on this node's journal */
        if (got_lock)
                ocfs2_inode_unlock(inode, 1);

        iput(inode);
        brelse(bh);

        return status;
}

/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num)
{
	int status = 0;
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;

	trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);

	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
	BUG_ON(osb->node_num == node_num);

	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		if (status == -EBUSY) {
			trace_ocfs2_recover_node_skip(slot_num, node_num);
			status = 0;
			goto done;
		}
		mlog_errno(status);
		goto done;
	}

	/* Stamp a clean local alloc file AFTER recovering the journal... */
	status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* An error from begin_truncate_log_recovery is not
	 * serious enough to warrant halting the rest of
	 * recovery. */
	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0)
		mlog_errno(status);

	/* Likewise, this would be a strange but ultimately not so
	 * harmful place to get an error... */
	status = ocfs2_clear_slot(osb, slot_num);
	if (status < 0)
		mlog_errno(status);

	/* This will kfree the memory pointed to by la_copy and tl_copy */
	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
					tl_copy, NULL, ORPHAN_NEED_TRUNCATE);

	status = 0;
done:

	return status;
}

/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num)
{
	int status, flags;
	struct inode *inode = NULL;

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (inode == NULL) {
		mlog(ML_ERROR, "access error\n");
		status = -EACCES;
		goto bail;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
	status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto bail;
	}

	ocfs2_inode_unlock(inode, 1);
bail:
	iput(inode);

	return status;
}

/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
	unsigned int node_num;
	int status, i;
	u32 gen;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *di;

	/* This is called with the super block cluster lock, so we
	 * know that the slot map can't change underneath us. */

	for (i = 0; i < osb->max_slots; i++) {
		/* Read journal inode to get the recovery generation */
		status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		di = (struct ocfs2_dinode *)bh->b_data;
		gen = ocfs2_get_recovery_generation(di);
		brelse(bh);
		bh = NULL;

		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;

		trace_ocfs2_mark_dead_nodes(i,
					    osb->slot_recovery_generations[i]);

		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
		if (status == -ENOENT) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		if (__ocfs2_recovery_map_test(osb, node_num)) {
			spin_unlock(&osb->osb_lock);
			continue;
		}
		spin_unlock(&osb->osb_lock);

		/* Ok, we have a slot occupied by another node which
		 * is not in the recovery map. We trylock its journal
		 * file here to test if it's still alive. */
		status = ocfs2_trylock_journal(osb, i);
		if (!status) {
			/* Since we're called from mount, we know that
			 * the recovery thread can't race us on
			 * setting / checking the recovery bits. */
			ocfs2_recovery_thread(osb, node_num);
		} else if ((status < 0) && (status != -EAGAIN)) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	return status;
}

/*
 * The scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * milliseconds. Add some randomness to the timeout to minimize multiple
 * nodes firing the timer at the same time.
 */
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
	unsigned long time;

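	/* Up to ~5 s of random jitter on top of the base timeout. */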
	get_random_bytes(&time, sizeof(time));
	time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
	return msecs_to_jiffies(time);
}

/*
 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
 * is done to catch any orphans that are left over in orphan directories.
 *
 * It scans all slots, even ones that are in use. It does so to handle the
 * case described below:
 *
 * Node 1 has an inode it was using. The dentry went away due to memory
 * pressure. Node 1 closes the inode, but it's on the free list. The node
 * has the open lock.
 * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
 * but node 1 has no dentry and doesn't get the message. It trylocks the
 * open lock, sees that another node has a PR, and does nothing.
 * Later node 2 runs its orphan dir. It igets the inode, trylocks the
 * open lock, sees the PR still, and does nothing.
 * Basically, we have to trigger an orphan iput on node 1. The only way
 * for this to happen is if node 1 runs node 2's orphan dir.
 *
 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * milliseconds. It gets an EX lock on os_lockres and checks the sequence
 * number stored in the LVB. If the sequence number has changed, it means
 * some other node has done the scan. This node skips the scan and tracks
 * the sequence number. If the sequence number didn't change, it means a
 * scan hasn't happened. The node queues a scan and increments the
 * sequence number in the LVB.
 */
static void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;
	int status, i;
	u32 seqno = 0;

	os = &osb->osb_orphan_scan;

	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto out;

	trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
					    atomic_read(&os->os_state));

	status = ocfs2_orphan_scan_lock(osb, &seqno);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto out;
	}

	/* Do not queue the tasks if the volume is being umounted */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto unlock;

	if (os->os_seqno != seqno) {
		os->os_seqno = seqno;
		goto unlock;
	}

	for (i = 0; i < osb->max_slots; i++)
		ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
						NULL, ORPHAN_NO_NEED_TRUNCATE);
	/*
	 * We queued a recovery on the orphan slots; increment the sequence
	 * number and update the LVB so other nodes will skip the scan
	 * for a while.
	 */
	seqno++;
	os->os_count++;
	os->os_scantime = ktime_get_seconds();
unlock:
	ocfs2_orphan_scan_unlock(osb, seqno);
out:
	trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
					  atomic_read(&os->os_state));
	return;
}

/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
static void ocfs2_orphan_scan_work(struct work_struct *work)
{
	struct ocfs2_orphan_scan *os;
	struct ocfs2_super *osb;

	os = container_of(work, struct ocfs2_orphan_scan,
			  os_orphan_scan_work.work);
	osb = os->os_osb;

	mutex_lock(&os->os_lock);
	ocfs2_queue_orphan_scan(osb);
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
		queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
				   ocfs2_orphan_scan_timeout());
	mutex_unlock(&os->os_lock);
}

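/*
 * Stop the periodic scan: flip the state to INACTIVE, then cancel any
 * queued work. Taking os_lock serializes us against a scan that is
 * already running, so no new work is queued once we return.
 */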
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
		mutex_lock(&os->os_lock);
		cancel_delayed_work(&os->os_orphan_scan_work);
		mutex_unlock(&os->os_lock);
	}
}

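/*
 * One-time setup of the per-super orphan scan state. The delayed work
 * is initialized here but not queued until ocfs2_orphan_scan_start().
 */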
void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_osb = osb;
	os->os_count = 0;
	os->os_seqno = 0;
	mutex_init(&os->os_lock);
	INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
}

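/*
 * Arm the periodic scan. On hard read-only or locally mounted volumes
 * there are no other nodes' orphan dirs to pick up after, so the scan
 * is left inactive.
 */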
void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_scantime = ktime_get_seconds();
	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
	else {
		atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
		queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
				   ocfs2_orphan_scan_timeout());
	}
}

struct ocfs2_orphan_filldir_priv {
	struct dir_context	ctx;
	struct inode		*head;
	struct ocfs2_super	*osb;
	enum ocfs2_orphan_reco_type orphan_reco_type;
};

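/*
 * dir_context actor called for each entry in an orphan directory. It
 * igets every plausible orphan and chains the inodes onto a singly
 * linked list through ip_next_orphan; the caller walks that list.
 */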
static bool ocfs2_orphan_filldir(struct dir_context *ctx, const char *name,
				 int name_len, loff_t pos, u64 ino,
				 unsigned type)
{
	struct ocfs2_orphan_filldir_priv *p =
		container_of(ctx, struct ocfs2_orphan_filldir_priv, ctx);
	struct inode *iter;

	if (name_len == 1 && !strncmp(".", name, 1))
		return true;
	if (name_len == 2 && !strncmp("..", name, 2))
		return true;

	/* do not include dio entry in case of orphan scan */
	if ((p->orphan_reco_type == ORPHAN_NO_NEED_TRUNCATE) &&
	    (!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
		      OCFS2_DIO_ORPHAN_PREFIX_LEN)))
		return true;

	/* Skip bad inodes so that recovery can continue */
	iter = ocfs2_iget(p->osb, ino,
			  OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
	if (IS_ERR(iter))
		return true;

	if (!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
		     OCFS2_DIO_ORPHAN_PREFIX_LEN))
		OCFS2_I(iter)->ip_flags |= OCFS2_INODE_DIO_ORPHAN_ENTRY;

	/* Skip inodes which are already added to the recovery list, since
	 * dio may happen concurrently with unlink/rename */
	if (OCFS2_I(iter)->ip_next_orphan) {
		iput(iter);
		return true;
	}

	trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
	p->head = iter;

	return true;
}

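/*
 * Collect the orphans from one slot's orphan dir under the inode and
 * cluster locks, handing the list back to the caller through *head.
 */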
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
			       int slot,
			       struct inode **head,
			       enum ocfs2_orphan_reco_type orphan_reco_type)
{
	int status;
	struct inode *orphan_dir_inode = NULL;
	struct ocfs2_orphan_filldir_priv priv = {
		.ctx.actor = ocfs2_orphan_filldir,
		.osb = osb,
		.head = *head,
		.orphan_reco_type = orphan_reco_type
	};

	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
						       ORPHAN_DIR_SYSTEM_INODE,
						       slot);
	if (!orphan_dir_inode) {
		status = -ENOENT;
		mlog_errno(status);
		return status;
	}

	inode_lock(orphan_dir_inode);
	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
	if (status) {
		mlog_errno(status);
		goto out_cluster;
	}

	*head = priv.head;

out_cluster:
	ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
	inode_unlock(orphan_dir_inode);
	iput(orphan_dir_inode);
	return status;
}

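/* Returns nonzero once no process is in the middle of an orphan wipe
 * on this slot, i.e. recovery of the slot's orphan dir may proceed. */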
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
					      int slot)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = !osb->osb_orphan_wipes[slot];
	spin_unlock(&osb->osb_lock);
	return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
					     int slot)
{
	spin_lock(&osb->osb_lock);
	/* Mark ourselves such that new processes in delete_inode()
	 * know to quit early. */
	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
	while (osb->osb_orphan_wipes[slot]) {
		/* If any processes are already in the middle of an
		 * orphan wipe on this dir, then we need to wait for
		 * them. */
		spin_unlock(&osb->osb_lock);
		wait_event_interruptible(osb->osb_wipe_event,
					 ocfs2_orphan_recovery_can_continue(osb, slot));
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
					      int slot)
{
	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}

/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most) of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount, so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot,
				 enum ocfs2_orphan_reco_type orphan_reco_type)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *iter;
	struct ocfs2_inode_info *oi;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_recover_orphans(slot);

	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode, orphan_reco_type);
	ocfs2_clear_recovering_orphan_dir(osb, slot);

	/* Error here should be noted, but we want to continue with as
	 * many queued inodes as we've got. */
	if (ret)
		mlog_errno(ret);

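	/* Walk the list built by ocfs2_orphan_filldir(), dropping our
	 * reference on each inode as we go. */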
	while (inode) {
		oi = OCFS2_I(inode);
		trace_ocfs2_recover_orphans_iput(
					(unsigned long long)oi->ip_blkno);

		iter = oi->ip_next_orphan;
		oi->ip_next_orphan = NULL;

		if (oi->ip_flags & OCFS2_INODE_DIO_ORPHAN_ENTRY) {
			inode_lock(inode);
			ret = ocfs2_rw_lock(inode, 1);
			if (ret < 0) {
				mlog_errno(ret);
				goto unlock_mutex;
			}
			/*
			 * We need to take and drop the inode lock to
			 * force read inode from disk.
			 */
			ret = ocfs2_inode_lock(inode, &di_bh, 1);
			if (ret) {
				mlog_errno(ret);
				goto unlock_rw;
			}

			di = (struct ocfs2_dinode *)di_bh->b_data;

			if (di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)) {
				ret = ocfs2_truncate_file(inode, di_bh,
						i_size_read(inode));
				if (ret < 0) {
					if (ret != -ENOSPC)
						mlog_errno(ret);
					goto unlock_inode;
				}

				ret = ocfs2_del_inode_from_orphan(osb, inode,
						di_bh, 0, 0);
				if (ret)
					mlog_errno(ret);
			}
unlock_inode:
			ocfs2_inode_unlock(inode, 1);
			brelse(di_bh);
			di_bh = NULL;
unlock_rw:
			ocfs2_rw_unlock(inode, 1);
unlock_mutex:
			inode_unlock(inode);

			/* clear dio flag in ocfs2_inode_info */
			oi->ip_flags &= ~OCFS2_INODE_DIO_ORPHAN_ENTRY;
		} else {
			spin_lock(&oi->ip_lock);
			/* Set the proper information to get us going into
			 * ocfs2_delete_inode. */
			oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
			spin_unlock(&oi->ip_lock);
		}

		iput(inode);
		inode = iter;
	}

	return ret;
}

static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
	/* This check is good because ocfs2 will wait on our recovery
	 * thread before changing it to something other than MOUNTED
	 * or DISABLED. */
	wait_event(osb->osb_mount_event,
		   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
		   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
		   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

	/* If there's an error on mount, then we may never get to the
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
		trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}

	return 0;
}

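/*
 * Commit thread: checkpoints the journal whenever a transaction is
 * pending, and drains any remaining transactions on shutdown.
 */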
static int ocfs2_commit_thread(void *arg)
{
	int status;
	struct ocfs2_super *osb = arg;
	struct ocfs2_journal *journal = osb->journal;

	/* we can trust j_num_trans here because _should_stop() is only set in
	 * shutdown and nobody other than ourselves should be able to start
	 * transactions. committing on shutdown might take a few iterations
	 * as final transactions put deleted inodes on the list */
	while (!(kthread_should_stop() &&
		 atomic_read(&journal->j_num_trans) == 0)) {

		wait_event_interruptible(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop());

		status = ocfs2_commit_cache(osb);
		if (status < 0) {
			static unsigned long abort_warn_time;

			/* Warn about this once per minute */
			if (printk_timed_ratelimit(&abort_warn_time, 60*HZ))
				mlog(ML_ERROR, "status = %d, journal is "
						"already aborted.\n", status);
			/*
			 * After ocfs2_commit_cache() fails, j_num_trans has a
			 * non-zero value. Sleep here to avoid a busy-wait
			 * loop.
			 */
			msleep_interruptible(1000);
		}

		if (kthread_should_stop() && atomic_read(&journal->j_num_trans)) {
			mlog(ML_KTHREAD,
			     "commit_thread: %u transactions pending on "
			     "shutdown\n",
			     atomic_read(&journal->j_num_trans));
		}
	}

	return 0;
}

/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
	int ret = 0;
	unsigned int slot;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int journal_dirty = 0;

	for (slot = 0; slot < osb->max_slots; slot++) {
		ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		di = (struct ocfs2_dinode *) di_bh->b_data;

		osb->slot_recovery_generations[slot] =
				ocfs2_get_recovery_generation(di);

		if (le32_to_cpu(di->id1.journal1.ij_flags) &
		    OCFS2_JOURNAL_DIRTY_FL)
			journal_dirty = 1;

		brelse(di_bh);
		di_bh = NULL;
	}

out:
	if (journal_dirty)
		ret = -EROFS;
	return ret;
}