// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bp(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}
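
/*
 * For illustration: on a log of l_logBBsize = 1000 basic blocks, a range
 * of (blk_no = 990, bbcount = 20) is rejected because 990 + 20 runs past
 * the end of the log, while (blk_no = 990, bbcount = 10) is accepted.
 */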

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (!xlog_verify_bp(log, 0, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
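
/*
 * Sizing example for the above (illustrative): with 4k log sectors,
 * l_sectBBsize = 8, a request for nbblks = 3 is first padded to 11 to
 * cover a possible non-sector-aligned start, then rounded up to 16
 * basic blocks.  A single-block request (nbblks = 1) skips the padding
 * and rounds up to one full sector (8 basic blocks).
 */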

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC char *
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_flags |= XBF_READ;
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
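
/*
 * Alignment example (illustrative): with l_sectBBsize = 4, a read of
 * (blk_no = 5, nbblks = 2) is widened to the sector-aligned range of
 * blocks 4-7 (round_down(5, 4) = 4, round_up(2, 4) = 4).  xlog_align()
 * then returns the buffer address at offset 5 & 3 = 1 basic block in,
 * which is exactly the data the caller asked for.
 */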

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	char		**offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	char		*offset)
{
	char		*orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_verify_bp(log, blk_no, nbblks)) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}

	/*
	 * On v5 supers, a bli could be attached to update the metadata LSN.
	 * Clean it up.
	 */
	if (bp->b_log_item)
		xfs_buf_item_relse(bp);
	ASSERT(bp->b_log_item == NULL);

	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
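
/*
 * Loop invariant of the search above (descriptive note): first_blk always
 * names a block whose cycle number differs from the target and end_blk one
 * whose cycle number matches it, so when the two converge, end_blk is the
 * approximate first block of the target cycle, which callers then verify.
 */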

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If the caller can handle a return of 1, then this
	 * routine will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
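
/*
 * Return convention for the routine above: 0 means a record header was
 * found and *last_blk now points just past a complete log record; 1 means
 * the scan reached the beginning of the physical log without finding a
 * header (callers may retry against the end of the log); negative values
 * are errnos.
 */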

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	struct xfs_buf		*bp,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
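
/*
 * For illustration: with l_logBBsize = 1000, head_blk = 900 and
 * tail_blk = 100, the free space wraps: 100 + (1000 - 900) = 200 blocks.
 * With head_blk = 100 and tail_blk = 900 it is simply 900 - 100 = 800.
 */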

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	struct xfs_buf		*bp;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
				&tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	struct xfs_buf		*bp,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	struct xfs_buf		*tmp_bp;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/bp pointers from the caller.
	 */
	tmp_bp = xlog_get_bp(log, 1);
	if (!tmp_bp)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
				      &tmp_rhead, &tmp_wrapped);
	xlog_put_bp(tmp_bp);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
					      rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}
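
/*
 * For illustration: with l_logBBsize = 1000, a computed bno of 1005 wraps
 * to block 5, while a bno of 42 is returned unchanged.
 */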

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	struct xfs_buf		*bp,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}

	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	xfs_buf_t		*bp;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		return error;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		return -EIO;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, bp, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, bp,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, bp,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean. If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
1733 
1734 /*
1735  * This routine is called to blow away any incomplete log writes out
1736  * in front of the log head.  We do this so that we won't become confused
1737  * if we come up, write only a little bit more, and then crash again.
1738  * If we leave the partial log records out there, this situation could
1739  * cause us to think those partial writes are valid blocks since they
1740  * have the current cycle number.  We get rid of them by overwriting them
1741  * with empty log records with the old cycle number rather than the
1742  * current one.
1743  *
1744  * The tail lsn is passed in rather than taken from
1745  * the log so that we will not write over the unmount record after a
1746  * clean unmount in a 512 block log.  Doing so would leave the log without
1747  * any valid log records in it until a new one was written.  If we crashed
1748  * during that time we would not be able to recover.
1749  */
1750 STATIC int
1751 xlog_clear_stale_blocks(
1752 	struct xlog	*log,
1753 	xfs_lsn_t	tail_lsn)
1754 {
1755 	int		tail_cycle, head_cycle;
1756 	int		tail_block, head_block;
1757 	int		tail_distance, max_distance;
1758 	int		distance;
1759 	int		error;
1760 
1761 	tail_cycle = CYCLE_LSN(tail_lsn);
1762 	tail_block = BLOCK_LSN(tail_lsn);
1763 	head_cycle = log->l_curr_cycle;
1764 	head_block = log->l_curr_block;
1765 
1766 	/*
1767 	 * Figure out the distance between the new head of the log
1768 	 * and the tail.  We want to write over any blocks beyond the
1769 	 * head that we may have written just before the crash, but
1770 	 * we don't want to overwrite the tail of the log.
1771 	 */
1772 	if (head_cycle == tail_cycle) {
1773 		/*
1774 		 * The tail is behind the head in the physical log,
1775 		 * so the distance from the head to the tail is the
1776 		 * distance from the head to the end of the log plus
1777 		 * the distance from the beginning of the log to the
1778 		 * tail.
1779 		 */
1780 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1781 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1782 					 XFS_ERRLEVEL_LOW, log->l_mp);
1783 			return -EFSCORRUPTED;
1784 		}
1785 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1786 	} else {
1787 		/*
1788 		 * The head is behind the tail in the physical log,
1789 		 * so the distance from the head to the tail is just
1790 		 * the tail block minus the head block.
1791 		 */
1792 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))) {
1793 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1794 					 XFS_ERRLEVEL_LOW, log->l_mp);
1795 			return -EFSCORRUPTED;
1796 		}
1797 		tail_distance = tail_block - head_block;
1798 	}
1799 
1800 	/*
1801 	 * If the head is right up against the tail, we can't clear
1802 	 * anything.
1803 	 */
1804 	if (tail_distance <= 0) {
1805 		ASSERT(tail_distance == 0);
1806 		return 0;
1807 	}
1808 
1809 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1810 	/*
1811 	 * Take the smaller of the maximum amount of outstanding I/O
1812 	 * we could have and the distance to the tail to clear out.
1813 	 * We take the smaller so that we don't overwrite the tail and
1814 	 * we don't waste all day writing from the head to the tail
1815 	 * for no reason.
1816 	 */
1817 	max_distance = min(max_distance, tail_distance);
1818 
1819 	if ((head_block + max_distance) <= log->l_logBBsize) {
1820 		/*
1821 		 * We can stomp all the blocks we need to without
1822 		 * wrapping around the end of the log.  Just do it
1823 		 * in a single write.  Use the cycle number of the
1824 		 * current cycle minus one so that the log will look like:
1825 		 *     n ... | n - 1 ...
1826 		 */
1827 		error = xlog_write_log_records(log, (head_cycle - 1),
1828 				head_block, max_distance, tail_cycle,
1829 				tail_block);
1830 		if (error)
1831 			return error;
1832 	} else {
1833 		/*
1834 		 * We need to wrap around the end of the physical log in
1835 		 * order to clear all the blocks.  Do it in two separate
1836 		 * I/Os.  The first write should be from the head to the
1837 		 * end of the physical log, and it should use the current
1838 		 * cycle number minus one just like above.
1839 		 */
1840 		distance = log->l_logBBsize - head_block;
1841 		error = xlog_write_log_records(log, (head_cycle - 1),
1842 				head_block, distance, tail_cycle,
1843 				tail_block);
1844 
1845 		if (error)
1846 			return error;
1847 
1848 		/*
1849 		 * Now write the blocks at the start of the physical log.
1850 		 * This writes the remainder of the blocks we want to clear.
1851 		 * It uses the current cycle number since we're now on the
1852 		 * same cycle as the head so that we get:
1853 		 *    n ... n ... | n - 1 ...
1854 		 *    ^^^^^ blocks we're writing
1855 		 */
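		/*
		 * Worked example (illustrative): with l_logBBsize = 1000,
		 * head_block = 900 and max_distance = 300, the first write
		 * above covered blocks 900-999 with cycle n - 1; this one
		 * covers blocks 0-199 with cycle n.
		 */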
1856 		distance = max_distance - (log->l_logBBsize - head_block);
1857 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1858 				tail_cycle, tail_block);
1859 		if (error)
1860 			return error;
1861 	}
1862 
1863 	return 0;
1864 }
1865 
1866 /******************************************************************************
1867  *
1868  *		Log recover routines
1869  *
1870  ******************************************************************************
1871  */
1872 
1873 /*
1874  * Sort the log items in the transaction.
1875  *
1876  * The ordering constraints are defined by the inode allocation and unlink
1877  * behaviour. The rules are:
1878  *
1879  *	1. Every item is only logged once in a given transaction. Hence it
1880  *	   represents the last logged state of the item. Hence ordering is
1881  *	   dependent on the order in which operations need to be performed so
1882  *	   required initial conditions are always met.
1883  *
1884  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1885  *	   there's nothing to replay from them so we can simply cull them
1886  *	   from the transaction. However, we can't do that until after we've
1887  *	   replayed all the other items because they may be dependent on the
1888  *	   cancelled buffer and replaying the cancelled buffer can remove it
1889 	 *	   from the cancelled buffer table. Hence they have to be done last.
1890  *
1891  *	3. Inode allocation buffers must be replayed before inode items that
1892  *	   read the buffer and replay changes into it. For filesystems using the
1893  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1894  *	   treated the same as inode allocation buffers as they create and
1895  *	   initialise the buffers directly.
1896  *
1897  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1898  *	   This ensures that inodes are completely flushed to the inode buffer
1899  *	   in a "free" state before we remove the unlinked inode list pointer.
1900  *
1901  * Hence the ordering needs to be inode allocation buffers first, inode items
1902  * second, inode unlink buffers third and cancelled buffers last.
1903  *
1904  * But there's a problem with that - we can't tell an inode allocation buffer
1905  * apart from a regular buffer, so we can't separate them. We can, however,
1906  * tell an inode unlink buffer from the others, and so we can separate them out
1907  * from all the other buffers and move them to last.
1908  *
1909  * Hence, 4 lists, in order from head to tail:
1910  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1911  *	- item_list for all non-buffer items
1912  *	- inode_buffer_list for inode unlink buffers
1913  *	- cancel_list for the cancelled buffers
1914  *
1915  * Note that we add objects to the tail of the lists so that first-to-last
1916  * ordering is preserved within the lists. Adding objects to the head of the
1917  * list means when we traverse from the head we walk them in last-to-first
1918  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1919  * but for all other items there may be specific ordering that we need to
1920  * preserve.
1921  */
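/*
 * Worked example (illustrative): a transaction logged as { inode item,
 * cancelled buffer, regular buffer, inode unlink buffer } is rebuilt below
 * as { regular buffer, inode item, inode unlink buffer, cancelled buffer }.
 */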
1922 STATIC int
1923 xlog_recover_reorder_trans(
1924 	struct xlog		*log,
1925 	struct xlog_recover	*trans,
1926 	int			pass)
1927 {
1928 	xlog_recover_item_t	*item, *n;
1929 	int			error = 0;
1930 	LIST_HEAD(sort_list);
1931 	LIST_HEAD(cancel_list);
1932 	LIST_HEAD(buffer_list);
1933 	LIST_HEAD(inode_buffer_list);
1934 	LIST_HEAD(inode_list);
1935 
1936 	list_splice_init(&trans->r_itemq, &sort_list);
1937 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1938 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1939 
1940 		switch (ITEM_TYPE(item)) {
1941 		case XFS_LI_ICREATE:
1942 			list_move_tail(&item->ri_list, &buffer_list);
1943 			break;
1944 		case XFS_LI_BUF:
1945 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1946 				trace_xfs_log_recover_item_reorder_head(log,
1947 							trans, item, pass);
1948 				list_move(&item->ri_list, &cancel_list);
1949 				break;
1950 			}
1951 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1952 				list_move(&item->ri_list, &inode_buffer_list);
1953 				break;
1954 			}
1955 			list_move_tail(&item->ri_list, &buffer_list);
1956 			break;
1957 		case XFS_LI_INODE:
1958 		case XFS_LI_DQUOT:
1959 		case XFS_LI_QUOTAOFF:
1960 		case XFS_LI_EFD:
1961 		case XFS_LI_EFI:
1962 		case XFS_LI_RUI:
1963 		case XFS_LI_RUD:
1964 		case XFS_LI_CUI:
1965 		case XFS_LI_CUD:
1966 		case XFS_LI_BUI:
1967 		case XFS_LI_BUD:
1968 			trace_xfs_log_recover_item_reorder_tail(log,
1969 							trans, item, pass);
1970 			list_move_tail(&item->ri_list, &inode_list);
1971 			break;
1972 		default:
1973 			xfs_warn(log->l_mp,
1974 				"%s: unrecognized type of log operation",
1975 				__func__);
1976 			ASSERT(0);
1977 			/*
1978 			 * return the remaining items back to the transaction
1979 			 * item list so they can be freed in caller.
1980 			 */
1981 			if (!list_empty(&sort_list))
1982 				list_splice_init(&sort_list, &trans->r_itemq);
1983 			error = -EIO;
1984 			goto out;
1985 		}
1986 	}
1987 out:
1988 	ASSERT(list_empty(&sort_list));
1989 	if (!list_empty(&buffer_list))
1990 		list_splice(&buffer_list, &trans->r_itemq);
1991 	if (!list_empty(&inode_list))
1992 		list_splice_tail(&inode_list, &trans->r_itemq);
1993 	if (!list_empty(&inode_buffer_list))
1994 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1995 	if (!list_empty(&cancel_list))
1996 		list_splice_tail(&cancel_list, &trans->r_itemq);
1997 	return error;
1998 }
1999 
2000 /*
2001  * Build up the table of buf cancel records so that we don't replay
2002  * cancelled data in the second pass.  For buffer records that are
2003  * not cancel records, there is nothing to do here so we just return.
2004  *
2005  * If we get a cancel record which is already in the table, this indicates
2006  * that the buffer was cancelled multiple times.  In order to ensure
2007  * that during pass 2 we keep the record in the table until we reach its
2008  * last occurrence in the log, we keep a reference count in the cancel
2009  * record in the table to tell us how many times we expect to see this
2010  * record during the second pass.
2011  */
2012 STATIC int
2013 xlog_recover_buffer_pass1(
2014 	struct xlog			*log,
2015 	struct xlog_recover_item	*item)
2016 {
2017 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2018 	struct list_head	*bucket;
2019 	struct xfs_buf_cancel	*bcp;
2020 
2021 	/*
2022 	 * If this isn't a cancel buffer item, then just return.
2023 	 */
2024 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
2025 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
2026 		return 0;
2027 	}
2028 
2029 	/*
2030 	 * Insert an xfs_buf_cancel record into the hash table of them.
2031 	 * If there is already an identical record, bump its reference count.
2032 	 */
2033 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
2034 	list_for_each_entry(bcp, bucket, bc_list) {
2035 		if (bcp->bc_blkno == buf_f->blf_blkno &&
2036 		    bcp->bc_len == buf_f->blf_len) {
2037 			bcp->bc_refcount++;
2038 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
2039 			return 0;
2040 		}
2041 	}
2042 
2043 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
2044 	bcp->bc_blkno = buf_f->blf_blkno;
2045 	bcp->bc_len = buf_f->blf_len;
2046 	bcp->bc_refcount = 1;
2047 	list_add_tail(&bcp->bc_list, bucket);
2048 
2049 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
2050 	return 0;
2051 }
2052 
2053 /*
2054  * Check to see whether the buffer being recovered has a corresponding
2055  * entry in the buffer cancel record table. If it does, return the cancel
2056  * buffer structure to the caller.
2057  */
2058 STATIC struct xfs_buf_cancel *
2059 xlog_peek_buffer_cancelled(
2060 	struct xlog		*log,
2061 	xfs_daddr_t		blkno,
2062 	uint			len,
2063 	unsigned short		flags)
2064 {
2065 	struct list_head	*bucket;
2066 	struct xfs_buf_cancel	*bcp;
2067 
2068 	if (!log->l_buf_cancel_table) {
2069 		/* empty table means no cancelled buffers in the log */
2070 		ASSERT(!(flags & XFS_BLF_CANCEL));
2071 		return NULL;
2072 	}
2073 
2074 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2075 	list_for_each_entry(bcp, bucket, bc_list) {
2076 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2077 			return bcp;
2078 	}
2079 
2080 	/*
2081 	 * We didn't find a corresponding entry in the table, so return NULL so
2082 	 * that the buffer is NOT cancelled.
2083 	 */
2084 	ASSERT(!(flags & XFS_BLF_CANCEL));
2085 	return NULL;
2086 }
2087 
2088 /*
2089  * If the buffer is being cancelled then return 1 so that it will be cancelled,
2090  * otherwise return 0.  If the buffer is actually a buffer cancel item
2091  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2092  * table and remove it from the table if this is the last reference.
2093  *
2094  * We remove the cancel record from the table when we encounter its last
2095  * occurrence in the log so that if the same buffer is re-used again after its
2096  * last cancellation we actually replay the changes made at that point.
2097  */
2098 STATIC int
2099 xlog_check_buffer_cancelled(
2100 	struct xlog		*log,
2101 	xfs_daddr_t		blkno,
2102 	uint			len,
2103 	unsigned short		flags)
2104 {
2105 	struct xfs_buf_cancel	*bcp;
2106 
2107 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2108 	if (!bcp)
2109 		return 0;
2110 
2111 	/*
2112 	 * We've got a match, so return 1 so that the recovery of this buffer
2113 	 * is cancelled.  If this buffer is actually a buffer cancel log
2114 	 * item, then decrement the refcount on the one in the table and
2115 	 * remove it if this is the last reference.
2116 	 */
2117 	if (flags & XFS_BLF_CANCEL) {
2118 		if (--bcp->bc_refcount == 0) {
2119 			list_del(&bcp->bc_list);
2120 			kmem_free(bcp);
2121 		}
2122 	}
2123 	return 1;
2124 }
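
/*
 * Example (illustrative): a buffer cancelled three times in the log gets a
 * single table entry with bc_refcount == 3 from pass 1; pass 2 then
 * decrements the count once per cancel record and frees the entry when the
 * last occurrence is seen, so any later reuse of those blocks is replayed.
 */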
2125 
2126 /*
2127  * Perform recovery for a buffer full of inodes.  In these buffers, the only
2128  * data which should be recovered is that which corresponds to the
2129  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2130  * data for the inodes is always logged through the inodes themselves rather
2131  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2132  *
2133  * The only time when buffers full of inodes are fully recovered is when the
2134  * buffer is full of newly allocated inodes.  In this case the buffer will
2135  * not be marked as an inode buffer and so will be sent to
2136  * xlog_recover_do_reg_buffer() below during recovery.
2137  */
2138 STATIC int
2139 xlog_recover_do_inode_buffer(
2140 	struct xfs_mount	*mp,
2141 	xlog_recover_item_t	*item,
2142 	struct xfs_buf		*bp,
2143 	xfs_buf_log_format_t	*buf_f)
2144 {
2145 	int			i;
2146 	int			item_index = 0;
2147 	int			bit = 0;
2148 	int			nbits = 0;
2149 	int			reg_buf_offset = 0;
2150 	int			reg_buf_bytes = 0;
2151 	int			next_unlinked_offset;
2152 	int			inodes_per_buf;
2153 	xfs_agino_t		*logged_nextp;
2154 	xfs_agino_t		*buffer_nextp;
2155 
2156 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2157 
2158 	/*
2159 	 * Post recovery validation only works properly on CRC enabled
2160 	 * filesystems.
2161 	 */
2162 	if (xfs_sb_version_hascrc(&mp->m_sb))
2163 		bp->b_ops = &xfs_inode_buf_ops;
2164 
2165 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2166 	for (i = 0; i < inodes_per_buf; i++) {
2167 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2168 			offsetof(xfs_dinode_t, di_next_unlinked);
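
		/*
		 * Example (illustrative): with 512-byte inodes in an 8kB
		 * buffer, inodes_per_buf = 16 and, for i = 3, the
		 * di_next_unlinked copy sits at byte offset 3 * 512 plus
		 * the field offset within the inode core.
		 */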
2169 
2170 		while (next_unlinked_offset >=
2171 		       (reg_buf_offset + reg_buf_bytes)) {
2172 			/*
2173 			 * The next di_next_unlinked field is beyond
2174 			 * the current logged region.  Find the next
2175 			 * logged region that contains or is beyond
2176 			 * the current di_next_unlinked field.
2177 			 */
2178 			bit += nbits;
2179 			bit = xfs_next_bit(buf_f->blf_data_map,
2180 					   buf_f->blf_map_size, bit);
2181 
2182 			/*
2183 			 * If there are no more logged regions in the
2184 			 * buffer, then we're done.
2185 			 */
2186 			if (bit == -1)
2187 				return 0;
2188 
2189 			nbits = xfs_contig_bits(buf_f->blf_data_map,
2190 						buf_f->blf_map_size, bit);
2191 			ASSERT(nbits > 0);
2192 			reg_buf_offset = bit << XFS_BLF_SHIFT;
2193 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2194 			item_index++;
2195 		}
2196 
2197 		/*
2198 		 * If the current logged region starts after the current
2199 		 * di_next_unlinked field, then move on to the next
2200 		 * di_next_unlinked field.
2201 		 */
2202 		if (next_unlinked_offset < reg_buf_offset)
2203 			continue;
2204 
2205 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
2206 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2207 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
2208 							BBTOB(bp->b_io_length));
2209 
2210 		/*
2211 		 * The current logged region contains a copy of the
2212 		 * current di_next_unlinked field.  Extract its value
2213 		 * and copy it to the buffer copy.
2214 		 */
2215 		logged_nextp = item->ri_buf[item_index].i_addr +
2216 				next_unlinked_offset - reg_buf_offset;
2217 		if (unlikely(*logged_nextp == 0)) {
2218 			xfs_alert(mp,
2219 		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2220 		"Trying to replay bad (0) inode di_next_unlinked field.",
2221 				item, bp);
2222 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2223 					 XFS_ERRLEVEL_LOW, mp);
2224 			return -EFSCORRUPTED;
2225 		}
2226 
2227 		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2228 		*buffer_nextp = *logged_nextp;
2229 
2230 		/*
2231 		 * If necessary, recalculate the CRC in the on-disk inode. We
2232 		 * have to leave the inode in a consistent state for whoever
2233 		 * reads it next....
2234 		 */
2235 		xfs_dinode_calc_crc(mp,
2236 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2237 
2238 	}
2239 
2240 	return 0;
2241 }
2242 
2243 /*
2244  * V5 filesystems know the age of the buffer on disk being recovered. We can
2245  * have newer objects on disk than we are replaying, and so for these cases we
2246  * don't want to replay the current change as that will make the buffer contents
2247  * temporarily invalid on disk.
2248  *
2249  * The magic number might not match the buffer type we are going to recover
2250  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2251  * extract the LSN of the existing object in the buffer based on its current
2252  * magic number.  If we don't recognise the magic number in the buffer, then
2253  * return an LSN of -1 so that the caller knows it was an unrecognised block and
2254  * so can recover the buffer.
2255  *
2256  * Note: we cannot rely solely on magic number matches to determine that the
2257  * buffer has a valid LSN - we also need to verify that it belongs to this
2258  * filesystem, so we need to extract the object's LSN and compare it to that
2259  * which we read from the superblock. If the UUIDs don't match, then we've got a
2260  * stale metadata block from an old filesystem instance that we need to recover
2261  * over the top of.
2262  */
2263 static xfs_lsn_t
2264 xlog_recover_get_buf_lsn(
2265 	struct xfs_mount	*mp,
2266 	struct xfs_buf		*bp)
2267 {
2268 	uint32_t		magic32;
2269 	uint16_t		magic16;
2270 	uint16_t		magicda;
2271 	void			*blk = bp->b_addr;
2272 	uuid_t			*uuid;
2273 	xfs_lsn_t		lsn = -1;
2274 
2275 	/* v4 filesystems always recover immediately */
2276 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2277 		goto recover_immediately;
2278 
2279 	magic32 = be32_to_cpu(*(__be32 *)blk);
2280 	switch (magic32) {
2281 	case XFS_ABTB_CRC_MAGIC:
2282 	case XFS_ABTC_CRC_MAGIC:
2283 	case XFS_ABTB_MAGIC:
2284 	case XFS_ABTC_MAGIC:
2285 	case XFS_RMAP_CRC_MAGIC:
2286 	case XFS_REFC_CRC_MAGIC:
2287 	case XFS_IBT_CRC_MAGIC:
2288 	case XFS_IBT_MAGIC: {
2289 		struct xfs_btree_block *btb = blk;
2290 
2291 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2292 		uuid = &btb->bb_u.s.bb_uuid;
2293 		break;
2294 	}
2295 	case XFS_BMAP_CRC_MAGIC:
2296 	case XFS_BMAP_MAGIC: {
2297 		struct xfs_btree_block *btb = blk;
2298 
2299 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2300 		uuid = &btb->bb_u.l.bb_uuid;
2301 		break;
2302 	}
2303 	case XFS_AGF_MAGIC:
2304 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2305 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2306 		break;
2307 	case XFS_AGFL_MAGIC:
2308 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2309 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2310 		break;
2311 	case XFS_AGI_MAGIC:
2312 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2313 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2314 		break;
2315 	case XFS_SYMLINK_MAGIC:
2316 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2317 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2318 		break;
2319 	case XFS_DIR3_BLOCK_MAGIC:
2320 	case XFS_DIR3_DATA_MAGIC:
2321 	case XFS_DIR3_FREE_MAGIC:
2322 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2323 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2324 		break;
2325 	case XFS_ATTR3_RMT_MAGIC:
2326 		/*
2327 		 * Remote attr blocks are written synchronously, rather than
2328 		 * being logged. That means they do not contain a valid LSN
2329 		 * (i.e. transactionally ordered) in them, and hence any time we
2330 		 * see a buffer to replay over the top of a remote attribute
2331 		 * block we should simply do so.
2332 		 */
2333 		goto recover_immediately;
2334 	case XFS_SB_MAGIC:
2335 		/*
2336 		 * superblock uuids are magic. We may or may not have a
2337 		 * sb_meta_uuid on disk, but it will be set in the in-core
2338 		 * superblock. We set the uuid pointer for verification
2339 		 * according to the superblock feature mask to ensure we check
2340 		 * the relevant UUID in the superblock.
2341 		 */
2342 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2343 		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2344 			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2345 		else
2346 			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2347 		break;
2348 	default:
2349 		break;
2350 	}
2351 
2352 	if (lsn != (xfs_lsn_t)-1) {
2353 		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2354 			goto recover_immediately;
2355 		return lsn;
2356 	}
2357 
2358 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2359 	switch (magicda) {
2360 	case XFS_DIR3_LEAF1_MAGIC:
2361 	case XFS_DIR3_LEAFN_MAGIC:
2362 	case XFS_DA3_NODE_MAGIC:
2363 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2364 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2365 		break;
2366 	default:
2367 		break;
2368 	}
2369 
2370 	if (lsn != (xfs_lsn_t)-1) {
2371 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2372 			goto recover_immediately;
2373 		return lsn;
2374 	}
2375 
2376 	/*
2377 	 * We do individual object checks on dquot and inode buffers as they
2378 	 * have their own individual LSN records. Also, we could have a stale
2379 	 * buffer here, so we have to at least recognise these buffer types.
2380 	 *
2381 	 * A notable complexity here is inode unlinked list processing - it logs
2382 	 * the inode directly in the buffer, but we don't know which inodes have
2383 	 * been modified, and there is no global buffer LSN. Hence we need to
2384 	 * recover all inode buffer types immediately. This problem will be
2385 	 * fixed by logical logging of the unlinked list modifications.
2386 	 */
2387 	magic16 = be16_to_cpu(*(__be16 *)blk);
2388 	switch (magic16) {
2389 	case XFS_DQUOT_MAGIC:
2390 	case XFS_DINODE_MAGIC:
2391 		goto recover_immediately;
2392 	default:
2393 		break;
2394 	}
2395 
2396 	/* unknown buffer contents, recover immediately */
2397 
2398 recover_immediately:
2399 	return (xfs_lsn_t)-1;
2400 
2401 }
2402 
2403 /*
2404  * Validate the recovered buffer is of the correct type and attach the
2405  * appropriate buffer operations to it for writeback. Magic numbers are in a
2406  * few places:
2407  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2408  *	the first 32 bits of the buffer (most blocks),
2409  *	inside a struct xfs_da_blkinfo at the start of the buffer.
2410  */
2411 static void
2412 xlog_recover_validate_buf_type(
2413 	struct xfs_mount	*mp,
2414 	struct xfs_buf		*bp,
2415 	xfs_buf_log_format_t	*buf_f,
2416 	xfs_lsn_t		current_lsn)
2417 {
2418 	struct xfs_da_blkinfo	*info = bp->b_addr;
2419 	uint32_t		magic32;
2420 	uint16_t		magic16;
2421 	uint16_t		magicda;
2422 	char			*warnmsg = NULL;
2423 
2424 	/*
2425 	 * We can only do post recovery validation on items on CRC enabled
2426 	 * filesystems as we need to know when the buffer was written to be able
2427 	 * to determine if we should have replayed the item. If we replay old
2428 	 * metadata over a newer buffer, then it will enter a temporarily
2429 	 * inconsistent state resulting in verification failures. Hence for now
2430 	 * just avoid the verification stage for non-CRC filesystems.
2431 	 */
2432 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2433 		return;
2434 
2435 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2436 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2437 	magicda = be16_to_cpu(info->magic);
2438 	switch (xfs_blft_from_flags(buf_f)) {
2439 	case XFS_BLFT_BTREE_BUF:
2440 		switch (magic32) {
2441 		case XFS_ABTB_CRC_MAGIC:
2442 		case XFS_ABTB_MAGIC:
2443 			bp->b_ops = &xfs_bnobt_buf_ops;
2444 			break;
2445 		case XFS_ABTC_CRC_MAGIC:
2446 		case XFS_ABTC_MAGIC:
2447 			bp->b_ops = &xfs_cntbt_buf_ops;
2448 			break;
2449 		case XFS_IBT_CRC_MAGIC:
2450 		case XFS_IBT_MAGIC:
2451 			bp->b_ops = &xfs_inobt_buf_ops;
2452 			break;
2453 		case XFS_FIBT_CRC_MAGIC:
2454 		case XFS_FIBT_MAGIC:
2455 			bp->b_ops = &xfs_finobt_buf_ops;
2456 			break;
2457 		case XFS_BMAP_CRC_MAGIC:
2458 		case XFS_BMAP_MAGIC:
2459 			bp->b_ops = &xfs_bmbt_buf_ops;
2460 			break;
2461 		case XFS_RMAP_CRC_MAGIC:
2462 			bp->b_ops = &xfs_rmapbt_buf_ops;
2463 			break;
2464 		case XFS_REFC_CRC_MAGIC:
2465 			bp->b_ops = &xfs_refcountbt_buf_ops;
2466 			break;
2467 		default:
2468 			warnmsg = "Bad btree block magic!";
2469 			break;
2470 		}
2471 		break;
2472 	case XFS_BLFT_AGF_BUF:
2473 		if (magic32 != XFS_AGF_MAGIC) {
2474 			warnmsg = "Bad AGF block magic!";
2475 			break;
2476 		}
2477 		bp->b_ops = &xfs_agf_buf_ops;
2478 		break;
2479 	case XFS_BLFT_AGFL_BUF:
2480 		if (magic32 != XFS_AGFL_MAGIC) {
2481 			warnmsg = "Bad AGFL block magic!";
2482 			break;
2483 		}
2484 		bp->b_ops = &xfs_agfl_buf_ops;
2485 		break;
2486 	case XFS_BLFT_AGI_BUF:
2487 		if (magic32 != XFS_AGI_MAGIC) {
2488 			warnmsg = "Bad AGI block magic!";
2489 			break;
2490 		}
2491 		bp->b_ops = &xfs_agi_buf_ops;
2492 		break;
2493 	case XFS_BLFT_UDQUOT_BUF:
2494 	case XFS_BLFT_PDQUOT_BUF:
2495 	case XFS_BLFT_GDQUOT_BUF:
2496 #ifdef CONFIG_XFS_QUOTA
2497 		if (magic16 != XFS_DQUOT_MAGIC) {
2498 			warnmsg = "Bad DQUOT block magic!";
2499 			break;
2500 		}
2501 		bp->b_ops = &xfs_dquot_buf_ops;
2502 #else
2503 		xfs_alert(mp,
2504 	"Trying to recover dquots without QUOTA support built in!");
2505 		ASSERT(0);
2506 #endif
2507 		break;
2508 	case XFS_BLFT_DINO_BUF:
2509 		if (magic16 != XFS_DINODE_MAGIC) {
2510 			warnmsg = "Bad INODE block magic!";
2511 			break;
2512 		}
2513 		bp->b_ops = &xfs_inode_buf_ops;
2514 		break;
2515 	case XFS_BLFT_SYMLINK_BUF:
2516 		if (magic32 != XFS_SYMLINK_MAGIC) {
2517 			warnmsg = "Bad symlink block magic!";
2518 			break;
2519 		}
2520 		bp->b_ops = &xfs_symlink_buf_ops;
2521 		break;
2522 	case XFS_BLFT_DIR_BLOCK_BUF:
2523 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2524 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2525 			warnmsg = "Bad dir block magic!";
2526 			break;
2527 		}
2528 		bp->b_ops = &xfs_dir3_block_buf_ops;
2529 		break;
2530 	case XFS_BLFT_DIR_DATA_BUF:
2531 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2532 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2533 			warnmsg = "Bad dir data magic!";
2534 			break;
2535 		}
2536 		bp->b_ops = &xfs_dir3_data_buf_ops;
2537 		break;
2538 	case XFS_BLFT_DIR_FREE_BUF:
2539 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2540 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2541 			warnmsg = "Bad dir3 free magic!";
2542 			break;
2543 		}
2544 		bp->b_ops = &xfs_dir3_free_buf_ops;
2545 		break;
2546 	case XFS_BLFT_DIR_LEAF1_BUF:
2547 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2548 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2549 			warnmsg = "Bad dir leaf1 magic!";
2550 			break;
2551 		}
2552 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2553 		break;
2554 	case XFS_BLFT_DIR_LEAFN_BUF:
2555 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2556 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2557 			warnmsg = "Bad dir leafn magic!";
2558 			break;
2559 		}
2560 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2561 		break;
2562 	case XFS_BLFT_DA_NODE_BUF:
2563 		if (magicda != XFS_DA_NODE_MAGIC &&
2564 		    magicda != XFS_DA3_NODE_MAGIC) {
2565 			warnmsg = "Bad da node magic!";
2566 			break;
2567 		}
2568 		bp->b_ops = &xfs_da3_node_buf_ops;
2569 		break;
2570 	case XFS_BLFT_ATTR_LEAF_BUF:
2571 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2572 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2573 			warnmsg = "Bad attr leaf magic!";
2574 			break;
2575 		}
2576 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2577 		break;
2578 	case XFS_BLFT_ATTR_RMT_BUF:
2579 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2580 			warnmsg = "Bad attr remote magic!";
2581 			break;
2582 		}
2583 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2584 		break;
2585 	case XFS_BLFT_SB_BUF:
2586 		if (magic32 != XFS_SB_MAGIC) {
2587 			warnmsg = "Bad SB block magic!";
2588 			break;
2589 		}
2590 		bp->b_ops = &xfs_sb_buf_ops;
2591 		break;
2592 #ifdef CONFIG_XFS_RT
2593 	case XFS_BLFT_RTBITMAP_BUF:
2594 	case XFS_BLFT_RTSUMMARY_BUF:
2595 		/* no magic numbers for verification of RT buffers */
2596 		bp->b_ops = &xfs_rtbuf_ops;
2597 		break;
2598 #endif /* CONFIG_XFS_RT */
2599 	default:
2600 		xfs_warn(mp, "Unknown buffer type %d!",
2601 			 xfs_blft_from_flags(buf_f));
2602 		break;
2603 	}
2604 
2605 	/*
2606 	 * Nothing else to do in the case of a NULL current LSN as this means
2607 	 * the buffer is more recent than the change in the log and will be
2608 	 * skipped.
2609 	 */
2610 	if (current_lsn == NULLCOMMITLSN)
2611 		return;
2612 
2613 	if (warnmsg) {
2614 		xfs_warn(mp, warnmsg);
2615 		ASSERT(0);
2616 	}
2617 
2618 	/*
2619 	 * We must update the metadata LSN of the buffer as it is written out to
2620 	 * ensure that older transactions never replay over this one and corrupt
2621 	 * the buffer. This can occur if log recovery is interrupted at some
2622 	 * point after the current transaction completes, at which point a
2623 	 * subsequent mount starts recovery from the beginning.
2624 	 *
2625 	 * Write verifiers update the metadata LSN from log items attached to
2626 	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2627 	 * the verifier. We'll clean it up in our ->iodone() callback.
2628 	 */
2629 	if (bp->b_ops) {
2630 		struct xfs_buf_log_item	*bip;
2631 
2632 		ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2633 		bp->b_iodone = xlog_recover_iodone;
2634 		xfs_buf_item_init(bp, mp);
2635 		bip = bp->b_log_item;
2636 		bip->bli_item.li_lsn = current_lsn;
2637 	}
2638 }
2639 
2640 /*
2641  * Perform a 'normal' buffer recovery.  Each logged region of the
2642  * buffer should be copied over the corresponding region in the
2643  * given buffer.  The bitmap in the buf log format structure indicates
2644  * where to place the logged data.
2645  */
2646 STATIC void
2647 xlog_recover_do_reg_buffer(
2648 	struct xfs_mount	*mp,
2649 	xlog_recover_item_t	*item,
2650 	struct xfs_buf		*bp,
2651 	xfs_buf_log_format_t	*buf_f,
2652 	xfs_lsn_t		current_lsn)
2653 {
2654 	int			i;
2655 	int			bit;
2656 	int			nbits;
2657 	xfs_failaddr_t		fa;
2658 
2659 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2660 
2661 	bit = 0;
2662 	i = 1;  /* 0 is the buf format structure */
2663 	while (1) {
2664 		bit = xfs_next_bit(buf_f->blf_data_map,
2665 				   buf_f->blf_map_size, bit);
2666 		if (bit == -1)
2667 			break;
2668 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2669 					buf_f->blf_map_size, bit);
2670 		ASSERT(nbits > 0);
2671 		ASSERT(item->ri_buf[i].i_addr != NULL);
2672 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2673 		ASSERT(BBTOB(bp->b_io_length) >=
2674 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2675 
2676 		/*
2677 		 * The dirty regions logged in the buffer, even though
2678 		 * contiguous, may span multiple chunks. This is because the
2679 		 * dirty region may span a physical page boundary in a buffer
2680 		 * and hence be split into two separate vectors for writing into
2681 		 * the log. Hence we need to trim nbits back to the length of
2682 		 * the current region being copied out of the log.
2683 		 */
2684 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2685 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
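
		/*
		 * Example (illustrative): XFS_BLF_CHUNK is 128 bytes, so a
		 * 640-byte dirty region (5 chunks) split across two log
		 * vectors may arrive here as a 512-byte vector, trimming
		 * nbits from 5 down to 4.
		 */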
2686 
2687 		/*
2688 		 * Do a sanity check if this is a dquot buffer. Just checking
2689 		 * the first dquot in the buffer should do. XXX: This is
2690 		 * probably a good thing to do for other buf types also.
2691 		 */
2692 		fa = NULL;
2693 		if (buf_f->blf_flags &
2694 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2695 			if (item->ri_buf[i].i_addr == NULL) {
2696 				xfs_alert(mp,
2697 					"XFS: NULL dquot in %s.", __func__);
2698 				goto next;
2699 			}
2700 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2701 				xfs_alert(mp,
2702 					"XFS: dquot too small (%d) in %s.",
2703 					item->ri_buf[i].i_len, __func__);
2704 				goto next;
2705 			}
2706 			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2707 					       -1, 0);
2708 			if (fa) {
2709 				xfs_alert(mp,
2710 	"dquot corrupt at %pS trying to replay into block 0x%llx",
2711 					fa, bp->b_bn);
2712 				goto next;
2713 			}
2714 		}
2715 
2716 		memcpy(xfs_buf_offset(bp,
2717 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2718 			item->ri_buf[i].i_addr,		/* source */
2719 			nbits<<XFS_BLF_SHIFT);		/* length */
2720  next:
2721 		i++;
2722 		bit += nbits;
2723 	}
2724 
2725 	/* Shouldn't be any more regions */
2726 	ASSERT(i == item->ri_total);
2727 
2728 	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2729 }
2730 
2731 /*
2732  * Perform a dquot buffer recovery.
2733  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2734  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2735  * Else, treat it as a regular buffer and do recovery.
2736  *
2737  * Return false if the buffer was tossed and true if we recovered the buffer to
2738  * indicate to the caller if the buffer needs writing.
2739  */
2740 STATIC bool
2741 xlog_recover_do_dquot_buffer(
2742 	struct xfs_mount		*mp,
2743 	struct xlog			*log,
2744 	struct xlog_recover_item	*item,
2745 	struct xfs_buf			*bp,
2746 	struct xfs_buf_log_format	*buf_f)
2747 {
2748 	uint			type;
2749 
2750 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2751 
2752 	/*
2753 	 * Filesystems are required to send in quota flags at mount time.
2754 	 */
2755 	if (!mp->m_qflags)
2756 		return false;
2757 
2758 	type = 0;
2759 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2760 		type |= XFS_DQ_USER;
2761 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2762 		type |= XFS_DQ_PROJ;
2763 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2764 		type |= XFS_DQ_GROUP;
2765 	/*
2766 	 * This type of quota was turned off, so ignore this buffer.
2767 	 */
2768 	if (log->l_quotaoffs_flag & type)
2769 		return false;
2770 
2771 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2772 	return true;
2773 }
2774 
2775 /*
2776  * This routine replays a modification made to a buffer at runtime.
2777  * There are actually two types of buffer, regular and inode, which
2778  * are handled differently.  Inode buffers are handled differently
2779  * in that we only recover a specific set of data from them, namely
2780  * the inode di_next_unlinked fields.  This is because all other inode
2781  * data is actually logged via inode records and any data we replay
2782  * here which overlaps that may be stale.
2783  *
2784  * When meta-data buffers are freed at run time we log a buffer item
2785  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2786  * of the buffer in the log should not be replayed at recovery time.
2787  * This is so that if the blocks covered by the buffer are reused for
2788  * file data before we crash we don't end up replaying old, freed
2789  * meta-data into a user's file.
2790  *
2791  * To handle the cancellation of buffer log items, we make two passes
2792  * over the log during recovery.  During the first we build a table of
2793  * those buffers which have been cancelled, and during the second we
2794  * only replay those buffers which do not have corresponding cancel
2795  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2796  * for more details on the implementation of the table of cancel records.
2797  */
2798 STATIC int
2799 xlog_recover_buffer_pass2(
2800 	struct xlog			*log,
2801 	struct list_head		*buffer_list,
2802 	struct xlog_recover_item	*item,
2803 	xfs_lsn_t			current_lsn)
2804 {
2805 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2806 	xfs_mount_t		*mp = log->l_mp;
2807 	xfs_buf_t		*bp;
2808 	int			error;
2809 	uint			buf_flags;
2810 	xfs_lsn_t		lsn;
2811 
2812 	/*
2813 	 * In this pass we only want to recover the buffers which have
2814 	 * not been cancelled and are not cancellation buffers themselves.
2815 	 */
2816 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2817 			buf_f->blf_len, buf_f->blf_flags)) {
2818 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2819 		return 0;
2820 	}
2821 
2822 	trace_xfs_log_recover_buf_recover(log, buf_f);
2823 
2824 	buf_flags = 0;
2825 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2826 		buf_flags |= XBF_UNMAPPED;
2827 
2828 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2829 			  buf_flags, NULL);
2830 	if (!bp)
2831 		return -ENOMEM;
2832 	error = bp->b_error;
2833 	if (error) {
2834 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2835 		goto out_release;
2836 	}
2837 
2838 	/*
2839 	 * Recover the buffer only if we get an LSN from it and it's less than
2840 	 * the lsn of the transaction we are replaying.
2841 	 *
2842 	 * Note that we have to be extremely careful of readahead here.
2843 	 * Readahead does not attach verifiers to the buffers, so if we don't
2844 	 * actually do any replay after readahead because the LSN we found
2845 	 * in the buffer is more recent than the current transaction, then we
2846 	 * need to attach the verifier directly. Failure to do so means that
2847 	 * future recovery actions (e.g. EFI and unlinked list recovery) can
2848 	 * operate on the buffers without the verifier attached. This
2849 	 * can lead to blocks on disk having the correct content but a stale
2850 	 * CRC.
2851 	 *
2852 	 * It is safe to assume these clean buffers are currently up to date.
2853 	 * If the buffer is dirtied by a later transaction being replayed, then
2854 	 * the verifier will be reset to match whatever recovery turns that
2855 	 * buffer into.
2856 	 */
2857 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2858 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2859 		trace_xfs_log_recover_buf_skip(log, buf_f);
2860 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2861 		goto out_release;
2862 	}
2863 
2864 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2865 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2866 		if (error)
2867 			goto out_release;
2868 	} else if (buf_f->blf_flags &
2869 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2870 		bool	dirty;
2871 
2872 		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2873 		if (!dirty)
2874 			goto out_release;
2875 	} else {
2876 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2877 	}
2878 
2879 	/*
2880 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2881 	 * slower when taking into account all the buffers to be flushed.
2882 	 *
2883 	 * Also make sure that only inode buffers with good sizes stay in
2884 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2885 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
2886 	 * buffers in the log can be a different size if the log was generated
2887 	 * by an older kernel using unclustered inode buffers or a newer kernel
2888 	 * running with a different inode cluster size.  Regardless, if
2889 	 * the inode buffer size isn't max(blocksize, mp->m_inode_cluster_size)
2890 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2891 	 * the buffer out of the buffer cache so that the buffer won't
2892 	 * overlap with future reads of those inodes.
2893 	 */
2894 	if (XFS_DINODE_MAGIC ==
2895 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2896 	    (BBTOB(bp->b_io_length) != max(log->l_mp->m_sb.sb_blocksize,
2897 			(uint32_t)log->l_mp->m_inode_cluster_size))) {
2898 		xfs_buf_stale(bp);
2899 		error = xfs_bwrite(bp);
2900 	} else {
2901 		ASSERT(bp->b_target->bt_mount == mp);
2902 		bp->b_iodone = xlog_recover_iodone;
2903 		xfs_buf_delwri_queue(bp, buffer_list);
2904 	}
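
	/*
	 * Example (illustrative): with a 4kB block size and an 8kB inode
	 * cluster size, inode buffers must be 8kB here; a 4kB inode buffer
	 * replayed from a log written by an older kernel is marked stale
	 * and written synchronously above so it cannot linger in the cache
	 * and overlap later inode reads.
	 */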
2905 
2906 out_release:
2907 	xfs_buf_relse(bp);
2908 	return error;
2909 }
2910 
2911 /*
2912  * Inode fork owner changes
2913  *
2914  * If we have been told that we have to reparent the inode fork, it's because an
2915  * extent swap operation on a CRC enabled filesystem has been done and we are
2916  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2917  * owners of it.
2918  *
2919  * The complexity here is that we don't have an inode context to work with, so
2920  * after we've replayed the inode we need to instantiate one.  This is where the
2921  * fun begins.
2922  *
2923  * We are in the middle of log recovery, so we can't run transactions. That
2924  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2925  * that will result in the corresponding iput() running the inode through
2926  * xfs_inactive(). If we've just replayed an inode core that changes the link
2927  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2928  * transactions (bad!).
2929  *
2930  * So, to avoid this, we instantiate an inode directly from the inode core we've
2931  * just recovered. We have the buffer still locked, and all we really need to
2932  * instantiate is the inode core and the forks being modified. We can do this
2933  * manually, then run the inode btree owner change, and then tear down the
2934  * xfs_inode without having to run any transactions at all.
2935  *
2936  * Also, because we don't have a transaction context available here, but we
2937  * do need to gather all the buffers we modify for writeback, we pass the
2938  * buffer_list to the operation for it to use.
2939  */
2940 
2941 STATIC int
2942 xfs_recover_inode_owner_change(
2943 	struct xfs_mount	*mp,
2944 	struct xfs_dinode	*dip,
2945 	struct xfs_inode_log_format *in_f,
2946 	struct list_head	*buffer_list)
2947 {
2948 	struct xfs_inode	*ip;
2949 	int			error;
2950 
2951 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2952 
2953 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2954 	if (!ip)
2955 		return -ENOMEM;
2956 
2957 	/* instantiate the inode */
2958 	xfs_inode_from_disk(ip, dip);
2959 	ASSERT(ip->i_d.di_version >= 3);
2960 
2961 	error = xfs_iformat_fork(ip, dip);
2962 	if (error)
2963 		goto out_free_ip;
2964 
2965 	if (!xfs_inode_verify_forks(ip)) {
2966 		error = -EFSCORRUPTED;
2967 		goto out_free_ip;
2968 	}
2969 
2970 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2971 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2972 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2973 					      ip->i_ino, buffer_list);
2974 		if (error)
2975 			goto out_free_ip;
2976 	}
2977 
2978 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2979 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2980 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2981 					      ip->i_ino, buffer_list);
2982 		if (error)
2983 			goto out_free_ip;
2984 	}
2985 
2986 out_free_ip:
2987 	xfs_inode_free(ip);
2988 	return error;
2989 }
2990 
2991 STATIC int
2992 xlog_recover_inode_pass2(
2993 	struct xlog			*log,
2994 	struct list_head		*buffer_list,
2995 	struct xlog_recover_item	*item,
2996 	xfs_lsn_t			current_lsn)
2997 {
2998 	struct xfs_inode_log_format	*in_f;
2999 	xfs_mount_t		*mp = log->l_mp;
3000 	xfs_buf_t		*bp;
3001 	xfs_dinode_t		*dip;
3002 	int			len;
3003 	char			*src;
3004 	char			*dest;
3005 	int			error;
3006 	int			attr_index;
3007 	uint			fields;
3008 	struct xfs_log_dinode	*ldip;
3009 	uint			isize;
3010 	int			need_free = 0;
3011 
3012 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3013 		in_f = item->ri_buf[0].i_addr;
3014 	} else {
3015 		in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
3016 		need_free = 1;
3017 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
3018 		if (error)
3019 			goto error;
3020 	}
3021 
3022 	/*
3023 	 * Inode buffers can be freed; look out for that case
3024 	 * and do not replay the inode.
3025 	 */
3026 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3027 					in_f->ilf_len, 0)) {
3028 		error = 0;
3029 		trace_xfs_log_recover_inode_cancel(log, in_f);
3030 		goto error;
3031 	}
3032 	trace_xfs_log_recover_inode_recover(log, in_f);
3033 
3034 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3035 			  &xfs_inode_buf_ops);
3036 	if (!bp) {
3037 		error = -ENOMEM;
3038 		goto error;
3039 	}
3040 	error = bp->b_error;
3041 	if (error) {
3042 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3043 		goto out_release;
3044 	}
3045 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3046 	dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3047 
3048 	/*
3049 	 * Make sure the place we're flushing out to really looks
3050 	 * like an inode!
3051 	 */
3052 	if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
3053 		xfs_alert(mp,
3054 	"%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
3055 			__func__, dip, bp, in_f->ilf_ino);
3056 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3057 				 XFS_ERRLEVEL_LOW, mp);
3058 		error = -EFSCORRUPTED;
3059 		goto out_release;
3060 	}
3061 	ldip = item->ri_buf[1].i_addr;
3062 	if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3063 		xfs_alert(mp,
3064 			"%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
3065 			__func__, item, in_f->ilf_ino);
3066 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3067 				 XFS_ERRLEVEL_LOW, mp);
3068 		error = -EFSCORRUPTED;
3069 		goto out_release;
3070 	}
3071 
3072 	/*
3073 	 * If the inode has an LSN in it, recover the inode only if it's less
3074 	 * than the lsn of the transaction we are replaying. Note: we still
3075 	 * need to replay an owner change even though the inode is more recent
3076 	 * than the transaction as there is no guarantee that all the btree
3077 	 * blocks are more recent than this transaction, too.
3078 	 */
3079 	if (dip->di_version >= 3) {
3080 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
3081 
3082 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3083 			trace_xfs_log_recover_inode_skip(log, in_f);
3084 			error = 0;
3085 			goto out_owner_change;
3086 		}
3087 	}
3088 
3089 	/*
3090 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3091 	 * are transactional and if ordering is necessary we can determine that
3092 	 * more accurately by the LSN field in the V3 inode core. Don't trust
3093 	 * the inode versions, as we might be changing them here - use the
3094 	 * superblock flag to determine whether we need to look at di_flushiter
3095 	 * to skip replay when the on disk inode is newer than the log one.
3096 	 */
3097 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3098 	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3099 		/*
3100 		 * Deal with the wrap case: the on-disk counter has reached
3101 		 * DI_MAX_FLUSH while the logged value has wrapped to a small number.
3102 		 */
3103 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3104 		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3105 			/* do nothing */
3106 		} else {
3107 			trace_xfs_log_recover_inode_skip(log, in_f);
3108 			error = 0;
3109 			goto out_release;
3110 		}
3111 	}
3112 
3113 	/* Take the opportunity to reset the flush iteration count */
3114 	ldip->di_flushiter = 0;
3115 
3116 	if (unlikely(S_ISREG(ldip->di_mode))) {
3117 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3118 		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3119 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3120 					 XFS_ERRLEVEL_LOW, mp, ldip,
3121 					 sizeof(*ldip));
3122 			xfs_alert(mp,
3123 		"%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3124 		"ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3125 				__func__, item, dip, bp, in_f->ilf_ino);
3126 			error = -EFSCORRUPTED;
3127 			goto out_release;
3128 		}
3129 	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
3130 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3131 		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3132 		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3133 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3134 					     XFS_ERRLEVEL_LOW, mp, ldip,
3135 					     sizeof(*ldip));
3136 			xfs_alert(mp,
3137 		"%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3138 		"ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3139 				__func__, item, dip, bp, in_f->ilf_ino);
3140 			error = -EFSCORRUPTED;
3141 			goto out_release;
3142 		}
3143 	}
3144 	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)) {
3145 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3146 				     XFS_ERRLEVEL_LOW, mp, ldip,
3147 				     sizeof(*ldip));
3148 		xfs_alert(mp,
3149 	"%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3150 	"dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3151 			__func__, item, dip, bp, in_f->ilf_ino,
3152 			ldip->di_nextents + ldip->di_anextents,
3153 			ldip->di_nblocks);
3154 		error = -EFSCORRUPTED;
3155 		goto out_release;
3156 	}
3157 	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3158 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3159 				     XFS_ERRLEVEL_LOW, mp, ldip,
3160 				     sizeof(*ldip));
3161 		xfs_alert(mp,
3162 	"%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3163 	"dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3164 			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3165 		error = -EFSCORRUPTED;
3166 		goto out_release;
3167 	}
3168 	isize = xfs_log_dinode_size(ldip->di_version);
3169 	if (unlikely(item->ri_buf[1].i_len > isize)) {
3170 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3171 				     XFS_ERRLEVEL_LOW, mp, ldip,
3172 				     sizeof(*ldip));
3173 		xfs_alert(mp,
3174 			"%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3175 			__func__, item->ri_buf[1].i_len, item);
3176 		error = -EFSCORRUPTED;
3177 		goto out_release;
3178 	}
3179 
3180 	/* recover the log dinode inode into the on disk inode */
3181 	xfs_log_dinode_to_disk(ldip, dip);
3182 
3183 	fields = in_f->ilf_fields;
3184 	if (fields & XFS_ILOG_DEV)
3185 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3186 
3187 	if (in_f->ilf_size == 2)
3188 		goto out_owner_change;
3189 	len = item->ri_buf[2].i_len;
3190 	src = item->ri_buf[2].i_addr;
3191 	ASSERT(in_f->ilf_size <= 4);
3192 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3193 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
3194 	       (len == in_f->ilf_dsize));
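
	/*
	 * Note (illustrative): ilf_size counts the log regions for this
	 * item, so 2 means the format structure plus the inode core only,
	 * 3 adds one fork of data, and 4 means both the data and attr
	 * forks were logged.
	 */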
3195 
3196 	switch (fields & XFS_ILOG_DFORK) {
3197 	case XFS_ILOG_DDATA:
3198 	case XFS_ILOG_DEXT:
3199 		memcpy(XFS_DFORK_DPTR(dip), src, len);
3200 		break;
3201 
3202 	case XFS_ILOG_DBROOT:
3203 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3204 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3205 				 XFS_DFORK_DSIZE(dip, mp));
3206 		break;
3207 
3208 	default:
3209 		/*
3210 		 * There are no data fork flags set.
3211 		 */
3212 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
3213 		break;
3214 	}
3215 
3216 	/*
3217 	 * If we logged any attribute data, recover it.  There may or
3218 	 * may not have been any other non-core data logged in this
3219 	 * transaction.
3220 	 */
3221 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3222 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3223 			attr_index = 3;
3224 		} else {
3225 			attr_index = 2;
3226 		}
3227 		len = item->ri_buf[attr_index].i_len;
3228 		src = item->ri_buf[attr_index].i_addr;
3229 		ASSERT(len == in_f->ilf_asize);
3230 
3231 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3232 		case XFS_ILOG_ADATA:
3233 		case XFS_ILOG_AEXT:
3234 			dest = XFS_DFORK_APTR(dip);
3235 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3236 			memcpy(dest, src, len);
3237 			break;
3238 
3239 		case XFS_ILOG_ABROOT:
3240 			dest = XFS_DFORK_APTR(dip);
3241 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3242 					 len, (xfs_bmdr_block_t*)dest,
3243 					 XFS_DFORK_ASIZE(dip, mp));
3244 			break;
3245 
3246 		default:
3247 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3248 			ASSERT(0);
3249 			error = -EIO;
3250 			goto out_release;
3251 		}
3252 	}
3253 
3254 out_owner_change:
3255 	/* Recover the swapext owner change unless inode has been deleted */
3256 	if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3257 	    (dip->di_mode != 0))
3258 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
3259 						       buffer_list);
3260 	/* re-generate the checksum. */
3261 	xfs_dinode_calc_crc(log->l_mp, dip);
3262 
3263 	ASSERT(bp->b_target->bt_mount == mp);
3264 	bp->b_iodone = xlog_recover_iodone;
3265 	xfs_buf_delwri_queue(bp, buffer_list);
3266 
3267 out_release:
3268 	xfs_buf_relse(bp);
3269 error:
3270 	if (need_free)
3271 		kmem_free(in_f);
3272 	return error;
3273 }
3274 
3275 /*
3276  * Recover QUOTAOFF records. We simply make a note of it in the xlog
3277  * structure, so that we know not to do any dquot item or dquot buffer recovery,
3278  * structure, so that we know not to do any dquot item or dquot buffer recovery
3279  * of that type.
3280 STATIC int
3281 xlog_recover_quotaoff_pass1(
3282 	struct xlog			*log,
3283 	struct xlog_recover_item	*item)
3284 {
3285 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
3286 	ASSERT(qoff_f);
3287 
3288 	/*
3289 	 * The logitem format's flag tells us if this was user quotaoff,
3290 	 * group/project quotaoff or both.
3291 	 */
3292 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3293 		log->l_quotaoffs_flag |= XFS_DQ_USER;
3294 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3295 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3296 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3297 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3298 
3299 	return 0;
3300 }
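
/*
 * Example (illustrative): if pass 1 saw a user quotaoff record,
 * l_quotaoffs_flag has XFS_DQ_USER set, and both xlog_recover_dquot_pass2()
 * and xlog_recover_do_dquot_buffer() will then skip every user dquot item
 * and dquot buffer they subsequently encounter.
 */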
3301 
3302 /*
3303  * Recover a dquot record
3304  */
3305 STATIC int
3306 xlog_recover_dquot_pass2(
3307 	struct xlog			*log,
3308 	struct list_head		*buffer_list,
3309 	struct xlog_recover_item	*item,
3310 	xfs_lsn_t			current_lsn)
3311 {
3312 	xfs_mount_t		*mp = log->l_mp;
3313 	xfs_buf_t		*bp;
3314 	struct xfs_disk_dquot	*ddq, *recddq;
3315 	xfs_failaddr_t		fa;
3316 	int			error;
3317 	xfs_dq_logformat_t	*dq_f;
3318 	uint			type;
3319 
3320 
3321 	/*
3322 	 * Filesystems are required to send in quota flags at mount time.
3323 	 */
3324 	if (mp->m_qflags == 0)
3325 		return 0;
3326 
3327 	recddq = item->ri_buf[1].i_addr;
3328 	if (recddq == NULL) {
3329 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3330 		return -EIO;
3331 	}
3332 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3333 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3334 			item->ri_buf[1].i_len, __func__);
3335 		return -EIO;
3336 	}
3337 
3338 	/*
3339 	 * This type of quota was turned off, so ignore this record.
3340 	 */
3341 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3342 	ASSERT(type);
3343 	if (log->l_quotaoffs_flag & type)
3344 		return 0;
3345 
3346 	/*
3347 	 * At this point we know that quota was _not_ turned off.
3348 	 * Since the mount flags are not indicating to us otherwise, this
3349 	 * must mean that quota is on, and the dquot needs to be replayed.
3350 	 * Remember that we may not have fully recovered the superblock yet,
3351 	 * so we can't do the usual trick of looking at the SB quota bits.
3352 	 *
3353 	 * The other possibility, of course, is that the quota subsystem was
3354 	 * removed since the last mount - ENOSYS.
3355 	 */
3356 	dq_f = item->ri_buf[0].i_addr;
3357 	ASSERT(dq_f);
3358 	fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3359 	if (fa) {
3360 		xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3361 				dq_f->qlf_id, fa);
3362 		return -EIO;
3363 	}
3364 	ASSERT(dq_f->qlf_len == 1);
3365 
3366 	/*
3367 	 * At this point we are assuming that the dquots have been allocated
3368 	 * and hence the buffer has valid dquots stamped in it. It should,
3369 	 * therefore, pass verifier validation. If the dquot is bad, then we'll
3370 	 * return an error here, so we don't need to specifically check
3371 	 * the dquot in the buffer after the verifier has run.
3372 	 */
3373 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3374 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3375 				   &xfs_dquot_buf_ops);
3376 	if (error)
3377 		return error;
3378 
3379 	ASSERT(bp);
3380 	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3381 
3382 	/*
3383 	 * If the dquot has an LSN in it, recover the dquot only if it's less
3384 	 * than the lsn of the transaction we are replaying.
3385 	 */
3386 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3387 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3388 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3389 
3390 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3391 			goto out_release;
3392 		}
3393 	}
3394 
3395 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3396 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3397 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3398 				 XFS_DQUOT_CRC_OFF);
3399 	}
3400 
3401 	ASSERT(dq_f->qlf_size == 2);
3402 	ASSERT(bp->b_target->bt_mount == mp);
3403 	bp->b_iodone = xlog_recover_iodone;
3404 	xfs_buf_delwri_queue(bp, buffer_list);
3405 
3406 out_release:
3407 	xfs_buf_relse(bp);
3408 	return 0;
3409 }
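
/*
 * Editor's sketch (not part of the original source): the LSN gate used in
 * xlog_recover_dquot_pass2() above, in isolation.  A block whose on-disk
 * LSN is already at or beyond the LSN of the transaction being replayed
 * is newer than the log record, so replaying it would roll the block
 * backwards.  The kernel's XFS_LSN_CMP() compares the cycle and block
 * halves of the LSN separately; a plain signed 64-bit compare is used
 * here purely for illustration.
 */
#if 0	/* illustration only */
#include <stdbool.h>
#include <stdint.h>

typedef int64_t lsn_t;	/* stand-in for xfs_lsn_t */

/* Replay iff the on-disk LSN is unset (0 or -1) or strictly older. */
static bool should_replay(lsn_t disk_lsn, lsn_t current_lsn)
{
	if (disk_lsn == 0 || disk_lsn == -1)
		return true;
	return disk_lsn < current_lsn;
}
#endif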
3410 
3411 /*
3412  * This routine is called to create an in-core extent free intent
3413  * item from the efi format structure which was logged on disk.
3414  * It allocates an in-core efi, copies the extents from the format
3415  * structure into it, and adds the efi to the AIL with the given
3416  * LSN.
3417  */
3418 STATIC int
3419 xlog_recover_efi_pass2(
3420 	struct xlog			*log,
3421 	struct xlog_recover_item	*item,
3422 	xfs_lsn_t			lsn)
3423 {
3424 	int				error;
3425 	struct xfs_mount		*mp = log->l_mp;
3426 	struct xfs_efi_log_item		*efip;
3427 	struct xfs_efi_log_format	*efi_formatp;
3428 
3429 	efi_formatp = item->ri_buf[0].i_addr;
3430 
3431 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3432 	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3433 	if (error) {
3434 		xfs_efi_item_free(efip);
3435 		return error;
3436 	}
3437 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3438 
3439 	spin_lock(&log->l_ailp->ail_lock);
3440 	/*
3441 	 * The EFI has two references. One for the EFD and one for EFI to ensure
3442 	 * it makes it into the AIL. Insert the EFI into the AIL directly and
3443 	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3444 	 * AIL lock.
3445 	 */
3446 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3447 	xfs_efi_release(efip);
3448 	return 0;
3449 }
3450 
3451 
3452 /*
3453  * This routine is called when an EFD format structure is found in a committed
3454  * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3455  * was still in the log. To do this it searches the AIL for the EFI with an id
3456  * equal to that in the EFD format structure. If we find it we drop the EFD
3457  * reference, which removes the EFI from the AIL and frees it.
3458  */
3459 STATIC int
3460 xlog_recover_efd_pass2(
3461 	struct xlog			*log,
3462 	struct xlog_recover_item	*item)
3463 {
3464 	xfs_efd_log_format_t	*efd_formatp;
3465 	xfs_efi_log_item_t	*efip = NULL;
3466 	xfs_log_item_t		*lip;
3467 	uint64_t		efi_id;
3468 	struct xfs_ail_cursor	cur;
3469 	struct xfs_ail		*ailp = log->l_ailp;
3470 
3471 	efd_formatp = item->ri_buf[0].i_addr;
3472 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3473 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3474 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3475 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3476 	efi_id = efd_formatp->efd_efi_id;
3477 
3478 	/*
3479 	 * Search for the EFI with the id in the EFD format structure in the
3480 	 * AIL.
3481 	 */
3482 	spin_lock(&ailp->ail_lock);
3483 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3484 	while (lip != NULL) {
3485 		if (lip->li_type == XFS_LI_EFI) {
3486 			efip = (xfs_efi_log_item_t *)lip;
3487 			if (efip->efi_format.efi_id == efi_id) {
3488 				/*
3489 				 * Drop the EFD reference to the EFI. This
3490 				 * removes the EFI from the AIL and frees it.
3491 				 */
3492 				spin_unlock(&ailp->ail_lock);
3493 				xfs_efi_release(efip);
3494 				spin_lock(&ailp->ail_lock);
3495 				break;
3496 			}
3497 		}
3498 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3499 	}
3500 
3501 	xfs_trans_ail_cursor_done(&cur);
3502 	spin_unlock(&ailp->ail_lock);
3503 
3504 	return 0;
3505 }
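
/*
 * Editor's sketch (not part of the original source): the intent/done
 * pairing above reduced to a generic pattern: walk a list of pending
 * intents and, when a "done" record names a matching id, drop the
 * reference that keeps the intent alive.  The singly linked list and its
 * types are hypothetical; the kernel walks the AIL with a cursor instead.
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdlib.h>

struct intent {
	uint64_t	id;
	struct intent	*next;
};

/* Unlink and free the intent matching "id"; returns 1 if one was found. */
static int cancel_intent(struct intent **head, uint64_t id)
{
	struct intent **pp;

	for (pp = head; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->id == id) {
			struct intent *victim = *pp;

			*pp = victim->next;
			free(victim);
			return 1;
		}
	}
	return 0;	/* no matching intent still logged: nothing to do */
}
#endif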
3506 
3507 /*
3508  * This routine is called to create an in-core extent rmap update
3509  * item from the rui format structure which was logged on disk.
3510  * It allocates an in-core rui, copies the extents from the format
3511  * structure into it, and adds the rui to the AIL with the given
3512  * LSN.
3513  */
3514 STATIC int
3515 xlog_recover_rui_pass2(
3516 	struct xlog			*log,
3517 	struct xlog_recover_item	*item,
3518 	xfs_lsn_t			lsn)
3519 {
3520 	int				error;
3521 	struct xfs_mount		*mp = log->l_mp;
3522 	struct xfs_rui_log_item		*ruip;
3523 	struct xfs_rui_log_format	*rui_formatp;
3524 
3525 	rui_formatp = item->ri_buf[0].i_addr;
3526 
3527 	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3528 	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3529 	if (error) {
3530 		xfs_rui_item_free(ruip);
3531 		return error;
3532 	}
3533 	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3534 
3535 	spin_lock(&log->l_ailp->ail_lock);
3536 	/*
3537 	 * The RUI has two references. One for the RUD and one for RUI to ensure
3538 	 * it makes it into the AIL. Insert the RUI into the AIL directly and
3539 	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3540 	 * AIL lock.
3541 	 */
3542 	xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3543 	xfs_rui_release(ruip);
3544 	return 0;
3545 }
3546 
3547 
3548 /*
3549  * This routine is called when an RUD format structure is found in a committed
3550  * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3551  * was still in the log. To do this it searches the AIL for the RUI with an id
3552  * equal to that in the RUD format structure. If we find it we drop the RUD
3553  * reference, which removes the RUI from the AIL and frees it.
3554  */
3555 STATIC int
3556 xlog_recover_rud_pass2(
3557 	struct xlog			*log,
3558 	struct xlog_recover_item	*item)
3559 {
3560 	struct xfs_rud_log_format	*rud_formatp;
3561 	struct xfs_rui_log_item		*ruip = NULL;
3562 	struct xfs_log_item		*lip;
3563 	uint64_t			rui_id;
3564 	struct xfs_ail_cursor		cur;
3565 	struct xfs_ail			*ailp = log->l_ailp;
3566 
3567 	rud_formatp = item->ri_buf[0].i_addr;
3568 	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3569 	rui_id = rud_formatp->rud_rui_id;
3570 
3571 	/*
3572 	 * Search for the RUI with the id in the RUD format structure in the
3573 	 * AIL.
3574 	 */
3575 	spin_lock(&ailp->ail_lock);
3576 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3577 	while (lip != NULL) {
3578 		if (lip->li_type == XFS_LI_RUI) {
3579 			ruip = (struct xfs_rui_log_item *)lip;
3580 			if (ruip->rui_format.rui_id == rui_id) {
3581 				/*
3582 				 * Drop the RUD reference to the RUI. This
3583 				 * removes the RUI from the AIL and frees it.
3584 				 */
3585 				spin_unlock(&ailp->ail_lock);
3586 				xfs_rui_release(ruip);
3587 				spin_lock(&ailp->ail_lock);
3588 				break;
3589 			}
3590 		}
3591 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3592 	}
3593 
3594 	xfs_trans_ail_cursor_done(&cur);
3595 	spin_unlock(&ailp->ail_lock);
3596 
3597 	return 0;
3598 }
3599 
3600 /*
3601  * Copy a CUI format buffer from the given buf into the destination
3602  * CUI format structure.  The CUI/CUD items were designed not to need any
3603  * special alignment handling.
3604  */
3605 static int
3606 xfs_cui_copy_format(
3607 	struct xfs_log_iovec		*buf,
3608 	struct xfs_cui_log_format	*dst_cui_fmt)
3609 {
3610 	struct xfs_cui_log_format	*src_cui_fmt;
3611 	uint				len;
3612 
3613 	src_cui_fmt = buf->i_addr;
3614 	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3615 
3616 	if (buf->i_len == len) {
3617 		memcpy(dst_cui_fmt, src_cui_fmt, len);
3618 		return 0;
3619 	}
3620 	return -EFSCORRUPTED;
3621 }
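
/*
 * Editor's sketch (not part of the original source): the exact-length
 * check performed by the *_copy_format() helpers, in isolation.  The
 * on-disk format ends in a flexible array of extents, so the only valid
 * region length is the header plus exactly nextents array entries;
 * anything else is treated as corruption.  The types below are simplified
 * stand-ins for the real log format structures.
 */
#if 0	/* illustration only */
#include <stddef.h>
#include <string.h>

struct fmt {				/* stand-in for xfs_cui_log_format */
	unsigned int	nextents;
	struct {
		unsigned long long	start;
		unsigned long long	len;
	}		extents[];
};

static int copy_format(const void *buf, size_t buf_len, struct fmt *dst)
{
	const struct fmt *src = buf;
	size_t want;

	if (buf_len < sizeof(*src))
		return -1;		/* too short to even read nextents */
	want = sizeof(*src) + src->nextents * sizeof(src->extents[0]);
	if (buf_len != want)
		return -1;		/* -EFSCORRUPTED in the kernel */
	memcpy(dst, src, want);
	return 0;
}
#endif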
3622 
3623 /*
3624  * This routine is called to create an in-core extent refcount update
3625  * item from the cui format structure which was logged on disk.
3626  * It allocates an in-core cui, copies the extents from the format
3627  * structure into it, and adds the cui to the AIL with the given
3628  * LSN.
3629  */
3630 STATIC int
3631 xlog_recover_cui_pass2(
3632 	struct xlog			*log,
3633 	struct xlog_recover_item	*item,
3634 	xfs_lsn_t			lsn)
3635 {
3636 	int				error;
3637 	struct xfs_mount		*mp = log->l_mp;
3638 	struct xfs_cui_log_item		*cuip;
3639 	struct xfs_cui_log_format	*cui_formatp;
3640 
3641 	cui_formatp = item->ri_buf[0].i_addr;
3642 
3643 	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3644 	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3645 	if (error) {
3646 		xfs_cui_item_free(cuip);
3647 		return error;
3648 	}
3649 	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3650 
3651 	spin_lock(&log->l_ailp->ail_lock);
3652 	/*
3653 	 * The CUI has two references. One for the CUD and one for CUI to ensure
3654 	 * it makes it into the AIL. Insert the CUI into the AIL directly and
3655 	 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3656 	 * AIL lock.
3657 	 */
3658 	xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3659 	xfs_cui_release(cuip);
3660 	return 0;
3661 }
3662 
3663 
3664 /*
3665  * This routine is called when a CUD format structure is found in a committed
3666  * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3667  * was still in the log. To do this it searches the AIL for the CUI with an id
3668  * equal to that in the CUD format structure. If we find it we drop the CUD
3669  * reference, which removes the CUI from the AIL and frees it.
3670  */
3671 STATIC int
3672 xlog_recover_cud_pass2(
3673 	struct xlog			*log,
3674 	struct xlog_recover_item	*item)
3675 {
3676 	struct xfs_cud_log_format	*cud_formatp;
3677 	struct xfs_cui_log_item		*cuip = NULL;
3678 	struct xfs_log_item		*lip;
3679 	uint64_t			cui_id;
3680 	struct xfs_ail_cursor		cur;
3681 	struct xfs_ail			*ailp = log->l_ailp;
3682 
3683 	cud_formatp = item->ri_buf[0].i_addr;
3684 	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3685 		return -EFSCORRUPTED;
3686 	cui_id = cud_formatp->cud_cui_id;
3687 
3688 	/*
3689 	 * Search for the CUI with the id in the CUD format structure in the
3690 	 * AIL.
3691 	 */
3692 	spin_lock(&ailp->ail_lock);
3693 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3694 	while (lip != NULL) {
3695 		if (lip->li_type == XFS_LI_CUI) {
3696 			cuip = (struct xfs_cui_log_item *)lip;
3697 			if (cuip->cui_format.cui_id == cui_id) {
3698 				/*
3699 				 * Drop the CUD reference to the CUI. This
3700 				 * removes the CUI from the AIL and frees it.
3701 				 */
3702 				spin_unlock(&ailp->ail_lock);
3703 				xfs_cui_release(cuip);
3704 				spin_lock(&ailp->ail_lock);
3705 				break;
3706 			}
3707 		}
3708 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3709 	}
3710 
3711 	xfs_trans_ail_cursor_done(&cur);
3712 	spin_unlock(&ailp->ail_lock);
3713 
3714 	return 0;
3715 }
3716 
3717 /*
3718  * Copy a BUI format buffer from the given buf into the destination
3719  * BUI format structure.  The BUI/BUD items were designed not to need any
3720  * special alignment handling.
3721  */
3722 static int
3723 xfs_bui_copy_format(
3724 	struct xfs_log_iovec		*buf,
3725 	struct xfs_bui_log_format	*dst_bui_fmt)
3726 {
3727 	struct xfs_bui_log_format	*src_bui_fmt;
3728 	uint				len;
3729 
3730 	src_bui_fmt = buf->i_addr;
3731 	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3732 
3733 	if (buf->i_len == len) {
3734 		memcpy(dst_bui_fmt, src_bui_fmt, len);
3735 		return 0;
3736 	}
3737 	return -EFSCORRUPTED;
3738 }
3739 
3740 /*
3741  * This routine is called to create an in-core extent bmap update
3742  * item from the bui format structure which was logged on disk.
3743  * It allocates an in-core bui, copies the extents from the format
3744  * structure into it, and adds the bui to the AIL with the given
3745  * LSN.
3746  */
3747 STATIC int
3748 xlog_recover_bui_pass2(
3749 	struct xlog			*log,
3750 	struct xlog_recover_item	*item,
3751 	xfs_lsn_t			lsn)
3752 {
3753 	int				error;
3754 	struct xfs_mount		*mp = log->l_mp;
3755 	struct xfs_bui_log_item		*buip;
3756 	struct xfs_bui_log_format	*bui_formatp;
3757 
3758 	bui_formatp = item->ri_buf[0].i_addr;
3759 
3760 	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3761 		return -EFSCORRUPTED;
3762 	buip = xfs_bui_init(mp);
3763 	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3764 	if (error) {
3765 		xfs_bui_item_free(buip);
3766 		return error;
3767 	}
3768 	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3769 
3770 	spin_lock(&log->l_ailp->ail_lock);
3771 	/*
3772 	 * The BUI has two references. One for the BUD and one for the BUI to
3773 	 * ensure it makes it into the AIL. Insert the BUI into the AIL directly
3774 	 * and drop the BUI reference. Note that xfs_trans_ail_update() drops
3775 	 * the AIL lock.
3776 	 */
3777 	xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3778 	xfs_bui_release(buip);
3779 	return 0;
3780 }
3781 
3782 
3783 /*
3784  * This routine is called when a BUD format structure is found in a committed
3785  * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3786  * was still in the log. To do this it searches the AIL for the BUI with an id
3787  * equal to that in the BUD format structure. If we find it we drop the BUD
3788  * reference, which removes the BUI from the AIL and frees it.
3789  */
3790 STATIC int
3791 xlog_recover_bud_pass2(
3792 	struct xlog			*log,
3793 	struct xlog_recover_item	*item)
3794 {
3795 	struct xfs_bud_log_format	*bud_formatp;
3796 	struct xfs_bui_log_item		*buip = NULL;
3797 	struct xfs_log_item		*lip;
3798 	uint64_t			bui_id;
3799 	struct xfs_ail_cursor		cur;
3800 	struct xfs_ail			*ailp = log->l_ailp;
3801 
3802 	bud_formatp = item->ri_buf[0].i_addr;
3803 	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3804 		return -EFSCORRUPTED;
3805 	bui_id = bud_formatp->bud_bui_id;
3806 
3807 	/*
3808 	 * Search for the BUI with the id in the BUD format structure in the
3809 	 * AIL.
3810 	 */
3811 	spin_lock(&ailp->ail_lock);
3812 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3813 	while (lip != NULL) {
3814 		if (lip->li_type == XFS_LI_BUI) {
3815 			buip = (struct xfs_bui_log_item *)lip;
3816 			if (buip->bui_format.bui_id == bui_id) {
3817 				/*
3818 				 * Drop the BUD reference to the BUI. This
3819 				 * removes the BUI from the AIL and frees it.
3820 				 */
3821 				spin_unlock(&ailp->ail_lock);
3822 				xfs_bui_release(buip);
3823 				spin_lock(&ailp->ail_lock);
3824 				break;
3825 			}
3826 		}
3827 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3828 	}
3829 
3830 	xfs_trans_ail_cursor_done(&cur);
3831 	spin_unlock(&ailp->ail_lock);
3832 
3833 	return 0;
3834 }
3835 
3836 /*
3837  * This routine is called when an inode create format structure is found in a
3838  * committed transaction in the log.  Its purpose is to initialise the inodes
3839  * being allocated on disk. This requires us to get inode cluster buffers that
3840  * match the range to be initialised, stamped with inode templates and written
3841  * by delayed write so that subsequent modifications will hit the cached buffer
3842  * and only need writing out at the end of recovery.
3843  */
3844 STATIC int
3845 xlog_recover_do_icreate_pass2(
3846 	struct xlog		*log,
3847 	struct list_head	*buffer_list,
3848 	xlog_recover_item_t	*item)
3849 {
3850 	struct xfs_mount	*mp = log->l_mp;
3851 	struct xfs_icreate_log	*icl;
3852 	xfs_agnumber_t		agno;
3853 	xfs_agblock_t		agbno;
3854 	unsigned int		count;
3855 	unsigned int		isize;
3856 	xfs_agblock_t		length;
3857 	int			bb_per_cluster;
3858 	int			cancel_count;
3859 	int			nbufs;
3860 	int			i;
3861 
3862 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3863 	if (icl->icl_type != XFS_LI_ICREATE) {
3864 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3865 		return -EINVAL;
3866 	}
3867 
3868 	if (icl->icl_size != 1) {
3869 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3870 		return -EINVAL;
3871 	}
3872 
3873 	agno = be32_to_cpu(icl->icl_ag);
3874 	if (agno >= mp->m_sb.sb_agcount) {
3875 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3876 		return -EINVAL;
3877 	}
3878 	agbno = be32_to_cpu(icl->icl_agbno);
3879 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3880 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3881 		return -EINVAL;
3882 	}
3883 	isize = be32_to_cpu(icl->icl_isize);
3884 	if (isize != mp->m_sb.sb_inodesize) {
3885 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3886 		return -EINVAL;
3887 	}
3888 	count = be32_to_cpu(icl->icl_count);
3889 	if (!count) {
3890 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3891 		return -EINVAL;
3892 	}
3893 	length = be32_to_cpu(icl->icl_length);
3894 	if (!length || length >= mp->m_sb.sb_agblocks) {
3895 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3896 		return -EINVAL;
3897 	}
3898 
3899 	/*
3900 	 * The inode chunk is either full or sparse and we only support
3901 	 * m_ialloc_min_blks sized sparse allocations at this time.
3902 	 */
3903 	if (length != mp->m_ialloc_blks &&
3904 	    length != mp->m_ialloc_min_blks) {
3905 		xfs_warn(log->l_mp,
3906 			 "%s: unsupported chunk length", __func__);
3907 		return -EINVAL;
3908 	}
3909 
3910 	/* verify inode count is consistent with extent length */
3911 	if ((count >> mp->m_sb.sb_inopblog) != length) {
3912 		xfs_warn(log->l_mp,
3913 			 "%s: inconsistent inode count and chunk length",
3914 			 __func__);
3915 		return -EINVAL;
3916 	}
3917 
3918 	/*
3919 	 * The icreate transaction can cover multiple cluster buffers and these
3920 	 * buffers could have been freed and reused. Check the individual
3921 	 * buffers for cancellation so we don't overwrite anything written after
3922 	 * a cancellation.
3923 	 */
3924 	bb_per_cluster = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
3925 	nbufs = length / mp->m_blocks_per_cluster;
3926 	for (i = 0, cancel_count = 0; i < nbufs; i++) {
3927 		xfs_daddr_t	daddr;
3928 
3929 		daddr = XFS_AGB_TO_DADDR(mp, agno,
3930 					 agbno + i * mp->m_blocks_per_cluster);
3931 		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3932 			cancel_count++;
3933 	}
3934 
3935 	/*
3936 	 * We currently only use icreate for a single allocation at a time. This
3937 	 * means we should expect either all or none of the buffers to be
3938 	 * cancelled. Be conservative and skip replay if at least one buffer is
3939 	 * cancelled, but warn the user that something is awry if the buffers
3940 	 * are not consistent.
3941 	 *
3942 	 * XXX: This must be refined to only skip cancelled clusters once we use
3943 	 * icreate for multiple chunk allocations.
3944 	 */
3945 	ASSERT(!cancel_count || cancel_count == nbufs);
3946 	if (cancel_count) {
3947 		if (cancel_count != nbufs)
3948 			xfs_warn(mp,
3949 	"WARNING: partial inode chunk cancellation, skipped icreate.");
3950 		trace_xfs_log_recover_icreate_cancel(log, icl);
3951 		return 0;
3952 	}
3953 
3954 	trace_xfs_log_recover_icreate_recover(log, icl);
3955 	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3956 				     length, be32_to_cpu(icl->icl_gen));
3957 }
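
/*
 * Editor's note (worked example, not part of the original source): the
 * count/length consistency check above is pure arithmetic.  With 256-byte
 * inodes in 4k blocks there are 16 inodes per block, so sb_inopblog = 4;
 * a 64-inode chunk must then describe 64 >> 4 = 4 blocks, and any other
 * icl_length is rejected as inconsistent.
 */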
3958 
3959 STATIC void
3960 xlog_recover_buffer_ra_pass2(
3961 	struct xlog                     *log,
3962 	struct xlog_recover_item        *item)
3963 {
3964 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3965 	struct xfs_mount		*mp = log->l_mp;
3966 
3967 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3968 			buf_f->blf_len, buf_f->blf_flags)) {
3969 		return;
3970 	}
3971 
3972 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3973 				buf_f->blf_len, NULL);
3974 }
3975 
3976 STATIC void
3977 xlog_recover_inode_ra_pass2(
3978 	struct xlog                     *log,
3979 	struct xlog_recover_item        *item)
3980 {
3981 	struct xfs_inode_log_format	ilf_buf;
3982 	struct xfs_inode_log_format	*ilfp;
3983 	struct xfs_mount		*mp = log->l_mp;
3984 	int			error;
3985 
3986 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3987 		ilfp = item->ri_buf[0].i_addr;
3988 	} else {
3989 		ilfp = &ilf_buf;
3990 		memset(ilfp, 0, sizeof(*ilfp));
3991 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3992 		if (error)
3993 			return;
3994 	}
3995 
3996 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3997 		return;
3998 
3999 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
4000 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
4001 }
4002 
4003 STATIC void
4004 xlog_recover_dquot_ra_pass2(
4005 	struct xlog			*log,
4006 	struct xlog_recover_item	*item)
4007 {
4008 	struct xfs_mount	*mp = log->l_mp;
4009 	struct xfs_disk_dquot	*recddq;
4010 	struct xfs_dq_logformat	*dq_f;
4011 	uint			type;
4012 	int			len;
4013 
4014 
4015 	if (mp->m_qflags == 0)
4016 		return;
4017 
4018 	recddq = item->ri_buf[1].i_addr;
4019 	if (recddq == NULL)
4020 		return;
4021 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4022 		return;
4023 
4024 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4025 	ASSERT(type);
4026 	if (log->l_quotaoffs_flag & type)
4027 		return;
4028 
4029 	dq_f = item->ri_buf[0].i_addr;
4030 	ASSERT(dq_f);
4031 	ASSERT(dq_f->qlf_len == 1);
4032 
4033 	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4034 	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4035 		return;
4036 
4037 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4038 			  &xfs_dquot_buf_ra_ops);
4039 }
4040 
4041 STATIC void
4042 xlog_recover_ra_pass2(
4043 	struct xlog			*log,
4044 	struct xlog_recover_item	*item)
4045 {
4046 	switch (ITEM_TYPE(item)) {
4047 	case XFS_LI_BUF:
4048 		xlog_recover_buffer_ra_pass2(log, item);
4049 		break;
4050 	case XFS_LI_INODE:
4051 		xlog_recover_inode_ra_pass2(log, item);
4052 		break;
4053 	case XFS_LI_DQUOT:
4054 		xlog_recover_dquot_ra_pass2(log, item);
4055 		break;
4056 	case XFS_LI_EFI:
4057 	case XFS_LI_EFD:
4058 	case XFS_LI_QUOTAOFF:
4059 	case XFS_LI_RUI:
4060 	case XFS_LI_RUD:
4061 	case XFS_LI_CUI:
4062 	case XFS_LI_CUD:
4063 	case XFS_LI_BUI:
4064 	case XFS_LI_BUD:
4065 	default:
4066 		break;
4067 	}
4068 }
4069 
4070 STATIC int
4071 xlog_recover_commit_pass1(
4072 	struct xlog			*log,
4073 	struct xlog_recover		*trans,
4074 	struct xlog_recover_item	*item)
4075 {
4076 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4077 
4078 	switch (ITEM_TYPE(item)) {
4079 	case XFS_LI_BUF:
4080 		return xlog_recover_buffer_pass1(log, item);
4081 	case XFS_LI_QUOTAOFF:
4082 		return xlog_recover_quotaoff_pass1(log, item);
4083 	case XFS_LI_INODE:
4084 	case XFS_LI_EFI:
4085 	case XFS_LI_EFD:
4086 	case XFS_LI_DQUOT:
4087 	case XFS_LI_ICREATE:
4088 	case XFS_LI_RUI:
4089 	case XFS_LI_RUD:
4090 	case XFS_LI_CUI:
4091 	case XFS_LI_CUD:
4092 	case XFS_LI_BUI:
4093 	case XFS_LI_BUD:
4094 		/* nothing to do in pass 1 */
4095 		return 0;
4096 	default:
4097 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4098 			__func__, ITEM_TYPE(item));
4099 		ASSERT(0);
4100 		return -EIO;
4101 	}
4102 }
4103 
4104 STATIC int
4105 xlog_recover_commit_pass2(
4106 	struct xlog			*log,
4107 	struct xlog_recover		*trans,
4108 	struct list_head		*buffer_list,
4109 	struct xlog_recover_item	*item)
4110 {
4111 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4112 
4113 	switch (ITEM_TYPE(item)) {
4114 	case XFS_LI_BUF:
4115 		return xlog_recover_buffer_pass2(log, buffer_list, item,
4116 						 trans->r_lsn);
4117 	case XFS_LI_INODE:
4118 		return xlog_recover_inode_pass2(log, buffer_list, item,
4119 						 trans->r_lsn);
4120 	case XFS_LI_EFI:
4121 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4122 	case XFS_LI_EFD:
4123 		return xlog_recover_efd_pass2(log, item);
4124 	case XFS_LI_RUI:
4125 		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4126 	case XFS_LI_RUD:
4127 		return xlog_recover_rud_pass2(log, item);
4128 	case XFS_LI_CUI:
4129 		return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4130 	case XFS_LI_CUD:
4131 		return xlog_recover_cud_pass2(log, item);
4132 	case XFS_LI_BUI:
4133 		return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4134 	case XFS_LI_BUD:
4135 		return xlog_recover_bud_pass2(log, item);
4136 	case XFS_LI_DQUOT:
4137 		return xlog_recover_dquot_pass2(log, buffer_list, item,
4138 						trans->r_lsn);
4139 	case XFS_LI_ICREATE:
4140 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4141 	case XFS_LI_QUOTAOFF:
4142 		/* nothing to do in pass2 */
4143 		return 0;
4144 	default:
4145 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4146 			__func__, ITEM_TYPE(item));
4147 		ASSERT(0);
4148 		return -EIO;
4149 	}
4150 }
4151 
4152 STATIC int
4153 xlog_recover_items_pass2(
4154 	struct xlog                     *log,
4155 	struct xlog_recover             *trans,
4156 	struct list_head                *buffer_list,
4157 	struct list_head                *item_list)
4158 {
4159 	struct xlog_recover_item	*item;
4160 	int				error = 0;
4161 
4162 	list_for_each_entry(item, item_list, ri_list) {
4163 		error = xlog_recover_commit_pass2(log, trans,
4164 					  buffer_list, item);
4165 		if (error)
4166 			return error;
4167 	}
4168 
4169 	return error;
4170 }
4171 
4172 /*
4173  * Perform the transaction.
4174  *
4175  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
4176  * EFIs and EFDs get queued up by adding entries into the AIL for them.
4177  */
4178 STATIC int
4179 xlog_recover_commit_trans(
4180 	struct xlog		*log,
4181 	struct xlog_recover	*trans,
4182 	int			pass,
4183 	struct list_head	*buffer_list)
4184 {
4185 	int				error = 0;
4186 	int				items_queued = 0;
4187 	struct xlog_recover_item	*item;
4188 	struct xlog_recover_item	*next;
4189 	LIST_HEAD			(ra_list);
4190 	LIST_HEAD			(done_list);
4191 
4192 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4193 
4194 	hlist_del_init(&trans->r_list);
4195 
4196 	error = xlog_recover_reorder_trans(log, trans, pass);
4197 	if (error)
4198 		return error;
4199 
4200 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4201 		switch (pass) {
4202 		case XLOG_RECOVER_PASS1:
4203 			error = xlog_recover_commit_pass1(log, trans, item);
4204 			break;
4205 		case XLOG_RECOVER_PASS2:
4206 			xlog_recover_ra_pass2(log, item);
4207 			list_move_tail(&item->ri_list, &ra_list);
4208 			items_queued++;
4209 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4210 				error = xlog_recover_items_pass2(log, trans,
4211 						buffer_list, &ra_list);
4212 				list_splice_tail_init(&ra_list, &done_list);
4213 				items_queued = 0;
4214 			}
4215 
4216 			break;
4217 		default:
4218 			ASSERT(0);
4219 		}
4220 
4221 		if (error)
4222 			goto out;
4223 	}
4224 
4225 out:
4226 	if (!list_empty(&ra_list)) {
4227 		if (!error)
4228 			error = xlog_recover_items_pass2(log, trans,
4229 					buffer_list, &ra_list);
4230 		list_splice_tail_init(&ra_list, &done_list);
4231 	}
4232 
4233 	if (!list_empty(&done_list))
4234 		list_splice_init(&done_list, &trans->r_itemq);
4235 
4236 	return error;
4237 }
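
/*
 * Editor's sketch (not part of the original source): the readahead
 * batching above, reduced to its core.  Items are staged while their
 * buffers are prefetched; once the batch limit is reached the whole batch
 * is replayed, so the I/O for one batch overlaps the CPU work on the
 * previous one.  All names below are hypothetical.
 */
#if 0	/* illustration only */
#include <stddef.h>

#define BATCH_MAX	100	/* mirrors XLOG_RECOVER_COMMIT_QUEUE_MAX */

struct item;
void start_readahead(struct item *it);			/* assumed prefetch */
int replay_batch(struct item **batch, size_t n);	/* assumed replay */

static int process_items(struct item **items, size_t nitems)
{
	struct item	*batch[BATCH_MAX];
	size_t		queued = 0;
	size_t		i;
	int		error;

	for (i = 0; i < nitems; i++) {
		start_readahead(items[i]);	/* kick off I/O early */
		batch[queued++] = items[i];
		if (queued == BATCH_MAX) {
			error = replay_batch(batch, queued);
			if (error)
				return error;
			queued = 0;
		}
	}
	/* replay whatever is left in the final, partial batch */
	return queued ? replay_batch(batch, queued) : 0;
}
#endif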
4238 
4239 STATIC void
4240 xlog_recover_add_item(
4241 	struct list_head	*head)
4242 {
4243 	xlog_recover_item_t	*item;
4244 
4245 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4246 	INIT_LIST_HEAD(&item->ri_list);
4247 	list_add_tail(&item->ri_list, head);
4248 }
4249 
4250 STATIC int
4251 xlog_recover_add_to_cont_trans(
4252 	struct xlog		*log,
4253 	struct xlog_recover	*trans,
4254 	char			*dp,
4255 	int			len)
4256 {
4257 	xlog_recover_item_t	*item;
4258 	char			*ptr, *old_ptr;
4259 	int			old_len;
4260 
4261 	/*
4262 	 * If the transaction is empty, the header was split across this and the
4263 	 * previous record. Copy the rest of the header.
4264 	 */
4265 	if (list_empty(&trans->r_itemq)) {
4266 		ASSERT(len <= sizeof(struct xfs_trans_header));
4267 		if (len > sizeof(struct xfs_trans_header)) {
4268 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4269 			return -EIO;
4270 		}
4271 
4272 		xlog_recover_add_item(&trans->r_itemq);
4273 		ptr = (char *)&trans->r_theader +
4274 				sizeof(struct xfs_trans_header) - len;
4275 		memcpy(ptr, dp, len);
4276 		return 0;
4277 	}
4278 
4279 	/* take the tail entry */
4280 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4281 
4282 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4283 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
4284 
4285 	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4286 	memcpy(&ptr[old_len], dp, len);
4287 	item->ri_buf[item->ri_cnt-1].i_len += len;
4288 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4289 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4290 	return 0;
4291 }
4292 
4293 /*
4294  * The next region to add is the start of a new region.  It could be
4295  * a whole region or it could be the first part of a new region.  Because
4296  * of this, the assumption here is that the type and size fields of all
4297  * format structures fit into the first 32 bits of the structure.
4298  *
4299  * This works because all regions must be 32 bit aligned.  Therefore, we
4300  * either have both fields or we have neither field.  In the case we have
4301  * neither field, the data part of the region is zero length.  We only have
4302  * a log_op_header and can throw away the header since a new one will appear
4303  * later.  If we have at least 4 bytes, then we can determine how many regions
4304  * will appear in the current log item.
4305  */
4306 STATIC int
4307 xlog_recover_add_to_trans(
4308 	struct xlog		*log,
4309 	struct xlog_recover	*trans,
4310 	char			*dp,
4311 	int			len)
4312 {
4313 	struct xfs_inode_log_format	*in_f;			/* any will do */
4314 	xlog_recover_item_t	*item;
4315 	char			*ptr;
4316 
4317 	if (!len)
4318 		return 0;
4319 	if (list_empty(&trans->r_itemq)) {
4320 		/* we need to catch log corruptions here */
4321 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4322 			xfs_warn(log->l_mp, "%s: bad header magic number",
4323 				__func__);
4324 			ASSERT(0);
4325 			return -EIO;
4326 		}
4327 
4328 		if (len > sizeof(struct xfs_trans_header)) {
4329 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4330 			ASSERT(0);
4331 			return -EIO;
4332 		}
4333 
4334 		/*
4335 		 * The transaction header can be arbitrarily split across op
4336 		 * records. If we don't have the whole thing here, copy what we
4337 		 * do have and handle the rest in the next record.
4338 		 */
4339 		if (len == sizeof(struct xfs_trans_header))
4340 			xlog_recover_add_item(&trans->r_itemq);
4341 		memcpy(&trans->r_theader, dp, len);
4342 		return 0;
4343 	}
4344 
4345 	ptr = kmem_alloc(len, KM_SLEEP);
4346 	memcpy(ptr, dp, len);
4347 	in_f = (struct xfs_inode_log_format *)ptr;
4348 
4349 	/* take the tail entry */
4350 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4351 	if (item->ri_total != 0 &&
4352 	     item->ri_total == item->ri_cnt) {
4353 		/* tail item is in use, get a new one */
4354 		xlog_recover_add_item(&trans->r_itemq);
4355 		item = list_entry(trans->r_itemq.prev,
4356 					xlog_recover_item_t, ri_list);
4357 	}
4358 
4359 	if (item->ri_total == 0) {		/* first region to be added */
4360 		if (in_f->ilf_size == 0 ||
4361 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4362 			xfs_warn(log->l_mp,
4363 		"bad number of regions (%d) in inode log format",
4364 				  in_f->ilf_size);
4365 			ASSERT(0);
4366 			kmem_free(ptr);
4367 			return -EIO;
4368 		}
4369 
4370 		item->ri_total = in_f->ilf_size;
4371 		item->ri_buf =
4372 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4373 				    KM_SLEEP);
4374 	}
4375 	ASSERT(item->ri_total > item->ri_cnt);
4376 	/* Description region is ri_buf[0] */
4377 	item->ri_buf[item->ri_cnt].i_addr = ptr;
4378 	item->ri_buf[item->ri_cnt].i_len  = len;
4379 	item->ri_cnt++;
4380 	trace_xfs_log_recover_item_add(log, trans, item, 0);
4381 	return 0;
4382 }
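
/*
 * Editor's sketch (not part of the original source): why peeking at the
 * first 32 bits of a region is safe, per the comment above.  Every log
 * item format begins with 16-bit type and size fields and regions are
 * 32-bit aligned, so a region with any data at all yields both fields,
 * while a zero-length region carries only an op header.  A simplified
 * decoder:
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <string.h>

struct region_head {		/* layout shared by the log item formats */
	uint16_t	type;
	uint16_t	size;	/* number of regions in the item */
};

static int peek_region(const char *dp, unsigned int len,
		       struct region_head *out)
{
	if (len < sizeof(*out))
		return -1;	/* no data: nothing beyond the op header */
	memcpy(out, dp, sizeof(*out));	/* memcpy avoids unaligned loads */
	return 0;
}
#endif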
4383 
4384 /*
4385  * Free up any resources allocated by the transaction
4386  *
4387  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4388  */
4389 STATIC void
4390 xlog_recover_free_trans(
4391 	struct xlog_recover	*trans)
4392 {
4393 	xlog_recover_item_t	*item, *n;
4394 	int			i;
4395 
4396 	hlist_del_init(&trans->r_list);
4397 
4398 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4399 		/* Free the regions in the item. */
4400 		list_del(&item->ri_list);
4401 		for (i = 0; i < item->ri_cnt; i++)
4402 			kmem_free(item->ri_buf[i].i_addr);
4403 		/* Free the item itself */
4404 		kmem_free(item->ri_buf);
4405 		kmem_free(item);
4406 	}
4407 	/* Free the transaction recover structure */
4408 	kmem_free(trans);
4409 }
4410 
4411 /*
4412  * On error or completion, trans is freed.
4413  */
4414 STATIC int
4415 xlog_recovery_process_trans(
4416 	struct xlog		*log,
4417 	struct xlog_recover	*trans,
4418 	char			*dp,
4419 	unsigned int		len,
4420 	unsigned int		flags,
4421 	int			pass,
4422 	struct list_head	*buffer_list)
4423 {
4424 	int			error = 0;
4425 	bool			freeit = false;
4426 
4427 	/* mask off ophdr transaction container flags */
4428 	flags &= ~XLOG_END_TRANS;
4429 	if (flags & XLOG_WAS_CONT_TRANS)
4430 		flags &= ~XLOG_CONTINUE_TRANS;
4431 
4432 	/*
4433 	 * Callees must not free the trans structure. We'll decide if we need to
4434 	 * free it or not based on the operation being done and its result.
4435 	 */
4436 	switch (flags) {
4437 	/* expected flag values */
4438 	case 0:
4439 	case XLOG_CONTINUE_TRANS:
4440 		error = xlog_recover_add_to_trans(log, trans, dp, len);
4441 		break;
4442 	case XLOG_WAS_CONT_TRANS:
4443 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4444 		break;
4445 	case XLOG_COMMIT_TRANS:
4446 		error = xlog_recover_commit_trans(log, trans, pass,
4447 						  buffer_list);
4448 		/* success or fail, we are now done with this transaction. */
4449 		freeit = true;
4450 		break;
4451 
4452 	/* unexpected flag values */
4453 	case XLOG_UNMOUNT_TRANS:
4454 		/* just skip trans */
4455 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4456 		freeit = true;
4457 		break;
4458 	case XLOG_START_TRANS:
4459 	default:
4460 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4461 		ASSERT(0);
4462 		error = -EIO;
4463 		break;
4464 	}
4465 	if (error || freeit)
4466 		xlog_recover_free_trans(trans);
4467 	return error;
4468 }
4469 
4470 /*
4471  * Lookup the transaction recovery structure associated with the ID in the
4472  * current ophdr. If the transaction doesn't exist and the start flag is set in
4473  * the ophdr, then allocate a new transaction for future ID matches to find.
4474  * Either way, return what we found during the lookup - an existing transaction
4475  * or nothing.
4476  */
4477 STATIC struct xlog_recover *
4478 xlog_recover_ophdr_to_trans(
4479 	struct hlist_head	rhash[],
4480 	struct xlog_rec_header	*rhead,
4481 	struct xlog_op_header	*ohead)
4482 {
4483 	struct xlog_recover	*trans;
4484 	xlog_tid_t		tid;
4485 	struct hlist_head	*rhp;
4486 
4487 	tid = be32_to_cpu(ohead->oh_tid);
4488 	rhp = &rhash[XLOG_RHASH(tid)];
4489 	hlist_for_each_entry(trans, rhp, r_list) {
4490 		if (trans->r_log_tid == tid)
4491 			return trans;
4492 	}
4493 
4494 	/*
4495 	 * skip over non-start transaction headers - we could be
4496 	 * processing slack space before the next transaction starts
4497 	 */
4498 	if (!(ohead->oh_flags & XLOG_START_TRANS))
4499 		return NULL;
4500 
4501 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4502 
4503 	/*
4504 	 * This is a new transaction so allocate a new recovery container to
4505 	 * hold the recovery ops that will follow.
4506 	 */
4507 	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4508 	trans->r_log_tid = tid;
4509 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4510 	INIT_LIST_HEAD(&trans->r_itemq);
4511 	INIT_HLIST_NODE(&trans->r_list);
4512 	hlist_add_head(&trans->r_list, rhp);
4513 
4514 	/*
4515 	 * Nothing more to do for this ophdr. Items to be added to this new
4516 	 * transaction will be in subsequent ophdr containers.
4517 	 */
4518 	return NULL;
4519 }
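
/*
 * Editor's sketch (not part of the original source): the lookup above as
 * a bare hash table keyed by log tid.  Transactions interleave in the
 * log, so each ophdr's tid selects a bucket whose chain is scanned for an
 * exact match; a miss with the start flag set allocates a fresh container
 * for future matches.  Table size and names are hypothetical.
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdlib.h>

#define RHASH_SIZE	16
#define RHASH(tid)	((tid) & (RHASH_SIZE - 1))

struct rtrans {
	uint32_t	tid;
	struct rtrans	*next;
};

static struct rtrans	*rhash[RHASH_SIZE];

static struct rtrans *lookup_trans(uint32_t tid, int is_start)
{
	struct rtrans	*t;

	for (t = rhash[RHASH(tid)]; t != NULL; t = t->next)
		if (t->tid == tid)
			return t;
	if (!is_start)
		return NULL;	/* slack space before the next trans starts */
	t = calloc(1, sizeof(*t));
	if (t) {
		t->tid = tid;
		t->next = rhash[RHASH(tid)];
		rhash[RHASH(tid)] = t;
	}
	return NULL;	/* the start ophdr itself carries no items */
}
#endif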
4520 
4521 STATIC int
4522 xlog_recover_process_ophdr(
4523 	struct xlog		*log,
4524 	struct hlist_head	rhash[],
4525 	struct xlog_rec_header	*rhead,
4526 	struct xlog_op_header	*ohead,
4527 	char			*dp,
4528 	char			*end,
4529 	int			pass,
4530 	struct list_head	*buffer_list)
4531 {
4532 	struct xlog_recover	*trans;
4533 	unsigned int		len;
4534 	int			error;
4535 
4536 	/* Do we understand who wrote this op? */
4537 	if (ohead->oh_clientid != XFS_TRANSACTION &&
4538 	    ohead->oh_clientid != XFS_LOG) {
4539 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4540 			__func__, ohead->oh_clientid);
4541 		ASSERT(0);
4542 		return -EIO;
4543 	}
4544 
4545 	/*
4546 	 * Check the ophdr contains all the data it is supposed to contain.
4547 	 */
4548 	len = be32_to_cpu(ohead->oh_len);
4549 	if (dp + len > end) {
4550 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4551 		WARN_ON(1);
4552 		return -EIO;
4553 	}
4554 
4555 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4556 	if (!trans) {
4557 		/* nothing to do, so skip over this ophdr */
4558 		return 0;
4559 	}
4560 
4561 	/*
4562 	 * The recovered buffer queue is drained only once we know that all
4563 	 * recovery items for the current LSN have been processed. This is
4564 	 * required because:
4565 	 *
4566 	 * - Buffer write submission updates the metadata LSN of the buffer.
4567 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
4568 	 *   the recovery item.
4569 	 * - Separate recovery items against the same metadata buffer can share
4570 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
4571 	 *   defined as the starting LSN of the first record in which its
4572 	 *   transaction appears, that a record can hold multiple transactions,
4573 	 *   and/or that a transaction can span multiple records.
4574 	 *
4575 	 * In other words, we are allowed to submit a buffer from log recovery
4576 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
4577 	 * items and cause corruption.
4578 	 *
4579 	 * We don't know up front whether buffers are updated multiple times per
4580 	 * LSN. Therefore, track the current LSN of each commit log record as it
4581 	 * is processed and drain the queue when it changes. Use commit records
4582 	 * because they are ordered correctly by the logging code.
4583 	 */
4584 	if (log->l_recovery_lsn != trans->r_lsn &&
4585 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
4586 		error = xfs_buf_delwri_submit(buffer_list);
4587 		if (error)
4588 			return error;
4589 		log->l_recovery_lsn = trans->r_lsn;
4590 	}
4591 
4592 	return xlog_recovery_process_trans(log, trans, dp, len,
4593 					   ohead->oh_flags, pass, buffer_list);
4594 }
4595 
4596 /*
4597  * There are two valid states of the r_state field.  0 indicates that the
4598  * transaction structure is in a normal state.  We have either seen the
4599  * start of the transaction or the last operation we added was not a partial
4600  * operation.  If the last operation we added to the transaction was a
4601  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4602  *
4603  * NOTE: skip LRs with 0 data length.
4604  */
4605 STATIC int
4606 xlog_recover_process_data(
4607 	struct xlog		*log,
4608 	struct hlist_head	rhash[],
4609 	struct xlog_rec_header	*rhead,
4610 	char			*dp,
4611 	int			pass,
4612 	struct list_head	*buffer_list)
4613 {
4614 	struct xlog_op_header	*ohead;
4615 	char			*end;
4616 	int			num_logops;
4617 	int			error;
4618 
4619 	end = dp + be32_to_cpu(rhead->h_len);
4620 	num_logops = be32_to_cpu(rhead->h_num_logops);
4621 
4622 	/* check the log format matches our own - else we can't recover */
4623 	if (xlog_header_check_recover(log->l_mp, rhead))
4624 		return -EIO;
4625 
4626 	trace_xfs_log_recover_record(log, rhead, pass);
4627 	while ((dp < end) && num_logops) {
4628 
4629 		ohead = (struct xlog_op_header *)dp;
4630 		dp += sizeof(*ohead);
4631 		ASSERT(dp <= end);
4632 
4633 		/* errors will abort recovery */
4634 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4635 						   dp, end, pass, buffer_list);
4636 		if (error)
4637 			return error;
4638 
4639 		dp += be32_to_cpu(ohead->oh_len);
4640 		num_logops--;
4641 	}
4642 	return 0;
4643 }
4644 
4645 /* Recover the EFI if necessary. */
4646 STATIC int
4647 xlog_recover_process_efi(
4648 	struct xfs_mount		*mp,
4649 	struct xfs_ail			*ailp,
4650 	struct xfs_log_item		*lip)
4651 {
4652 	struct xfs_efi_log_item		*efip;
4653 	int				error;
4654 
4655 	/*
4656 	 * Skip EFIs that we've already processed.
4657 	 */
4658 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4659 	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4660 		return 0;
4661 
4662 	spin_unlock(&ailp->ail_lock);
4663 	error = xfs_efi_recover(mp, efip);
4664 	spin_lock(&ailp->ail_lock);
4665 
4666 	return error;
4667 }
4668 
4669 /* Release the EFI since we're cancelling everything. */
4670 STATIC void
4671 xlog_recover_cancel_efi(
4672 	struct xfs_mount		*mp,
4673 	struct xfs_ail			*ailp,
4674 	struct xfs_log_item		*lip)
4675 {
4676 	struct xfs_efi_log_item		*efip;
4677 
4678 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4679 
4680 	spin_unlock(&ailp->ail_lock);
4681 	xfs_efi_release(efip);
4682 	spin_lock(&ailp->ail_lock);
4683 }
4684 
4685 /* Recover the RUI if necessary. */
4686 STATIC int
4687 xlog_recover_process_rui(
4688 	struct xfs_mount		*mp,
4689 	struct xfs_ail			*ailp,
4690 	struct xfs_log_item		*lip)
4691 {
4692 	struct xfs_rui_log_item		*ruip;
4693 	int				error;
4694 
4695 	/*
4696 	 * Skip RUIs that we've already processed.
4697 	 */
4698 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4699 	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4700 		return 0;
4701 
4702 	spin_unlock(&ailp->ail_lock);
4703 	error = xfs_rui_recover(mp, ruip);
4704 	spin_lock(&ailp->ail_lock);
4705 
4706 	return error;
4707 }
4708 
4709 /* Release the RUI since we're cancelling everything. */
4710 STATIC void
4711 xlog_recover_cancel_rui(
4712 	struct xfs_mount		*mp,
4713 	struct xfs_ail			*ailp,
4714 	struct xfs_log_item		*lip)
4715 {
4716 	struct xfs_rui_log_item		*ruip;
4717 
4718 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4719 
4720 	spin_unlock(&ailp->ail_lock);
4721 	xfs_rui_release(ruip);
4722 	spin_lock(&ailp->ail_lock);
4723 }
4724 
4725 /* Recover the CUI if necessary. */
4726 STATIC int
4727 xlog_recover_process_cui(
4728 	struct xfs_trans		*parent_tp,
4729 	struct xfs_ail			*ailp,
4730 	struct xfs_log_item		*lip)
4731 {
4732 	struct xfs_cui_log_item		*cuip;
4733 	int				error;
4734 
4735 	/*
4736 	 * Skip CUIs that we've already processed.
4737 	 */
4738 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4739 	if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4740 		return 0;
4741 
4742 	spin_unlock(&ailp->ail_lock);
4743 	error = xfs_cui_recover(parent_tp, cuip);
4744 	spin_lock(&ailp->ail_lock);
4745 
4746 	return error;
4747 }
4748 
4749 /* Release the CUI since we're cancelling everything. */
4750 STATIC void
4751 xlog_recover_cancel_cui(
4752 	struct xfs_mount		*mp,
4753 	struct xfs_ail			*ailp,
4754 	struct xfs_log_item		*lip)
4755 {
4756 	struct xfs_cui_log_item		*cuip;
4757 
4758 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4759 
4760 	spin_unlock(&ailp->ail_lock);
4761 	xfs_cui_release(cuip);
4762 	spin_lock(&ailp->ail_lock);
4763 }
4764 
4765 /* Recover the BUI if necessary. */
4766 STATIC int
4767 xlog_recover_process_bui(
4768 	struct xfs_trans		*parent_tp,
4769 	struct xfs_ail			*ailp,
4770 	struct xfs_log_item		*lip)
4771 {
4772 	struct xfs_bui_log_item		*buip;
4773 	int				error;
4774 
4775 	/*
4776 	 * Skip BUIs that we've already processed.
4777 	 */
4778 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4779 	if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4780 		return 0;
4781 
4782 	spin_unlock(&ailp->ail_lock);
4783 	error = xfs_bui_recover(parent_tp, buip);
4784 	spin_lock(&ailp->ail_lock);
4785 
4786 	return error;
4787 }
4788 
4789 /* Release the BUI since we're cancelling everything. */
4790 STATIC void
4791 xlog_recover_cancel_bui(
4792 	struct xfs_mount		*mp,
4793 	struct xfs_ail			*ailp,
4794 	struct xfs_log_item		*lip)
4795 {
4796 	struct xfs_bui_log_item		*buip;
4797 
4798 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4799 
4800 	spin_unlock(&ailp->ail_lock);
4801 	xfs_bui_release(buip);
4802 	spin_lock(&ailp->ail_lock);
4803 }
4804 
4805 /* Is this log item a deferred action intent? */
4806 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4807 {
4808 	switch (lip->li_type) {
4809 	case XFS_LI_EFI:
4810 	case XFS_LI_RUI:
4811 	case XFS_LI_CUI:
4812 	case XFS_LI_BUI:
4813 		return true;
4814 	default:
4815 		return false;
4816 	}
4817 }
4818 
4819 /* Take all the collected deferred ops and finish them in order. */
4820 static int
4821 xlog_finish_defer_ops(
4822 	struct xfs_trans	*parent_tp)
4823 {
4824 	struct xfs_mount	*mp = parent_tp->t_mountp;
4825 	struct xfs_trans	*tp;
4826 	int64_t			freeblks;
4827 	uint			resblks;
4828 	int			error;
4829 
4830 	/*
4831 	 * We're finishing the defer_ops that accumulated as a result of
4832 	 * recovering unfinished intent items during log recovery.  We
4833 	 * reserve an itruncate transaction because it is the largest
4834 	 * permanent transaction type.  Since we're the only user of the fs
4835 	 * right now, take 93% (15/16) of the available free blocks.  Use
4836 	 * weird math to avoid a 64-bit division.
4837 	 */
4838 	freeblks = percpu_counter_sum(&mp->m_fdblocks);
4839 	if (freeblks <= 0)
4840 		return -ENOSPC;
4841 	resblks = min_t(int64_t, UINT_MAX, freeblks);
4842 	resblks = (resblks * 15) >> 4;
4843 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4844 			0, XFS_TRANS_RESERVE, &tp);
4845 	if (error)
4846 		return error;
4847 	/* transfer all collected dfops to this transaction */
4848 	xfs_defer_move(tp, parent_tp);
4849 
4850 	return xfs_trans_commit(tp);
4851 }
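
/*
 * Editor's note (worked example, not part of the original source): the
 * "weird math" above takes 15/16 of the free space without a 64-bit
 * divide.  With freeblks = 1,000,000: resblks = (1000000 * 15) >> 4 =
 * 937,500 blocks, i.e. 93.75%.  Clamping to UINT_MAX first keeps the
 * multiply by 15 well inside the 64-bit intermediate.
 */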
4852 
4853 /*
4854  * When this is called, all of the log intent items which did not have
4855  * corresponding log done items should be in the AIL.  What we do now
4856  * is update the data structures associated with each one.
4857  *
4858  * Since we process the log intent items in normal transactions, they
4859  * will be removed at some point after the commit.  This prevents us
4860  * from just walking down the list processing each one.  We'll use a
4861  * flag in the intent item to skip those that we've already processed
4862  * and use the AIL iteration mechanism's generation count to try to
4863  * speed this up at least a bit.
4864  *
4865  * When we start, we know that the intents are the only things in the
4866  * AIL.  As we process them, however, other items are added to the
4867  * AIL.
4868  */
4869 STATIC int
4870 xlog_recover_process_intents(
4871 	struct xlog		*log)
4872 {
4873 	struct xfs_trans	*parent_tp;
4874 	struct xfs_ail_cursor	cur;
4875 	struct xfs_log_item	*lip;
4876 	struct xfs_ail		*ailp;
4877 	int			error;
4878 #if defined(DEBUG) || defined(XFS_WARN)
4879 	xfs_lsn_t		last_lsn;
4880 #endif
4881 
4882 	/*
4883 	 * The intent recovery handlers commit transactions to complete recovery
4884 	 * for individual intents, but any new deferred operations that are
4885 	 * queued during that process are held off until the very end. The
4886 	 * purpose of this transaction is to serve as a container for deferred
4887 	 * operations. Each intent recovery handler must transfer dfops here
4888 	 * before its local transaction commits, and we'll finish the entire
4889 	 * list below.
4890 	 */
4891 	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
4892 	if (error)
4893 		return error;
4894 
4895 	ailp = log->l_ailp;
4896 	spin_lock(&ailp->ail_lock);
4897 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4898 #if defined(DEBUG) || defined(XFS_WARN)
4899 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4900 #endif
4901 	while (lip != NULL) {
4902 		/*
4903 		 * We're done when we see something other than an intent.
4904 		 * There should be no intents left in the AIL now.
4905 		 */
4906 		if (!xlog_item_is_intent(lip)) {
4907 #ifdef DEBUG
4908 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4909 				ASSERT(!xlog_item_is_intent(lip));
4910 #endif
4911 			break;
4912 		}
4913 
4914 		/*
4915 		 * We should never see a redo item with a LSN higher than
4916 		 * the last transaction we found in the log at the start
4917 		 * of recovery.
4918 		 */
4919 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4920 
4921 		/*
4922 		 * NOTE: If your intent processing routine can create more
4923 		 * deferred ops, you /must/ attach them to the dfops in this
4924 		 * routine or else those subsequent intents will get
4925 		 * replayed in the wrong order!
4926 		 */
4927 		switch (lip->li_type) {
4928 		case XFS_LI_EFI:
4929 			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4930 			break;
4931 		case XFS_LI_RUI:
4932 			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4933 			break;
4934 		case XFS_LI_CUI:
4935 			error = xlog_recover_process_cui(parent_tp, ailp, lip);
4936 			break;
4937 		case XFS_LI_BUI:
4938 			error = xlog_recover_process_bui(parent_tp, ailp, lip);
4939 			break;
4940 		}
4941 		if (error)
4942 			goto out;
4943 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4944 	}
4945 out:
4946 	xfs_trans_ail_cursor_done(&cur);
4947 	spin_unlock(&ailp->ail_lock);
4948 	if (!error)
4949 		error = xlog_finish_defer_ops(parent_tp);
4950 	xfs_trans_cancel(parent_tp);
4951 
4952 	return error;
4953 }
4954 
4955 /*
4956  * A cancel occurs when the mount has failed and we're bailing out.
4957  * Release all pending log intent items so they don't pin the AIL.
4958  */
4959 STATIC int
4960 xlog_recover_cancel_intents(
4961 	struct xlog		*log)
4962 {
4963 	struct xfs_log_item	*lip;
4964 	int			error = 0;
4965 	struct xfs_ail_cursor	cur;
4966 	struct xfs_ail		*ailp;
4967 
4968 	ailp = log->l_ailp;
4969 	spin_lock(&ailp->ail_lock);
4970 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4971 	while (lip != NULL) {
4972 		/*
4973 		 * We're done when we see something other than an intent.
4974 		 * There should be no intents left in the AIL now.
4975 		 */
4976 		if (!xlog_item_is_intent(lip)) {
4977 #ifdef DEBUG
4978 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4979 				ASSERT(!xlog_item_is_intent(lip));
4980 #endif
4981 			break;
4982 		}
4983 
4984 		switch (lip->li_type) {
4985 		case XFS_LI_EFI:
4986 			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4987 			break;
4988 		case XFS_LI_RUI:
4989 			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4990 			break;
4991 		case XFS_LI_CUI:
4992 			xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4993 			break;
4994 		case XFS_LI_BUI:
4995 			xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4996 			break;
4997 		}
4998 
4999 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
5000 	}
5001 
5002 	xfs_trans_ail_cursor_done(&cur);
5003 	spin_unlock(&ailp->ail_lock);
5004 	return error;
5005 }
5006 
5007 /*
5008  * This routine performs a transaction to null out a bad inode pointer
5009  * in an agi unlinked inode hash bucket.
5010  */
5011 STATIC void
5012 xlog_recover_clear_agi_bucket(
5013 	xfs_mount_t	*mp,
5014 	xfs_agnumber_t	agno,
5015 	int		bucket)
5016 {
5017 	xfs_trans_t	*tp;
5018 	xfs_agi_t	*agi;
5019 	xfs_buf_t	*agibp;
5020 	int		offset;
5021 	int		error;
5022 
5023 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
5024 	if (error)
5025 		goto out_error;
5026 
5027 	error = xfs_read_agi(mp, tp, agno, &agibp);
5028 	if (error)
5029 		goto out_abort;
5030 
5031 	agi = XFS_BUF_TO_AGI(agibp);
5032 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
5033 	offset = offsetof(xfs_agi_t, agi_unlinked) +
5034 		 (sizeof(xfs_agino_t) * bucket);
5035 	xfs_trans_log_buf(tp, agibp, offset,
5036 			  (offset + sizeof(xfs_agino_t) - 1));
5037 
5038 	error = xfs_trans_commit(tp);
5039 	if (error)
5040 		goto out_error;
5041 	return;
5042 
5043 out_abort:
5044 	xfs_trans_cancel(tp);
5045 out_error:
5046 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
5047 	return;
5048 }
5049 
5050 STATIC xfs_agino_t
5051 xlog_recover_process_one_iunlink(
5052 	struct xfs_mount		*mp,
5053 	xfs_agnumber_t			agno,
5054 	xfs_agino_t			agino,
5055 	int				bucket)
5056 {
5057 	struct xfs_buf			*ibp;
5058 	struct xfs_dinode		*dip;
5059 	struct xfs_inode		*ip;
5060 	xfs_ino_t			ino;
5061 	int				error;
5062 
5063 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
5064 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
5065 	if (error)
5066 		goto fail;
5067 
5068 	/*
5069 	 * Get the on disk inode to find the next inode in the bucket.
5070 	 */
5071 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
5072 	if (error)
5073 		goto fail_iput;
5074 
5075 	xfs_iflags_clear(ip, XFS_IRECOVERY);
5076 	ASSERT(VFS_I(ip)->i_nlink == 0);
5077 	ASSERT(VFS_I(ip)->i_mode != 0);
5078 
5079 	/* setup for the next pass */
5080 	agino = be32_to_cpu(dip->di_next_unlinked);
5081 	xfs_buf_relse(ibp);
5082 
5083 	/*
5084 	 * Prevent any DMAPI event from being sent when the reference on
5085 	 * the inode is dropped.
5086 	 */
5087 	ip->i_d.di_dmevmask = 0;
5088 
5089 	xfs_irele(ip);
5090 	return agino;
5091 
5092  fail_iput:
5093 	xfs_irele(ip);
5094  fail:
5095 	/*
5096 	 * We can't read in the inode this bucket points to, or this inode
5097 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
5098 	 * some inodes and space, but at least we won't hang.
5099 	 *
5100 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5101 	 * clear the inode pointer in the bucket.
5102 	 */
5103 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
5104 	return NULLAGINO;
5105 }
5106 
5107 /*
5108  * xlog_iunlink_recover
5109  *
5110  * This is called during recovery to process any inodes which
5111  * we unlinked but not freed when the system crashed.  These
5112  * inodes will be on the lists in the AGI blocks.  What we do
5113  * here is scan all the AGIs and fully truncate and free any
5114  * inodes found on the lists.  Each inode is removed from the
5115  * lists when it has been fully truncated and is freed.  The
5116  * freeing of the inode and its removal from the list must be
5117  * atomic.
5118  */
5119 STATIC void
5120 xlog_recover_process_iunlinks(
5121 	struct xlog	*log)
5122 {
5123 	xfs_mount_t	*mp;
5124 	xfs_agnumber_t	agno;
5125 	xfs_agi_t	*agi;
5126 	xfs_buf_t	*agibp;
5127 	xfs_agino_t	agino;
5128 	int		bucket;
5129 	int		error;
5130 
5131 	mp = log->l_mp;
5132 
5133 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5134 		/*
5135 		 * Find the agi for this ag.
5136 		 */
5137 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5138 		if (error) {
5139 			/*
5140 			 * AGI is b0rked. Don't process it.
5141 			 *
5142 			 * We should probably mark the filesystem as corrupt
5143 			 * after we've recovered all the ag's we can....
5144 			 */
5145 			continue;
5146 		}
5147 		/*
5148 		 * Unlock the buffer so that it can be acquired in the normal
5149 		 * course of the transaction to truncate and free each inode.
5150 		 * Because we are not racing with anyone else here for the AGI
5151 		 * buffer, we don't even need to hold it locked to read the
5152 		 * initial unlinked bucket entries out of the buffer. We keep
5153 		 * buffer reference though, so that it stays pinned in memory
5154 		 * while we need the buffer.
5155 		 */
5156 		agi = XFS_BUF_TO_AGI(agibp);
5157 		xfs_buf_unlock(agibp);
5158 
5159 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5160 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5161 			while (agino != NULLAGINO) {
5162 				agino = xlog_recover_process_one_iunlink(mp,
5163 							agno, agino, bucket);
5164 			}
5165 		}
5166 		xfs_buf_rele(agibp);
5167 	}
5168 }
5169 
5170 STATIC void
5171 xlog_unpack_data(
5172 	struct xlog_rec_header	*rhead,
5173 	char			*dp,
5174 	struct xlog		*log)
5175 {
5176 	int			i, j, k;
5177 
5178 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5179 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5180 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5181 		dp += BBSIZE;
5182 	}
5183 
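	/*
	 * For v2 logs a record can carry more cycle data than fits in the
	 * main header, so the remaining words live in extended headers.
	 * With 512 byte basic blocks each header covers 64 blocks; e.g.
	 * block i = 100 gives j = 1, k = 36, i.e. the word at index 36 of
	 * the first extended header.
	 */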
5184 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5185 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5186 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5187 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5188 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5189 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5190 			dp += BBSIZE;
5191 		}
5192 	}
5193 }
5194 
5195 /*
5196  * CRC check, unpack and process a log record.
5197  */
5198 STATIC int
5199 xlog_recover_process(
5200 	struct xlog		*log,
5201 	struct hlist_head	rhash[],
5202 	struct xlog_rec_header	*rhead,
5203 	char			*dp,
5204 	int			pass,
5205 	struct list_head	*buffer_list)
5206 {
5207 	__le32			old_crc = rhead->h_crc;
5208 	__le32			crc;
5209 
5210 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5211 
5212 	/*
5213 	 * Nothing else to do if this is a CRC verification pass. Just return
5214 	 * success if the on-disk CRC is zero or matches the computed one.
5215 	 * Unfortunately, mkfs always writes a zero CRC, so we must consider
5216 	 * that valid even on v5 supers. Otherwise, return EFSBADCRC on
5217 	 * failure so the callers up the stack know precisely what failed.
5218 	 */
5219 	if (pass == XLOG_RECOVER_CRCPASS) {
5220 		if (old_crc && crc != old_crc)
5221 			return -EFSBADCRC;
5222 		return 0;
5223 	}
5224 
5225 	/*
5226 	 * We're in the normal recovery path. On a mismatch, issue an advisory
5227 	 * warning unless the on-disk CRC is zero on a non-CRC filesystem;
5228 	 * skipping that case prevents warnings from being emitted when
5229 	 * upgrading from a kernel that did not add CRCs by default.
5230 	 */
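	/*
	 * For reference, the outcomes below when crc != old_crc are:
	 *
	 *	old_crc		fs has CRCs	action
	 *	zero		no		silently ignored
	 *	zero		yes		warn, fail -EFSCORRUPTED
	 *	non-zero	no		warn only
	 *	non-zero	yes		warn, fail -EFSCORRUPTED
	 */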
5231 	if (crc != old_crc) {
5232 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5233 			xfs_alert(log->l_mp,
5234 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
5235 					le32_to_cpu(old_crc),
5236 					le32_to_cpu(crc));
5237 			xfs_hex_dump(dp, 32);
5238 		}
5239 
5240 		/*
5241 		 * If the filesystem is CRC enabled, this mismatch becomes a
5242 		 * fatal log corruption failure.
5243 		 */
5244 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5245 			return -EFSCORRUPTED;
5246 	}
5247 
5248 	xlog_unpack_data(rhead, dp, log);
5249 
5250 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5251 					 buffer_list);
5252 }
5253 
5254 STATIC int
5255 xlog_valid_rec_header(
5256 	struct xlog		*log,
5257 	struct xlog_rec_header	*rhead,
5258 	xfs_daddr_t		blkno)
5259 {
5260 	int			hlen;
5261 
5262 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5263 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5264 				XFS_ERRLEVEL_LOW, log->l_mp);
5265 		return -EFSCORRUPTED;
5266 	}
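	/*
	 * Only version bits within XLOG_VERSION_OKBITS (v1 and v2) are
	 * recognised; e.g. h_version = 0 or h_version = 0x4 both fail the
	 * check below.
	 */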
5267 	if (unlikely(
5268 	    (!rhead->h_version ||
5269 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5270 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5271 			__func__, be32_to_cpu(rhead->h_version));
5272 		return -EIO;
5273 	}
5274 
5275 	/* LR body must have data or it wouldn't have been written */
5276 	hlen = be32_to_cpu(rhead->h_len);
5277 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5278 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5279 				XFS_ERRLEVEL_LOW, log->l_mp);
5280 		return -EFSCORRUPTED;
5281 	}
5282 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5283 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5284 				XFS_ERRLEVEL_LOW, log->l_mp);
5285 		return -EFSCORRUPTED;
5286 	}
5287 	return 0;
5288 }
5289 
5290 /*
5291  * Read the log from tail to head and process the log records found.
5292  * Handle the two cases where the tail and head are in the same cycle
5293  * and where the active portion of the log wraps around the end of
5294  * the physical log separately.  The pass parameter is passed through
5295  * to the routines called to process the data and is not looked at
5296  * here.
5297  */
5298 STATIC int
5299 xlog_do_recovery_pass(
5300 	struct xlog		*log,
5301 	xfs_daddr_t		head_blk,
5302 	xfs_daddr_t		tail_blk,
5303 	int			pass,
5304 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
5305 {
5306 	xlog_rec_header_t	*rhead;
5307 	xfs_daddr_t		blk_no, rblk_no;
5308 	xfs_daddr_t		rhead_blk;
5309 	char			*offset;
5310 	xfs_buf_t		*hbp, *dbp;
5311 	int			error = 0, h_size, h_len;
5312 	int			error2 = 0;
5313 	int			bblks, split_bblks;
5314 	int			hblks, split_hblks, wrapped_hblks;
5315 	int			i;
5316 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
5317 	LIST_HEAD(buffer_list);
5318 
5319 	ASSERT(head_blk != tail_blk);
5320 	blk_no = rhead_blk = tail_blk;
5321 
5322 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
5323 		INIT_HLIST_HEAD(&rhash[i]);
5324 
5325 	/*
5326 	 * Read the header of the tail block and get the iclog buffer size from
5327 	 * h_size.  Use this to tell how many sectors make up the log header.
5328 	 */
5329 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5330 		/*
5331 		 * When using variable length iclogs, read first sector of
5332 		 * iclog header and extract the header size from it.  Get a
5333 		 * new hbp that is the correct size.
5334 		 */
5335 		hbp = xlog_get_bp(log, 1);
5336 		if (!hbp)
5337 			return -ENOMEM;
5338 
5339 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5340 		if (error)
5341 			goto bread_err1;
5342 
5343 		rhead = (xlog_rec_header_t *)offset;
5344 		error = xlog_valid_rec_header(log, rhead, tail_blk);
5345 		if (error)
5346 			goto bread_err1;
5347 
5348 		/*
5349 		 * xfsprogs has a bug where record length is based on lsunit but
5350 		 * h_size (iclog size) is hardcoded to 32k. Now that we
5351 		 * unconditionally CRC verify the unmount record, this means the
5352 		 * log buffer can be too small for the record and cause an
5353 		 * overrun.
5354 		 *
5355 		 * Detect this condition here. Use lsunit for the buffer size as
5356 		 * long as this looks like the mkfs case. Otherwise, return an
5357 		 * error to avoid a buffer overrun.
5358 		 */
5359 		h_size = be32_to_cpu(rhead->h_size);
5360 		h_len = be32_to_cpu(rhead->h_len);
5361 		if (h_len > h_size) {
5362 			if (h_len <= log->l_mp->m_logbsize &&
5363 			    be32_to_cpu(rhead->h_num_logops) == 1) {
5364 				xfs_warn(log->l_mp,
5365 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
5366 					 h_size, log->l_mp->m_logbsize);
5367 				h_size = log->l_mp->m_logbsize;
5368 			} else {
				error = -EFSCORRUPTED;
				goto bread_err1;
			}
5370 		}
5371 
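		/*
		 * Each 512 byte log record header block carries cycle data
		 * for up to XLOG_HEADER_CYCLE_SIZE (32k) of record payload,
		 * so e.g. a 128k iclog needs 128k / 32k = 4 header blocks.
		 */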
5372 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5373 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5374 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5375 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
5376 				hblks++;
5377 			xlog_put_bp(hbp);
5378 			hbp = xlog_get_bp(log, hblks);
5379 		} else {
5380 			hblks = 1;
5381 		}
5382 	} else {
5383 		ASSERT(log->l_sectBBsize == 1);
5384 		hblks = 1;
5385 		hbp = xlog_get_bp(log, 1);
5386 		h_size = XLOG_BIG_RECORD_BSIZE;
5387 	}
5388 
5389 	if (!hbp)
5390 		return -ENOMEM;
5391 	dbp = xlog_get_bp(log, BTOBB(h_size));
5392 	if (!dbp) {
5393 		xlog_put_bp(hbp);
5394 		return -ENOMEM;
5395 	}
5396 
5397 	memset(rhash, 0, sizeof(rhash));
5398 	if (tail_blk > head_blk) {
5399 		/*
5400 		 * Perform recovery around the end of the physical log.
5401 		 * When the head is not on the same cycle number as the tail,
5402 		 * we can't do a sequential recovery.
5403 		 */
5404 		while (blk_no < log->l_logBBsize) {
5405 			/*
5406 			 * Check for header wrapping around physical end-of-log
5407 			 */
5408 			offset = hbp->b_addr;
5409 			split_hblks = 0;
5410 			wrapped_hblks = 0;
5411 			if (blk_no + hblks <= log->l_logBBsize) {
5412 				/* Read header in one read */
5413 				error = xlog_bread(log, blk_no, hblks, hbp,
5414 						   &offset);
5415 				if (error)
5416 					goto bread_err2;
5417 			} else {
5418 				/* This LR is split across physical log end */
5419 				if (blk_no != log->l_logBBsize) {
5420 					/* some data before physical log end */
5421 					ASSERT(blk_no <= INT_MAX);
5422 					split_hblks = log->l_logBBsize - (int)blk_no;
5423 					ASSERT(split_hblks > 0);
5424 					error = xlog_bread(log, blk_no,
5425 							   split_hblks, hbp,
5426 							   &offset);
5427 					if (error)
5428 						goto bread_err2;
5429 				}
5430 
5431 				/*
5432 				 * Note: this black magic still works with
5433 				 * large sector sizes (non-512) only because:
5434 				 * - we increased the buffer size originally
5435 				 *   by 1 sector giving us enough extra space
5436 				 *   for the second read;
5437 				 * - the log start is guaranteed to be sector
5438 				 *   aligned;
5439 				 * - we read the log end (LR header start)
5440 				 *   _first_, then the log start (LR header end)
5441 				 *   - order is important.
5442 				 */
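				/*
				 * For example, in a 1000 block log with
				 * hblks = 4 and a header starting at block
				 * 998: split_hblks = 2, blocks 998-999 were
				 * read above, and the remaining 2 header
				 * blocks are read from block 0 into the
				 * buffer at offset BBTOB(2).
				 */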
5443 				wrapped_hblks = hblks - split_hblks;
5444 				error = xlog_bread_offset(log, 0,
5445 						wrapped_hblks, hbp,
5446 						offset + BBTOB(split_hblks));
5447 				if (error)
5448 					goto bread_err2;
5449 			}
5450 			rhead = (xlog_rec_header_t *)offset;
5451 			error = xlog_valid_rec_header(log, rhead,
5452 						split_hblks ? blk_no : 0);
5453 			if (error)
5454 				goto bread_err2;
5455 
5456 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5457 			blk_no += hblks;
5458 
5459 			/*
5460 			 * Read the log record data in multiple reads if it
5461 			 * wraps around the end of the log. Note that if the
5462 			 * header already wrapped, blk_no could point past the
5463 			 * end of the log. The record data is contiguous in
5464 			 * that case.
5465 			 */
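			/*
			 * e.g. in a 1000 block log, a wrapped header leaving
			 * blk_no = 1002 maps to rblk_no = 2, from where the
			 * record data can be read contiguously.
			 */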
5466 			if (blk_no + bblks <= log->l_logBBsize ||
5467 			    blk_no >= log->l_logBBsize) {
5468 				rblk_no = xlog_wrap_logbno(log, blk_no);
5469 				error = xlog_bread(log, rblk_no, bblks, dbp,
5470 						   &offset);
5471 				if (error)
5472 					goto bread_err2;
5473 			} else {
5474 				/* This log record is split across the
5475 				 * physical end of log */
5476 				offset = dbp->b_addr;
5477 				split_bblks = 0;
5478 				if (blk_no != log->l_logBBsize) {
5479 					/* some data is before the physical
5480 					 * end of log */
5481 					ASSERT(!wrapped_hblks);
5482 					ASSERT(blk_no <= INT_MAX);
5483 					split_bblks =
5484 						log->l_logBBsize - (int)blk_no;
5485 					ASSERT(split_bblks > 0);
5486 					error = xlog_bread(log, blk_no,
5487 							split_bblks, dbp,
5488 							&offset);
5489 					if (error)
5490 						goto bread_err2;
5491 				}
5492 
5493 				/*
5494 				 * Note: this black magic still works with
5495 				 * large sector sizes (non-512) only because:
5496 				 * - we increased the buffer size originally
5497 				 *   by 1 sector giving us enough extra space
5498 				 *   for the second read;
5499 				 * - the log start is guaranteed to be sector
5500 				 *   aligned;
5501 				 * - we read the log end (LR header start)
5502 				 *   _first_, then the log start (LR header end)
5503 				 *   - order is important.
5504 				 */
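				/*
				 * For example, with bblks = 16 and
				 * blk_no = 992 in a 1000 block log:
				 * split_bblks = 8 covered blocks 992-999
				 * above, and the remaining 8 data blocks are
				 * read here from block 0 at offset BBTOB(8).
				 */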
5505 				error = xlog_bread_offset(log, 0,
5506 						bblks - split_bblks, dbp,
5507 						offset + BBTOB(split_bblks));
5508 				if (error)
5509 					goto bread_err2;
5510 			}
5511 
5512 			error = xlog_recover_process(log, rhash, rhead, offset,
5513 						     pass, &buffer_list);
5514 			if (error)
5515 				goto bread_err2;
5516 
5517 			blk_no += bblks;
5518 			rhead_blk = blk_no;
5519 		}
5520 
5521 		ASSERT(blk_no >= log->l_logBBsize);
5522 		blk_no -= log->l_logBBsize;
5523 		rhead_blk = blk_no;
5524 	}
5525 
5526 	/* read first part of physical log */
5527 	while (blk_no < head_blk) {
5528 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5529 		if (error)
5530 			goto bread_err2;
5531 
5532 		rhead = (xlog_rec_header_t *)offset;
5533 		error = xlog_valid_rec_header(log, rhead, blk_no);
5534 		if (error)
5535 			goto bread_err2;
5536 
5537 		/* blocks in data section */
5538 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5539 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5540 				   &offset);
5541 		if (error)
5542 			goto bread_err2;
5543 
5544 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
5545 					     &buffer_list);
5546 		if (error)
5547 			goto bread_err2;
5548 
5549 		blk_no += bblks + hblks;
5550 		rhead_blk = blk_no;
5551 	}
5552 
5553  bread_err2:
5554 	xlog_put_bp(dbp);
5555  bread_err1:
5556 	xlog_put_bp(hbp);
5557 
5558 	/*
5559 	 * Submit buffers that have been added from the last record processed,
5560 	 * regardless of error status.
5561 	 */
5562 	if (!list_empty(&buffer_list))
5563 		error2 = xfs_buf_delwri_submit(&buffer_list);
5564 
5565 	if (error && first_bad)
5566 		*first_bad = rhead_blk;
5567 
5568 	/*
5569 	 * Transactions are freed at commit time but transactions without commit
5570 	 * records on disk are never committed. Free any that may be left in the
5571 	 * hash table.
5572 	 */
5573 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5574 		struct hlist_node	*tmp;
5575 		struct xlog_recover	*trans;
5576 
5577 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5578 			xlog_recover_free_trans(trans);
5579 	}
5580 
5581 	return error ? error : error2;
5582 }
5583 
5584 /*
5585  * Do the recovery of the log.  We actually do this in two phases.
5586  * The two passes are necessary in order to implement the function
5587  * of cancelling a record written into the log.  The first pass
5588  * determines those things which have been cancelled, and the
5589  * second pass replays log items normally except for those which
5590  * have been cancelled.  The handling of the replay and cancellations
5591  * takes place in the log item type specific routines.
5592  *
5593  * The table of items which have cancel records in the log is allocated
5594  * and freed at this level, since only here do we know when all of
5595  * the log recovery has been completed.
5596  */
5597 STATIC int
5598 xlog_do_log_recovery(
5599 	struct xlog	*log,
5600 	xfs_daddr_t	head_blk,
5601 	xfs_daddr_t	tail_blk)
5602 {
5603 	int		error, i;
5604 
5605 	ASSERT(head_blk != tail_blk);
5606 
5607 	/*
5608 	 * First do a pass to find all of the cancelled buf log items.
5609 	 * Store them in the buf_cancel_table for use in the second pass.
5610 	 */
5611 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5612 						 sizeof(struct list_head),
5613 						 KM_SLEEP);
5614 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5615 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5616 
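	/*
	 * The cancel table is a small hash of list heads: entries recorded
	 * in pass 1 are bucketed by disk block number so that pass 2 can
	 * cheaply look up whether a given buf log item was cancelled and
	 * should be skipped.
	 */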
5617 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5618 				      XLOG_RECOVER_PASS1, NULL);
5619 	if (error) {
5620 		kmem_free(log->l_buf_cancel_table);
5621 		log->l_buf_cancel_table = NULL;
5622 		return error;
5623 	}
5624 	/*
5625 	 * Then do a second pass to actually recover the items in the log.
5626 	 * When it is complete free the table of buf cancel items.
5627 	 */
5628 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5629 				      XLOG_RECOVER_PASS2, NULL);
5630 #ifdef DEBUG
5631 	if (!error) {
5632 		int	i;
5633 
5634 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5635 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5636 	}
5637 #endif	/* DEBUG */
5638 
5639 	kmem_free(log->l_buf_cancel_table);
5640 	log->l_buf_cancel_table = NULL;
5641 
5642 	return error;
5643 }
5644 
5645 /*
5646  * Do the actual recovery
5647  */
5648 STATIC int
5649 xlog_do_recover(
5650 	struct xlog	*log,
5651 	xfs_daddr_t	head_blk,
5652 	xfs_daddr_t	tail_blk)
5653 {
5654 	struct xfs_mount *mp = log->l_mp;
5655 	int		error;
5656 	xfs_buf_t	*bp;
5657 	xfs_sb_t	*sbp;
5658 
5659 	trace_xfs_log_recover(log, head_blk, tail_blk);
5660 
5661 	/*
5662 	 * First replay the images in the log.
5663 	 */
5664 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
5665 	if (error)
5666 		return error;
5667 
5668 	/*
5669 	 * If IO errors happened during recovery, bail out.
5670 	 */
5671 	if (XFS_FORCED_SHUTDOWN(mp))
5672 		return -EIO;
5674 
5675 	/*
5676 	 * We now update the tail_lsn since much of the recovery has completed
5677 	 * and there may be space available to use.  If there were no extent
5678 	 * frees or iunlinks, we can free up the entire log and set the
5679 	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
5680 	 * to be the lsn of the last known good LR on disk.  If there are
5681 	 * extent frees or iunlinks they will have some entries in the AIL,
5682 	 * so we look at the AIL to determine how to set the tail_lsn.
5683 	 */
5684 	xlog_assign_tail_lsn(mp);
5685 
5686 	/*
5687 	 * Now that we've finished replaying all buffer and inode
5688 	 * updates, re-read in the superblock and reverify it.
5689 	 */
5690 	bp = xfs_getsb(mp, 0);
5691 	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5692 	ASSERT(!(bp->b_flags & XBF_WRITE));
5693 	bp->b_flags |= XBF_READ;
5694 	bp->b_ops = &xfs_sb_buf_ops;
5695 
5696 	error = xfs_buf_submit(bp);
5697 	if (error) {
5698 		if (!XFS_FORCED_SHUTDOWN(mp)) {
5699 			xfs_buf_ioerror_alert(bp, __func__);
5700 			ASSERT(0);
5701 		}
5702 		xfs_buf_relse(bp);
5703 		return error;
5704 	}
5705 
5706 	/* Convert superblock from on-disk format */
5707 	sbp = &mp->m_sb;
5708 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5709 	xfs_buf_relse(bp);
5710 
5711 	/* re-initialise in-core superblock and geometry structures */
5712 	xfs_reinit_percpu_counters(mp);
5713 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5714 	if (error) {
5715 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5716 		return error;
5717 	}
5718 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5719 
5720 	xlog_recover_check_summary(log);
5721 
5722 	/* Normal transactions can now occur */
5723 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5724 	return 0;
5725 }
5726 
5727 /*
5728  * Perform recovery and re-initialize some log variables in xlog_find_tail.
5729  *
5730  * Return error or zero.
5731  */
5732 int
5733 xlog_recover(
5734 	struct xlog	*log)
5735 {
5736 	xfs_daddr_t	head_blk, tail_blk;
5737 	int		error;
5738 
5739 	/* find the tail of the log */
5740 	error = xlog_find_tail(log, &head_blk, &tail_blk);
5741 	if (error)
5742 		return error;
5743 
5744 	/*
5745 	 * The superblock was read before the log was available and thus the LSN
5746 	 * could not be verified. Check the superblock LSN against the current
5747 	 * LSN now that it's known.
5748 	 */
5749 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5750 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5751 		return -EINVAL;
5752 
5753 	if (tail_blk != head_blk) {
5754 		/* There used to be a comment here:
5755 		 *
5756 		 * disallow recovery on read-only mounts.  note -- mount
5757 		 * checks for ENOSPC and turns it into an intelligent
5758 		 * error message.
5759 		 * ...but this is no longer true.  Now, unless you specify
5760 		 * NORECOVERY (in which case this function would never be
5761 		 * called), we just go ahead and recover.  We do this all
5762 		 * under the vfs layer, so we can get away with it unless
5763 		 * the device itself is read-only, in which case we fail.
5764 		 */
5765 		error = xfs_dev_is_read_only(log->l_mp, "recovery");
5766 		if (error)
5767 			return error;
5768 
5769 		/*
5770 		 * Version 5 superblock log feature mask validation. We know the
5771 		 * log is dirty so check if there are any unknown log features
5772 		 * in what we need to recover. If there are unknown features
5773 		 * (e.g. unsupported transactions, then simply reject the
5774 		 * (e.g. unsupported transactions), then simply reject the
5775 		 */
5776 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5777 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5778 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5779 			xfs_warn(log->l_mp,
5780 "Superblock has unknown incompatible log features (0x%x) enabled.",
5781 				(log->l_mp->m_sb.sb_features_log_incompat &
5782 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5783 			xfs_warn(log->l_mp,
5784 "The log cannot be fully and/or safely recovered by this kernel.");
5785 			xfs_warn(log->l_mp,
5786 "Please recover the log on a kernel that supports the unknown features.");
5787 			return -EINVAL;
5788 		}
5789 
5790 		/*
5791 		 * Delay log recovery if the debug hook is set. This is debug
5792 		 * instrumentation to coordinate simulation of I/O failures with
5793 		 * log recovery.
5794 		 */
5795 		if (xfs_globals.log_recovery_delay) {
5796 			xfs_notice(log->l_mp,
5797 				"Delaying log recovery for %d seconds.",
5798 				xfs_globals.log_recovery_delay);
5799 			msleep(xfs_globals.log_recovery_delay * 1000);
5800 		}
5801 
5802 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5803 				log->l_mp->m_logname ? log->l_mp->m_logname
5804 						     : "internal");
5805 
5806 		error = xlog_do_recover(log, head_blk, tail_blk);
5807 		log->l_flags |= XLOG_RECOVERY_NEEDED;
5808 	}
5809 	return error;
5810 }
5811 
5812 /*
5813  * In the first part of recovery we replay inodes and buffers and build
5814  * up the list of extent free items which need to be processed.  Here
5815  * we process the extent free items and clean up the on disk unlinked
5816  * inode lists.  This is separated from the first part of recovery so
5817  * that the root and real-time bitmap inodes can be read in from disk in
5818  * between the two stages.  This is necessary so that we can free space
5819  * in the real-time portion of the file system.
5820  */
5821 int
5822 xlog_recover_finish(
5823 	struct xlog	*log)
5824 {
5825 	/*
5826 	 * Now we're ready to do the transactions needed for the
5827 	 * rest of recovery.  Start with completing all the extent
5828 	 * free intent records and then process the unlinked inode
5829 	 * lists.  At this point, we essentially run in normal mode
5830 	 * except that we're still performing recovery actions
5831 	 * rather than accepting new requests.
5832 	 */
5833 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5834 		int	error;
5835 		int	error;

5836 		error = xlog_recover_process_intents(log);
5837 			xfs_alert(log->l_mp, "Failed to recover intents");
5838 			return error;
5839 		}
5840 
5841 		/*
5842 		 * Sync the log to get all the intents out of the AIL.
5843 		 * This isn't absolutely necessary, but it helps in
5844 		 * case the unlink transactions have trouble pushing the
5845 		 * intents out of the way.
5846 		 */
5847 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5848 
5849 		xlog_recover_process_iunlinks(log);
5850 
5851 		xlog_recover_check_summary(log);
5852 
5853 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5854 				log->l_mp->m_logname ? log->l_mp->m_logname
5855 						     : "internal");
5856 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5857 	} else {
5858 		xfs_info(log->l_mp, "Ending clean mount");
5859 	}
5860 	return 0;
5861 }
5862 
5863 int
5864 xlog_recover_cancel(
5865 	struct xlog	*log)
5866 {
5867 	int		error = 0;
5868 
5869 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
5870 		error = xlog_recover_cancel_intents(log);
5871 
5872 	return error;
5873 }
5874 
5875 #if defined(DEBUG)
5876 /*
5877  * Read all of the agf and agi counters and accumulate free block and
5878  * inode totals that should be consistent with the superblock counters.
5879  */
5880 STATIC void
5881 xlog_recover_check_summary(
5882 	struct xlog	*log)
5883 {
5884 	xfs_mount_t	*mp;
5885 	xfs_agf_t	*agfp;
5886 	xfs_buf_t	*agfbp;
5887 	xfs_buf_t	*agibp;
5888 	xfs_agnumber_t	agno;
5889 	uint64_t	freeblks;
5890 	uint64_t	itotal;
5891 	uint64_t	ifree;
5892 	int		error;
5893 
5894 	mp = log->l_mp;
5895 
5896 	freeblks = 0LL;
5897 	itotal = 0LL;
5898 	ifree = 0LL;
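	/*
	 * Per-AG free space is the btree-tracked free blocks plus the
	 * blocks sitting in the AGFL, hence the agf_freeblks + agf_flcount
	 * sum below.
	 */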
5899 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5900 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5901 		if (error) {
5902 			xfs_alert(mp, "%s agf read failed agno %d error %d",
5903 						__func__, agno, error);
5904 		} else {
5905 			agfp = XFS_BUF_TO_AGF(agfbp);
5906 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
5907 				    be32_to_cpu(agfp->agf_flcount);
5908 			xfs_buf_relse(agfbp);
5909 		}
5910 
5911 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5912 		if (error) {
5913 			xfs_alert(mp, "%s agi read failed agno %d error %d",
5914 						__func__, agno, error);
5915 		} else {
5916 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
5917 
5918 			itotal += be32_to_cpu(agi->agi_count);
5919 			ifree += be32_to_cpu(agi->agi_freecount);
5920 			xfs_buf_relse(agibp);
5921 		}
5922 	}
5923 }
5924 #endif /* DEBUG */
5925