xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_log.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_trans_priv.h"
22 #include "xfs_alloc.h"
23 #include "xfs_ialloc.h"
24 #include "xfs_trace.h"
25 #include "xfs_icache.h"
26 #include "xfs_error.h"
27 #include "xfs_buf_item.h"
28 
29 #define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)
30 
31 STATIC int
32 xlog_find_zeroed(
33 	struct xlog	*,
34 	xfs_daddr_t	*);
35 STATIC int
36 xlog_clear_stale_blocks(
37 	struct xlog	*,
38 	xfs_lsn_t);
39 #if defined(DEBUG)
40 STATIC void
41 xlog_recover_check_summary(
42 	struct xlog *);
43 #else
44 #define	xlog_recover_check_summary(log)
45 #endif
46 STATIC int
47 xlog_do_recovery_pass(
48 	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
49 
50 /*
51  * Sector aligned buffer routines for buffer create/read/write/access
52  */
53 
54 /*
55  * Verify the log-relative block number and length in basic blocks are valid for
56  * an operation involving the given XFS log buffer. Returns true if the fields
57  * are valid, false otherwise.
58  */
59 static inline bool
60 xlog_verify_bno(
61 	struct xlog	*log,
62 	xfs_daddr_t	blk_no,
63 	int		bbcount)
64 {
65 	if (blk_no < 0 || blk_no >= log->l_logBBsize)
66 		return false;
67 	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
68 		return false;
69 	return true;
70 }
71 
72 /*
73  * Allocate a buffer to hold log data.  The buffer needs to be able to map to
74  * a range of nbblks basic blocks at any valid offset within the log.
75  */
76 static char *
77 xlog_alloc_buffer(
78 	struct xlog	*log,
79 	int		nbblks)
80 {
81 	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
82 
83 	/*
84 	 * Pass log block 0 since we don't have an address yet; the buffer
85 	 * will be verified on read.
86 	 */
87 	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
88 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
89 			nbblks);
90 		return NULL;
91 	}
92 
93 	/*
94 	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
95 	 * basic block size), so we round up the requested size to accommodate
96 	 * the basic blocks required for complete log sectors.
97 	 *
98 	 * In addition, the buffer may be used for a non-sector-aligned block
99 	 * offset, in which case an I/O of the requested size could extend
100 	 * beyond the end of the buffer.  If the requested size is only 1 basic
101 	 * block it will never straddle a sector boundary, so this won't be an
102 	 * issue.  Nor will this be a problem if the log I/O is done in basic
103 	 * blocks (sector size 1).  But otherwise we extend the buffer by one
104 	 * extra log sector to ensure there's space to accommodate this
105 	 * possibility.
106 	 */
107 	if (nbblks > 1 && log->l_sectBBsize > 1)
108 		nbblks += log->l_sectBBsize;
109 	nbblks = round_up(nbblks, log->l_sectBBsize);
110 	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
111 }
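
/*
 * As a rough illustration with hypothetical numbers (a log on a 4k-sector
 * device, so l_sectBBsize == 8): a request for nbblks == 3 is first extended
 * to 3 + 8 == 11 and then rounded up to 16 basic blocks, i.e. an 8 KiB
 * buffer.  That leaves room for the requested blocks to land at a
 * non-sector-aligned offset inside the buffer once the I/O has been widened
 * to whole log sectors.
 */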
112 
113 /*
114  * Return the address of the start of the given block number's data
115  * in a log buffer.  The buffer covers a log sector-aligned region.
116  */
117 static inline unsigned int
118 xlog_align(
119 	struct xlog	*log,
120 	xfs_daddr_t	blk_no)
121 {
122 	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
123 }
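
/*
 * For example, on a hypothetical 4k-sector log (l_sectBBsize == 8), a caller
 * interested in block 13 gets a buffer whose data actually starts at block 8,
 * and xlog_align() returns BBTOB(13 & 7) == 5 * 512 == 2560, the byte offset
 * of block 13 within that buffer.
 */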
124 
125 static int
126 xlog_do_io(
127 	struct xlog		*log,
128 	xfs_daddr_t		blk_no,
129 	unsigned int		nbblks,
130 	char			*data,
131 	unsigned int		op)
132 {
133 	int			error;
134 
135 	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
136 		xfs_warn(log->l_mp,
137 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
138 			 blk_no, nbblks);
139 		return -EFSCORRUPTED;
140 	}
141 
142 	blk_no = round_down(blk_no, log->l_sectBBsize);
143 	nbblks = round_up(nbblks, log->l_sectBBsize);
144 	ASSERT(nbblks > 0);
145 
146 	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
147 			BBTOB(nbblks), data, op);
148 	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
149 		xfs_alert(log->l_mp,
150 			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
151 			  op == REQ_OP_WRITE ? "write" : "read",
152 			  blk_no, nbblks, error);
153 	}
154 	return error;
155 }
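
/*
 * A small worked example of the rounding above, again assuming a hypothetical
 * 4k-sector log (l_sectBBsize == 8): a single-block read of block 13 becomes
 * an 8-block I/O starting at block 8 (round_down(13, 8) == 8 and
 * round_up(1, 8) == 8).  xlog_bread() then hands the caller a pointer 2560
 * bytes into the buffer via xlog_align(), which is where block 13 landed.
 */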
156 
157 STATIC int
158 xlog_bread_noalign(
159 	struct xlog	*log,
160 	xfs_daddr_t	blk_no,
161 	int		nbblks,
162 	char		*data)
163 {
164 	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
165 }
166 
167 STATIC int
168 xlog_bread(
169 	struct xlog	*log,
170 	xfs_daddr_t	blk_no,
171 	int		nbblks,
172 	char		*data,
173 	char		**offset)
174 {
175 	int		error;
176 
177 	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 	if (!error)
179 		*offset = data + xlog_align(log, blk_no);
180 	return error;
181 }
182 
183 STATIC int
184 xlog_bwrite(
185 	struct xlog	*log,
186 	xfs_daddr_t	blk_no,
187 	int		nbblks,
188 	char		*data)
189 {
190 	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
191 }
192 
193 #ifdef DEBUG
194 /*
195  * dump debug superblock and log record information
196  */
197 STATIC void
198 xlog_header_check_dump(
199 	xfs_mount_t		*mp,
200 	xlog_rec_header_t	*head)
201 {
202 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
203 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
204 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
205 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
206 }
207 #else
208 #define xlog_header_check_dump(mp, head)
209 #endif
210 
211 /*
212  * check log record header for recovery
213  */
214 STATIC int
215 xlog_header_check_recover(
216 	xfs_mount_t		*mp,
217 	xlog_rec_header_t	*head)
218 {
219 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
220 
221 	/*
222 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
223 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
224 	 * a dirty log created in IRIX.
225 	 */
226 	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
227 		xfs_warn(mp,
228 	"dirty log written in incompatible format - can't recover");
229 		xlog_header_check_dump(mp, head);
230 		return -EFSCORRUPTED;
231 	}
232 	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
233 					   &head->h_fs_uuid))) {
234 		xfs_warn(mp,
235 	"dirty log entry has mismatched uuid - can't recover");
236 		xlog_header_check_dump(mp, head);
237 		return -EFSCORRUPTED;
238 	}
239 	return 0;
240 }
241 
242 /*
243  * read the head block of the log and check the header
244  */
245 STATIC int
246 xlog_header_check_mount(
247 	xfs_mount_t		*mp,
248 	xlog_rec_header_t	*head)
249 {
250 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
251 
252 	if (uuid_is_null(&head->h_fs_uuid)) {
253 		/*
254 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
255 		 * h_fs_uuid is null, we assume this log was last mounted
256 		 * by IRIX and continue.
257 		 */
258 		xfs_warn(mp, "null uuid in log - IRIX style log");
259 	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
260 						  &head->h_fs_uuid))) {
261 		xfs_warn(mp, "log has mismatched uuid - can't recover");
262 		xlog_header_check_dump(mp, head);
263 		return -EFSCORRUPTED;
264 	}
265 	return 0;
266 }
267 
268 void
269 xlog_recover_iodone(
270 	struct xfs_buf	*bp)
271 {
272 	if (bp->b_error) {
273 		/*
274 		 * We're not going to bother about retrying
275 		 * this during recovery. One strike!
276 		 */
277 		if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
278 			xfs_buf_ioerror_alert(bp, __this_address);
279 			xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
280 		}
281 	}
282 
283 	/*
284 	 * On v5 supers, a bli could be attached to update the metadata LSN.
285 	 * Clean it up.
286 	 */
287 	if (bp->b_log_item)
288 		xfs_buf_item_relse(bp);
289 	ASSERT(bp->b_log_item == NULL);
290 
291 	bp->b_iodone = NULL;
292 	xfs_buf_ioend(bp);
293 }
294 
295 /*
296  * This routine finds (to an approximation) the first block in the physical
297  * log which contains the given cycle.  It uses a binary search algorithm.
298  * Note that the algorithm cannot be exact, because torn or incomplete
299  * writes may leave the on-disk cycle numbers in an inconsistent state.
300  */
301 STATIC int
302 xlog_find_cycle_start(
303 	struct xlog	*log,
304 	char		*buffer,
305 	xfs_daddr_t	first_blk,
306 	xfs_daddr_t	*last_blk,
307 	uint		cycle)
308 {
309 	char		*offset;
310 	xfs_daddr_t	mid_blk;
311 	xfs_daddr_t	end_blk;
312 	uint		mid_cycle;
313 	int		error;
314 
315 	end_blk = *last_blk;
316 	mid_blk = BLK_AVG(first_blk, end_blk);
317 	while (mid_blk != first_blk && mid_blk != end_blk) {
318 		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
319 		if (error)
320 			return error;
321 		mid_cycle = xlog_get_cycle(offset);
322 		if (mid_cycle == cycle)
323 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
324 		else
325 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
326 		mid_blk = BLK_AVG(first_blk, end_blk);
327 	}
328 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
329 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
330 
331 	*last_blk = end_blk;
332 
333 	return 0;
334 }
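
/*
 * To illustrate the search with made-up numbers: suppose an 8-block log whose
 * per-block cycle numbers are 9 9 9 8 8 8 8 8 and we are looking for cycle 8.
 * Starting from first_blk == 0 and end_blk == 7, the midpoints visited are
 * block 3 (cycle 8, so end_blk = 3), block 1 (cycle 9, so first_blk = 1) and
 * block 2 (cycle 9, so first_blk = 2); the loop then stops and *last_blk is
 * set to 3, the first block stamped with cycle 8.  Torn writes can leave the
 * on-disk cycles less tidy than this, which is why the result is only an
 * approximation.
 */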
335 
336 /*
337  * Check that a range of blocks does not contain stop_on_cycle_no.
338  * Fill in *new_blk with the block offset where such a block is
339  * found, or with -1 (an invalid block number) if there is no such
340  * block in the range.  The scan needs to occur from front to back
341  * and the pointer into the region must be updated since a later
342  * routine will need to perform another test.
343  */
344 STATIC int
345 xlog_find_verify_cycle(
346 	struct xlog	*log,
347 	xfs_daddr_t	start_blk,
348 	int		nbblks,
349 	uint		stop_on_cycle_no,
350 	xfs_daddr_t	*new_blk)
351 {
352 	xfs_daddr_t	i, j;
353 	uint		cycle;
354 	char		*buffer;
355 	xfs_daddr_t	bufblks;
356 	char		*buf = NULL;
357 	int		error = 0;
358 
359 	/*
360 	 * Greedily allocate a buffer big enough to handle the full
361 	 * range of basic blocks we'll be examining.  If that fails,
362 	 * try a smaller size.  We need to be able to read at least
363 	 * a log sector, or we're out of luck.
364 	 */
365 	bufblks = 1 << ffs(nbblks);
366 	while (bufblks > log->l_logBBsize)
367 		bufblks >>= 1;
368 	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
369 		bufblks >>= 1;
370 		if (bufblks < log->l_sectBBsize)
371 			return -ENOMEM;
372 	}
373 
374 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
375 		int	bcount;
376 
377 		bcount = min(bufblks, (start_blk + nbblks - i));
378 
379 		error = xlog_bread(log, i, bcount, buffer, &buf);
380 		if (error)
381 			goto out;
382 
383 		for (j = 0; j < bcount; j++) {
384 			cycle = xlog_get_cycle(buf);
385 			if (cycle == stop_on_cycle_no) {
386 				*new_blk = i+j;
387 				goto out;
388 			}
389 
390 			buf += BBSIZE;
391 		}
392 	}
393 
394 	*new_blk = -1;
395 
396 out:
397 	kmem_free(buffer);
398 	return error;
399 }
400 
401 /*
402  * Potentially backup over partial log record write.
403  *
404  * In the typical case, last_blk is the number of the block directly after
405  * a good log record.  Therefore, we subtract one to get the block number
406  * of the last block in the given buffer.  extra_bblks contains the number
407  * of blocks we would have read on a previous read.  This happens when the
408  * last log record is split over the end of the physical log.
409  *
410  * extra_bblks is the number of blocks potentially verified on a previous
411  * call to this routine.
412  */
413 STATIC int
414 xlog_find_verify_log_record(
415 	struct xlog		*log,
416 	xfs_daddr_t		start_blk,
417 	xfs_daddr_t		*last_blk,
418 	int			extra_bblks)
419 {
420 	xfs_daddr_t		i;
421 	char			*buffer;
422 	char			*offset = NULL;
423 	xlog_rec_header_t	*head = NULL;
424 	int			error = 0;
425 	int			smallmem = 0;
426 	int			num_blks = *last_blk - start_blk;
427 	int			xhdrs;
428 
429 	ASSERT(start_blk != 0 || *last_blk != start_blk);
430 
431 	buffer = xlog_alloc_buffer(log, num_blks);
432 	if (!buffer) {
433 		buffer = xlog_alloc_buffer(log, 1);
434 		if (!buffer)
435 			return -ENOMEM;
436 		smallmem = 1;
437 	} else {
438 		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
439 		if (error)
440 			goto out;
441 		offset += ((num_blks - 1) << BBSHIFT);
442 	}
443 
444 	for (i = (*last_blk) - 1; i >= 0; i--) {
445 		if (i < start_blk) {
446 			/* valid log record not found */
447 			xfs_warn(log->l_mp,
448 		"Log inconsistent (didn't find previous header)");
449 			ASSERT(0);
450 			error = -EFSCORRUPTED;
451 			goto out;
452 		}
453 
454 		if (smallmem) {
455 			error = xlog_bread(log, i, 1, buffer, &offset);
456 			if (error)
457 				goto out;
458 		}
459 
460 		head = (xlog_rec_header_t *)offset;
461 
462 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
463 			break;
464 
465 		if (!smallmem)
466 			offset -= BBSIZE;
467 	}
468 
469 	/*
470 	 * We hit the beginning of the physical log & still no header.  Return
471 	 * to caller.  If the caller can handle a return of 1, then this routine
472 	 * will be called again for the end of the physical log.
473 	 */
474 	if (i == -1) {
475 		error = 1;
476 		goto out;
477 	}
478 
479 	/*
480 	 * We have the final block of the good log (the first block
481 	 * of the log record _before_ the head). So we check the uuid.
482 	 */
483 	if ((error = xlog_header_check_mount(log->l_mp, head)))
484 		goto out;
485 
486 	/*
487 	 * We may have found a log record header before we expected one.
488 	 * last_blk will be the 1st block # with a given cycle #.  We may end
489 	 * up reading an entire log record.  In this case, we don't want to
490 	 * reset last_blk.  Only when last_blk points in the middle of a log
491 	 * record do we update last_blk.
492 	 */
493 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
494 		uint	h_size = be32_to_cpu(head->h_size);
495 
496 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
497 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
498 			xhdrs++;
499 	} else {
500 		xhdrs = 1;
501 	}
502 
503 	if (*last_blk - i + extra_bblks !=
504 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
505 		*last_blk = i;
506 
507 out:
508 	kmem_free(buffer);
509 	return error;
510 }
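
/*
 * An example of the final length check above, with hypothetical values: say
 * the header found at block i has h_len == 5120 bytes (BTOBB(5120) == 10 data
 * blocks) and a single header block (xhdrs == 1), so a complete record spans
 * 11 blocks.  If *last_blk == i + 11 (and extra_bblks == 0), last_blk already
 * points just past a whole record and is left alone; if *last_blk == i + 5,
 * it points into the middle of that record and is pulled back to i, the block
 * holding the record header.
 */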
511 
512 /*
513  * Head is defined to be the point of the log where the next log write
514  * could go.  This means that incomplete LR writes at the end are
515  * eliminated when calculating the head.  We aren't guaranteed that previous
516  * LR have complete transactions.  We only know that a cycle number of
517  * LRs have complete transactions.  We only know that a cycle number of
518  * from our current block number.
519  *
520  * last_blk contains the block number of the first block with a given
521  * cycle number.
522  *
523  * Return: zero if normal, non-zero if error.
524  */
525 STATIC int
526 xlog_find_head(
527 	struct xlog	*log,
528 	xfs_daddr_t	*return_head_blk)
529 {
530 	char		*buffer;
531 	char		*offset;
532 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
533 	int		num_scan_bblks;
534 	uint		first_half_cycle, last_half_cycle;
535 	uint		stop_on_cycle;
536 	int		error, log_bbnum = log->l_logBBsize;
537 
538 	/* Is the end of the log device zeroed? */
539 	error = xlog_find_zeroed(log, &first_blk);
540 	if (error < 0) {
541 		xfs_warn(log->l_mp, "empty log check failed");
542 		return error;
543 	}
544 	if (error == 1) {
545 		*return_head_blk = first_blk;
546 
547 		/* Is the whole lot zeroed? */
548 		if (!first_blk) {
549 			/* Linux XFS shouldn't generate totally zeroed logs -
550 			 * mkfs etc write a dummy unmount record to a fresh
551 			 * log so we can store the uuid in there
552 			 */
553 			xfs_warn(log->l_mp, "totally zeroed log");
554 		}
555 
556 		return 0;
557 	}
558 
559 	first_blk = 0;			/* get cycle # of 1st block */
560 	buffer = xlog_alloc_buffer(log, 1);
561 	if (!buffer)
562 		return -ENOMEM;
563 
564 	error = xlog_bread(log, 0, 1, buffer, &offset);
565 	if (error)
566 		goto out_free_buffer;
567 
568 	first_half_cycle = xlog_get_cycle(offset);
569 
570 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
571 	error = xlog_bread(log, last_blk, 1, buffer, &offset);
572 	if (error)
573 		goto out_free_buffer;
574 
575 	last_half_cycle = xlog_get_cycle(offset);
576 	ASSERT(last_half_cycle != 0);
577 
578 	/*
579 	 * If the 1st half cycle number is equal to the last half cycle number,
580 	 * then the entire log is stamped with the same cycle number.  In this
581 	 * case, head_blk can't be set to zero (which makes sense).  The below
582 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
583 	 * we set it to log_bbnum which is an invalid block number, but this
584 	 * value makes the math correct.  If head_blk doesn't change through
585 	 * all the tests below, *head_blk is set to zero at the very end rather
586 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
587 	 * in a circular file.
588 	 */
589 	if (first_half_cycle == last_half_cycle) {
590 		/*
591 		 * In this case we believe that the entire log should have
592 		 * cycle number last_half_cycle.  We need to scan backwards
593 		 * from the end verifying that there are no holes still
594 		 * containing last_half_cycle - 1.  If we find such a hole,
595 		 * then the start of that hole will be the new head.  The
596 		 * simple case looks like
597 		 *        x | x ... | x - 1 | x
598 		 * Another case that fits this picture would be
599 		 *        x | x + 1 | x ... | x
600 		 * In this case the head really is somewhere at the end of the
601 		 * log, as one of the latest writes at the beginning was
602 		 * incomplete.
603 		 * One more case is
604 		 *        x | x + 1 | x ... | x - 1 | x
605 		 * This is really the combination of the above two cases, and
606 		 * the head has to end up at the start of the x-1 hole at the
607 		 * end of the log.
608 		 *
609 		 * In the 256k log case, we will read from the beginning to the
610 		 * end of the log and search for cycle numbers equal to x-1.
611 		 * We don't worry about the x+1 blocks that we encounter,
612 		 * because we know that they cannot be the head since the log
613 		 * started with x.
614 		 */
615 		head_blk = log_bbnum;
616 		stop_on_cycle = last_half_cycle - 1;
617 	} else {
618 		/*
619 		 * In this case we want to find the first block with cycle
620 		 * number matching last_half_cycle.  We expect the log to be
621 		 * some variation on
622 		 *        x + 1 ... | x ... | x
623 		 * The first block with cycle number x (last_half_cycle) will
624 		 * be where the new head belongs.  First we do a binary search
625 		 * for the first occurrence of last_half_cycle.  The binary
626 		 * search may not be totally accurate, so then we scan back
627 		 * from there looking for occurrences of last_half_cycle before
628 		 * us.  If that backwards scan wraps around the beginning of
629 		 * the log, then we look for occurrences of last_half_cycle - 1
630 		 * at the end of the log.  The cases we're looking for look
631 		 * like
632 		 *                               v binary search stopped here
633 		 *        x + 1 ... | x | x + 1 | x ... | x
634 		 *                   ^ but we want to locate this spot
635 		 * or
636 		 *        <---------> less than scan distance
637 		 *        x + 1 ... | x ... | x - 1 | x
638 		 *                           ^ we want to locate this spot
639 		 */
640 		stop_on_cycle = last_half_cycle;
641 		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
642 				last_half_cycle);
643 		if (error)
644 			goto out_free_buffer;
645 	}
646 
647 	/*
648 	 * Now validate the answer.  Scan back some number of maximum possible
649 	 * blocks and make sure each one has the expected cycle number.  The
650 	 * maximum is determined by the total possible amount of buffering
651 	 * in the in-core log.  The following number can be made tighter if
652 	 * we actually look at the block size of the filesystem.
653 	 */
654 	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
655 	if (head_blk >= num_scan_bblks) {
656 		/*
657 		 * We are guaranteed that the entire check can be performed
658 		 * in one buffer.
659 		 */
660 		start_blk = head_blk - num_scan_bblks;
661 		if ((error = xlog_find_verify_cycle(log,
662 						start_blk, num_scan_bblks,
663 						stop_on_cycle, &new_blk)))
664 			goto out_free_buffer;
665 		if (new_blk != -1)
666 			head_blk = new_blk;
667 	} else {		/* need to read 2 parts of log */
668 		/*
669 		 * We are going to scan backwards in the log in two parts.
670 		 * First we scan the physical end of the log.  In this part
671 		 * of the log, we are looking for blocks with cycle number
672 		 * last_half_cycle - 1.
673 		 * If we find one, then we know that the log starts there, as
674 		 * we've found a hole that didn't get written in going around
675 		 * the end of the physical log.  The simple case for this is
676 		 *        x + 1 ... | x ... | x - 1 | x
677 		 *        <---------> less than scan distance
678 		 * If all of the blocks at the end of the log have cycle number
679 		 * last_half_cycle, then we check the blocks at the start of
680 		 * the log looking for occurrences of last_half_cycle.  If we
681 		 * find one, then our current estimate for the location of the
682 		 * first occurrence of last_half_cycle is wrong and we move
683 		 * back to the hole we've found.  This case looks like
684 		 *        x + 1 ... | x | x + 1 | x ...
685 		 *                               ^ binary search stopped here
686 		 * Another case we need to handle that only occurs in 256k
687 		 * logs is
688 		 *        x + 1 ... | x ... | x+1 | x ...
689 		 *                   ^ binary search stops here
690 		 * In a 256k log, the scan at the end of the log will see the
691 		 * x + 1 blocks.  We need to skip past those since that is
692 		 * certainly not the head of the log.  By searching for
693 		 * last_half_cycle-1 we accomplish that.
694 		 */
695 		ASSERT(head_blk <= INT_MAX &&
696 			(xfs_daddr_t) num_scan_bblks >= head_blk);
697 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
698 		if ((error = xlog_find_verify_cycle(log, start_blk,
699 					num_scan_bblks - (int)head_blk,
700 					(stop_on_cycle - 1), &new_blk)))
701 			goto out_free_buffer;
702 		if (new_blk != -1) {
703 			head_blk = new_blk;
704 			goto validate_head;
705 		}
706 
707 		/*
708 		 * Scan beginning of log now.  The last part of the physical
709 		 * log is good.  This scan needs to verify that it doesn't find
710 		 * the last_half_cycle.
711 		 */
712 		start_blk = 0;
713 		ASSERT(head_blk <= INT_MAX);
714 		if ((error = xlog_find_verify_cycle(log,
715 					start_blk, (int)head_blk,
716 					stop_on_cycle, &new_blk)))
717 			goto out_free_buffer;
718 		if (new_blk != -1)
719 			head_blk = new_blk;
720 	}
721 
722 validate_head:
723 	/*
724 	 * Now we need to make sure head_blk is not pointing to a block in
725 	 * the middle of a log record.
726 	 */
727 	num_scan_bblks = XLOG_REC_SHIFT(log);
728 	if (head_blk >= num_scan_bblks) {
729 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
730 
731 		/* start ptr at last block ptr before head_blk */
732 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
733 		if (error == 1)
734 			error = -EIO;
735 		if (error)
736 			goto out_free_buffer;
737 	} else {
738 		start_blk = 0;
739 		ASSERT(head_blk <= INT_MAX);
740 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
741 		if (error < 0)
742 			goto out_free_buffer;
743 		if (error == 1) {
744 			/* We hit the beginning of the log during our search */
745 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
746 			new_blk = log_bbnum;
747 			ASSERT(start_blk <= INT_MAX &&
748 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
749 			ASSERT(head_blk <= INT_MAX);
750 			error = xlog_find_verify_log_record(log, start_blk,
751 							&new_blk, (int)head_blk);
752 			if (error == 1)
753 				error = -EIO;
754 			if (error)
755 				goto out_free_buffer;
756 			if (new_blk != log_bbnum)
757 				head_blk = new_blk;
758 		} else if (error)
759 			goto out_free_buffer;
760 	}
761 
762 	kmem_free(buffer);
763 	if (head_blk == log_bbnum)
764 		*return_head_blk = 0;
765 	else
766 		*return_head_blk = head_blk;
767 	/*
768 	 * When returning here, we have a good block number.  A bad block here
769 	 * would mean that during a previous crash we didn't have a clean break
770 	 * from cycle number N to cycle number N-1.  In that case, we need
771 	 * to find the first block with cycle number N-1.
772 	 */
773 	return 0;
774 
775 out_free_buffer:
776 	kmem_free(buffer);
777 	if (error)
778 		xfs_warn(log->l_mp, "failed to find log head");
779 	return error;
780 }
781 
782 /*
783  * Seek backwards in the log for log record headers.
784  *
785  * Given a starting log block, walk backwards until we find the provided number
786  * of records or hit the provided tail block. The return value is the number of
787  * records encountered or a negative error code. The log block and buffer
788  * pointer of the last record seen are returned in rblk and rhead respectively.
789  */
790 STATIC int
791 xlog_rseek_logrec_hdr(
792 	struct xlog		*log,
793 	xfs_daddr_t		head_blk,
794 	xfs_daddr_t		tail_blk,
795 	int			count,
796 	char			*buffer,
797 	xfs_daddr_t		*rblk,
798 	struct xlog_rec_header	**rhead,
799 	bool			*wrapped)
800 {
801 	int			i;
802 	int			error;
803 	int			found = 0;
804 	char			*offset = NULL;
805 	xfs_daddr_t		end_blk;
806 
807 	*wrapped = false;
808 
809 	/*
810 	 * Walk backwards from the head block until we hit the tail or the first
811 	 * block in the log.
812 	 */
813 	end_blk = head_blk > tail_blk ? tail_blk : 0;
814 	for (i = (int) head_blk - 1; i >= end_blk; i--) {
815 		error = xlog_bread(log, i, 1, buffer, &offset);
816 		if (error)
817 			goto out_error;
818 
819 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
820 			*rblk = i;
821 			*rhead = (struct xlog_rec_header *) offset;
822 			if (++found == count)
823 				break;
824 		}
825 	}
826 
827 	/*
828 	 * If we haven't hit the tail block or the log record header count,
829 	 * start looking again from the end of the physical log. Note that
830 	 * callers can pass head == tail if the tail is not yet known.
831 	 */
832 	if (tail_blk >= head_blk && found != count) {
833 		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
834 			error = xlog_bread(log, i, 1, buffer, &offset);
835 			if (error)
836 				goto out_error;
837 
838 			if (*(__be32 *)offset ==
839 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
840 				*wrapped = true;
841 				*rblk = i;
842 				*rhead = (struct xlog_rec_header *) offset;
843 				if (++found == count)
844 					break;
845 			}
846 		}
847 	}
848 
849 	return found;
850 
851 out_error:
852 	return error;
853 }
854 
855 /*
856  * Seek forward in the log for log record headers.
857  *
858  * Given head and tail blocks, walk forward from the tail block until we find
859  * the provided number of records or hit the head block. The return value is the
860  * number of records encountered or a negative error code. The log block and
861  * buffer pointer of the last record seen are returned in rblk and rhead
862  * respectively.
863  */
864 STATIC int
865 xlog_seek_logrec_hdr(
866 	struct xlog		*log,
867 	xfs_daddr_t		head_blk,
868 	xfs_daddr_t		tail_blk,
869 	int			count,
870 	char			*buffer,
871 	xfs_daddr_t		*rblk,
872 	struct xlog_rec_header	**rhead,
873 	bool			*wrapped)
874 {
875 	int			i;
876 	int			error;
877 	int			found = 0;
878 	char			*offset = NULL;
879 	xfs_daddr_t		end_blk;
880 
881 	*wrapped = false;
882 
883 	/*
884 	 * Walk forward from the tail block until we hit the head or the last
885 	 * block in the log.
886 	 */
887 	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
888 	for (i = (int) tail_blk; i <= end_blk; i++) {
889 		error = xlog_bread(log, i, 1, buffer, &offset);
890 		if (error)
891 			goto out_error;
892 
893 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
894 			*rblk = i;
895 			*rhead = (struct xlog_rec_header *) offset;
896 			if (++found == count)
897 				break;
898 		}
899 	}
900 
901 	/*
902 	 * If we haven't hit the head block or the log record header count,
903 	 * start looking again from the start of the physical log.
904 	 */
905 	if (tail_blk > head_blk && found != count) {
906 		for (i = 0; i < (int) head_blk; i++) {
907 			error = xlog_bread(log, i, 1, buffer, &offset);
908 			if (error)
909 				goto out_error;
910 
911 			if (*(__be32 *)offset ==
912 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
913 				*wrapped = true;
914 				*rblk = i;
915 				*rhead = (struct xlog_rec_header *) offset;
916 				if (++found == count)
917 					break;
918 			}
919 		}
920 	}
921 
922 	return found;
923 
924 out_error:
925 	return error;
926 }
927 
928 /*
929  * Calculate distance from head to tail (i.e., unused space in the log).
930  */
931 static inline int
932 xlog_tail_distance(
933 	struct xlog	*log,
934 	xfs_daddr_t	head_blk,
935 	xfs_daddr_t	tail_blk)
936 {
937 	if (head_blk < tail_blk)
938 		return tail_blk - head_blk;
939 
940 	return tail_blk + (log->l_logBBsize - head_blk);
941 }
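
/*
 * For instance, in a hypothetical 1000-block log: with head_blk == 900 and
 * tail_blk == 100 the head has wrapped past the tail, so the distance is
 * 100 + (1000 - 900) == 200 blocks; with head_blk == 100 and tail_blk == 900
 * it is simply 900 - 100 == 800 blocks.
 */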
942 
943 /*
944  * Verify the log tail. This is particularly important when torn or incomplete
945  * writes have been detected near the front of the log and the head has been
946  * walked back accordingly.
947  *
948  * We also have to handle the case where the tail was pinned and the head
949  * blocked behind the tail right before a crash. If the tail had been pushed
950  * immediately prior to the crash and the subsequent checkpoint was only
951  * partially written, it's possible it overwrote the last referenced tail in the
952  * log with garbage. This is not a coherency problem because the tail must have
953  * been pushed before it can be overwritten, but appears as log corruption to
954  * recovery because we have no way to know the tail was updated if the
955  * subsequent checkpoint didn't write successfully.
956  *
957  * Therefore, CRC check the log from tail to head. If a failure occurs and the
958  * offending record is within max iclog bufs from the head, walk the tail
959  * forward and retry until a valid tail is found or corruption is detected out
960  * of the range of a possible overwrite.
961  */
962 STATIC int
963 xlog_verify_tail(
964 	struct xlog		*log,
965 	xfs_daddr_t		head_blk,
966 	xfs_daddr_t		*tail_blk,
967 	int			hsize)
968 {
969 	struct xlog_rec_header	*thead;
970 	char			*buffer;
971 	xfs_daddr_t		first_bad;
972 	int			error = 0;
973 	bool			wrapped;
974 	xfs_daddr_t		tmp_tail;
975 	xfs_daddr_t		orig_tail = *tail_blk;
976 
977 	buffer = xlog_alloc_buffer(log, 1);
978 	if (!buffer)
979 		return -ENOMEM;
980 
981 	/*
982 	 * Make sure the tail points to a record (returns positive count on
983 	 * success).
984 	 */
985 	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
986 			&tmp_tail, &thead, &wrapped);
987 	if (error < 0)
988 		goto out;
989 	if (*tail_blk != tmp_tail)
990 		*tail_blk = tmp_tail;
991 
992 	/*
993 	 * Run a CRC check from the tail to the head. We can't just check
994 	 * XLOG_MAX_ICLOGS records past the tail because the tail may point to stale
995 	 * blocks cleared during the search for the head/tail. These blocks are
996 	 * overwritten with zero-length records and thus record count is not a
997 	 * reliable indicator of the iclog state before a crash.
998 	 */
999 	first_bad = 0;
1000 	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1001 				      XLOG_RECOVER_CRCPASS, &first_bad);
1002 	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1003 		int	tail_distance;
1004 
1005 		/*
1006 		 * Is corruption within range of the head? If so, retry from
1007 		 * the next record. Otherwise return an error.
1008 		 */
1009 		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1010 		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1011 			break;
1012 
1013 		/* skip to the next record; returns positive count on success */
1014 		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
1015 				buffer, &tmp_tail, &thead, &wrapped);
1016 		if (error < 0)
1017 			goto out;
1018 
1019 		*tail_blk = tmp_tail;
1020 		first_bad = 0;
1021 		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1022 					      XLOG_RECOVER_CRCPASS, &first_bad);
1023 	}
1024 
1025 	if (!error && *tail_blk != orig_tail)
1026 		xfs_warn(log->l_mp,
1027 		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1028 			 orig_tail, *tail_blk);
1029 out:
1030 	kmem_free(buffer);
1031 	return error;
1032 }
1033 
1034 /*
1035  * Detect and trim torn writes from the head of the log.
1036  *
1037  * Storage without sector atomicity guarantees can result in torn writes in the
1038  * log in the event of a crash. Our only means to detect this scenario is via
1039  * CRC verification. While we can't always be certain that CRC verification
1040  * failure is due to a torn write vs. an unrelated corruption, we do know that
1041  * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1042  * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1043  * the log and treat failures in this range as torn writes as a matter of
1044  * policy. In the event of CRC failure, the head is walked back to the last good
1045  * record in the log and the tail is updated from that record and verified.
1046  */
1047 STATIC int
1048 xlog_verify_head(
1049 	struct xlog		*log,
1050 	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
1051 	xfs_daddr_t		*tail_blk,	/* out: tail block */
1052 	char			*buffer,
1053 	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
1054 	struct xlog_rec_header	**rhead,	/* ptr to last record */
1055 	bool			*wrapped)	/* last rec. wraps phys. log */
1056 {
1057 	struct xlog_rec_header	*tmp_rhead;
1058 	char			*tmp_buffer;
1059 	xfs_daddr_t		first_bad;
1060 	xfs_daddr_t		tmp_rhead_blk;
1061 	int			found;
1062 	int			error;
1063 	bool			tmp_wrapped;
1064 
1065 	/*
1066 	 * Check the head of the log for torn writes. Search backwards from the
1067 	 * head until we hit the tail or the maximum number of log record I/Os
1068 	 * that could have been in flight at one time. Use a temporary buffer so
1069 	 * we don't trash the rhead/buffer pointers from the caller.
1070 	 */
1071 	tmp_buffer = xlog_alloc_buffer(log, 1);
1072 	if (!tmp_buffer)
1073 		return -ENOMEM;
1074 	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1075 				      XLOG_MAX_ICLOGS, tmp_buffer,
1076 				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1077 	kmem_free(tmp_buffer);
1078 	if (error < 0)
1079 		return error;
1080 
1081 	/*
1082 	 * Now run a CRC verification pass over the records starting at the
1083 	 * block found above to the current head. If a CRC failure occurs, the
1084 	 * log block of the first bad record is saved in first_bad.
1085 	 */
1086 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1087 				      XLOG_RECOVER_CRCPASS, &first_bad);
1088 	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1089 		/*
1090 		 * We've hit a potential torn write. Reset the error and warn
1091 		 * about it.
1092 		 */
1093 		error = 0;
1094 		xfs_warn(log->l_mp,
1095 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1096 			 first_bad, *head_blk);
1097 
1098 		/*
1099 		 * Get the header block and buffer pointer for the last good
1100 		 * record before the bad record.
1101 		 *
1102 		 * Note that xlog_find_tail() clears the blocks at the new head
1103 		 * (i.e., the records with invalid CRC) if the cycle number
1104 		 * matches the current cycle.
1105 		 */
1106 		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1107 				buffer, rhead_blk, rhead, wrapped);
1108 		if (found < 0)
1109 			return found;
1110 		if (found == 0)		/* XXX: right thing to do here? */
1111 			return -EIO;
1112 
1113 		/*
1114 		 * Reset the head block to the starting block of the first bad
1115 		 * log record and set the tail block based on the last good
1116 		 * record.
1117 		 *
1118 		 * Bail out if the updated head/tail match as this indicates
1119 		 * possible corruption outside of the acceptable
1120 		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1121 		 */
1122 		*head_blk = first_bad;
1123 		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1124 		if (*head_blk == *tail_blk) {
1125 			ASSERT(0);
1126 			return 0;
1127 		}
1128 	}
1129 	if (error)
1130 		return error;
1131 
1132 	return xlog_verify_tail(log, *head_blk, tail_blk,
1133 				be32_to_cpu((*rhead)->h_size));
1134 }
1135 
1136 /*
1137  * We need to make sure we handle log wrapping properly, so we can't use the
1138  * calculated logbno directly. Make sure it wraps to the correct bno inside the
1139  * log.
1140  *
1141  * The log is limited to 32 bit sizes, so we use the appropriate modulus
1142  * operation here and cast it back to a 64 bit daddr on return.
1143  */
1144 static inline xfs_daddr_t
1145 xlog_wrap_logbno(
1146 	struct xlog		*log,
1147 	xfs_daddr_t		bno)
1148 {
1149 	int			mod;
1150 
1151 	div_s64_rem(bno, log->l_logBBsize, &mod);
1152 	return mod;
1153 }
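
/*
 * For example, with a hypothetical 1000-block log (l_logBBsize == 1000), a
 * computed block number of 1005 wraps to block 5, while a block number that
 * is already inside the log (say 42) is returned unchanged.
 */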
1154 
1155 /*
1156  * Check whether the head of the log points to an unmount record. In other
1157  * words, determine whether the log is clean. If so, update the in-core state
1158  * appropriately.
1159  */
1160 static int
1161 xlog_check_unmount_rec(
1162 	struct xlog		*log,
1163 	xfs_daddr_t		*head_blk,
1164 	xfs_daddr_t		*tail_blk,
1165 	struct xlog_rec_header	*rhead,
1166 	xfs_daddr_t		rhead_blk,
1167 	char			*buffer,
1168 	bool			*clean)
1169 {
1170 	struct xlog_op_header	*op_head;
1171 	xfs_daddr_t		umount_data_blk;
1172 	xfs_daddr_t		after_umount_blk;
1173 	int			hblks;
1174 	int			error;
1175 	char			*offset;
1176 
1177 	*clean = false;
1178 
1179 	/*
1180 	 * Look for unmount record. If we find it, then we know there was a
1181 	 * clean unmount. Since the computed block could lie past the end of the
1182 	 * physical log, we wrap it back into the log before comparing to head_blk.
1183 	 *
1184 	 * Save the current tail lsn to pass to xlog_clear_stale_blocks() below.
1185 	 * We don't want to clear the unmount record if there is one, so we
1186 	 * pass the lsn of the unmount record rather than the block after it.
1187 	 */
1188 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1189 		int	h_size = be32_to_cpu(rhead->h_size);
1190 		int	h_version = be32_to_cpu(rhead->h_version);
1191 
1192 		if ((h_version & XLOG_VERSION_2) &&
1193 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1194 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1195 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1196 				hblks++;
1197 		} else {
1198 			hblks = 1;
1199 		}
1200 	} else {
1201 		hblks = 1;
1202 	}
1203 
1204 	after_umount_blk = xlog_wrap_logbno(log,
1205 			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1206 
1207 	if (*head_blk == after_umount_blk &&
1208 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1209 		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1210 		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1211 		if (error)
1212 			return error;
1213 
1214 		op_head = (struct xlog_op_header *)offset;
1215 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1216 			/*
1217 			 * Set tail and last sync so that newly written log
1218 			 * records will point recovery to after the current
1219 			 * unmount record.
1220 			 */
1221 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1222 					log->l_curr_cycle, after_umount_blk);
1223 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1224 					log->l_curr_cycle, after_umount_blk);
1225 			*tail_blk = after_umount_blk;
1226 
1227 			*clean = true;
1228 		}
1229 	}
1230 
1231 	return 0;
1232 }
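
/*
 * A worked example with made-up numbers: a v2 log with 32k iclogs
 * (h_size == XLOG_HEADER_CYCLE_SIZE) has a single header block, so
 * hblks == 1; with 256k iclogs, h_size == 262144 gives hblks == 8.  If the
 * record header sits at rhead_blk == 42 with hblks == 1 and an h_len small
 * enough that BTOBB(h_len) == 1, the unmount data lives in block 43 and
 * after_umount_blk == 44; the log is considered clean when head_blk is also
 * 44 and the record carries exactly one log operation flagged
 * XLOG_UNMOUNT_TRANS.
 */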
1233 
1234 static void
1235 xlog_set_state(
1236 	struct xlog		*log,
1237 	xfs_daddr_t		head_blk,
1238 	struct xlog_rec_header	*rhead,
1239 	xfs_daddr_t		rhead_blk,
1240 	bool			bump_cycle)
1241 {
1242 	/*
1243 	 * Reset log values according to the state of the log when we
1244 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
1245 	 * one because the next write starts a new cycle rather than
1246 	 * continuing the cycle of the last good log record.  At this
1247 	 * point we have guaranteed that all partial log records have been
1248 	 * accounted for.  Therefore, we know that the last good log record
1249 	 * written was complete and ended exactly on the end boundary
1250 	 * of the physical log.
1251 	 */
1252 	log->l_prev_block = rhead_blk;
1253 	log->l_curr_block = (int)head_blk;
1254 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1255 	if (bump_cycle)
1256 		log->l_curr_cycle++;
1257 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1258 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1259 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1260 					BBTOB(log->l_curr_block));
1261 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1262 					BBTOB(log->l_curr_block));
1263 }
1264 
1265 /*
1266  * Find the sync block number or the tail of the log.
1267  *
1268  * This will be the block number of the last record to have its
1269  * associated buffers synced to disk.  Every log record header has
1270  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1271  * to get a sync block number.  The only concern is to figure out which
1272  * log record header to believe.
1273  *
1274  * The following algorithm uses the log record header with the largest
1275  * lsn.  The entire log record does not need to be valid.  We only care
1276  * that the header is valid.
1277  *
1278  * We could speed up the search by using the current head_blk buffer, but
1279  * it is not available.
1280  */
1281 STATIC int
1282 xlog_find_tail(
1283 	struct xlog		*log,
1284 	xfs_daddr_t		*head_blk,
1285 	xfs_daddr_t		*tail_blk)
1286 {
1287 	xlog_rec_header_t	*rhead;
1288 	char			*offset = NULL;
1289 	char			*buffer;
1290 	int			error;
1291 	xfs_daddr_t		rhead_blk;
1292 	xfs_lsn_t		tail_lsn;
1293 	bool			wrapped = false;
1294 	bool			clean = false;
1295 
1296 	/*
1297 	 * Find previous log record
1298 	 */
1299 	if ((error = xlog_find_head(log, head_blk)))
1300 		return error;
1301 	ASSERT(*head_blk < INT_MAX);
1302 
1303 	buffer = xlog_alloc_buffer(log, 1);
1304 	if (!buffer)
1305 		return -ENOMEM;
1306 	if (*head_blk == 0) {				/* special case */
1307 		error = xlog_bread(log, 0, 1, buffer, &offset);
1308 		if (error)
1309 			goto done;
1310 
1311 		if (xlog_get_cycle(offset) == 0) {
1312 			*tail_blk = 0;
1313 			/* leave all other log inited values alone */
1314 			goto done;
1315 		}
1316 	}
1317 
1318 	/*
1319 	 * Search backwards through the log looking for the log record header
1320 	 * block. This wraps all the way back around to the head so something is
1321 	 * seriously wrong if we can't find it.
1322 	 */
1323 	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1324 				      &rhead_blk, &rhead, &wrapped);
1325 	if (error < 0)
1326 		goto done;
1327 	if (!error) {
1328 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1329 		error = -EFSCORRUPTED;
1330 		goto done;
1331 	}
1332 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1333 
1334 	/*
1335 	 * Set the log state based on the current head record.
1336 	 */
1337 	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1338 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1339 
1340 	/*
1341 	 * Look for an unmount record at the head of the log. This sets the log
1342 	 * state to determine whether recovery is necessary.
1343 	 */
1344 	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1345 				       rhead_blk, buffer, &clean);
1346 	if (error)
1347 		goto done;
1348 
1349 	/*
1350 	 * Verify the log head if the log is not clean (e.g., we have anything
1351 	 * but an unmount record at the head). This uses CRC verification to
1352 	 * detect and trim torn writes. If discovered, CRC failures are
1353 	 * considered torn writes and the log head is trimmed accordingly.
1354 	 *
1355 	 * Note that we can only run CRC verification when the log is dirty
1356 	 * because there's no guarantee that the log data behind an unmount
1357 	 * record is compatible with the current architecture.
1358 	 */
1359 	if (!clean) {
1360 		xfs_daddr_t	orig_head = *head_blk;
1361 
1362 		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1363 					 &rhead_blk, &rhead, &wrapped);
1364 		if (error)
1365 			goto done;
1366 
1367 		/* update in-core state again if the head changed */
1368 		if (*head_blk != orig_head) {
1369 			xlog_set_state(log, *head_blk, rhead, rhead_blk,
1370 				       wrapped);
1371 			tail_lsn = atomic64_read(&log->l_tail_lsn);
1372 			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1373 						       rhead, rhead_blk, buffer,
1374 						       &clean);
1375 			if (error)
1376 				goto done;
1377 		}
1378 	}
1379 
1380 	/*
1381 	 * Note that the unmount was clean. If the unmount was not clean, we
1382 	 * need to know this to rebuild the superblock counters from the perag
1383 	 * headers if we have a filesystem using non-persistent counters.
1384 	 */
1385 	if (clean)
1386 		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1387 
1388 	/*
1389 	 * Make sure that there are no blocks in front of the head
1390 	 * with the same cycle number as the head.  This can happen
1391 	 * because we allow multiple outstanding log writes concurrently,
1392 	 * and the later writes might make it out before earlier ones.
1393 	 *
1394 	 * We use the lsn from before modifying it so that we'll never
1395 	 * overwrite the unmount record after a clean unmount.
1396 	 *
1397 	 * Do this only if we are going to recover the filesystem
1398 	 *
1399 	 * NOTE: This used to say "if (!readonly)".
1400 	 * However, on Linux we can and do recover a read-only filesystem.
1401 	 * We only skip recovery if NORECOVERY is specified on mount,
1402 	 * in which case we would not be here.
1403 	 *
1404 	 * But... if the -device- itself is readonly, just skip this.
1405 	 * We can't recover this device anyway, so it won't matter.
1406 	 */
1407 	if (!xfs_readonly_buftarg(log->l_targ))
1408 		error = xlog_clear_stale_blocks(log, tail_lsn);
1409 
1410 done:
1411 	kmem_free(buffer);
1412 
1413 	if (error)
1414 		xfs_warn(log->l_mp, "failed to locate log tail");
1415 	return error;
1416 }
1417 
1418 /*
1419  * Is the log zeroed at all?
1420  *
1421  * The last binary search should be changed to perform an X block read
1422  * once X becomes small enough.  You can then search linearly through
1423  * the X blocks.  This will cut down on the number of reads we need to do.
1424  *
1425  * If the log is partially zeroed, this routine will pass back the blkno
1426  * of the first block with cycle number 0.  It won't have a complete LR
1427  * preceding it.
1428  *
1429  * Return:
1430  *	 0 => the log is completely written to
1431  *	 1 => use *blk_no as the first block of the log
1432  *	<0 => error has occurred
1433  */
1434 STATIC int
1435 xlog_find_zeroed(
1436 	struct xlog	*log,
1437 	xfs_daddr_t	*blk_no)
1438 {
1439 	char		*buffer;
1440 	char		*offset;
1441 	uint	        first_cycle, last_cycle;
1442 	xfs_daddr_t	new_blk, last_blk, start_blk;
1443 	xfs_daddr_t     num_scan_bblks;
1444 	int	        error, log_bbnum = log->l_logBBsize;
1445 
1446 	*blk_no = 0;
1447 
1448 	/* check totally zeroed log */
1449 	buffer = xlog_alloc_buffer(log, 1);
1450 	if (!buffer)
1451 		return -ENOMEM;
1452 	error = xlog_bread(log, 0, 1, buffer, &offset);
1453 	if (error)
1454 		goto out_free_buffer;
1455 
1456 	first_cycle = xlog_get_cycle(offset);
1457 	if (first_cycle == 0) {		/* completely zeroed log */
1458 		*blk_no = 0;
1459 		kmem_free(buffer);
1460 		return 1;
1461 	}
1462 
1463 	/* check partially zeroed log */
1464 	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1465 	if (error)
1466 		goto out_free_buffer;
1467 
1468 	last_cycle = xlog_get_cycle(offset);
1469 	if (last_cycle != 0) {		/* log completely written to */
1470 		kmem_free(buffer);
1471 		return 0;
1472 	}
1473 
1474 	/* we have a partially zeroed log */
1475 	last_blk = log_bbnum-1;
1476 	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1477 	if (error)
1478 		goto out_free_buffer;
1479 
1480 	/*
1481 	 * Validate the answer.  Because there is no way to guarantee that
1482 	 * the entire log is made up of log records which are the same size,
1483 	 * we scan over the defined maximum blocks.  At this point, the maximum
1484 	 * is not chosen to mean anything special.   XXXmiken
1485 	 */
1486 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1487 	ASSERT(num_scan_bblks <= INT_MAX);
1488 
1489 	if (last_blk < num_scan_bblks)
1490 		num_scan_bblks = last_blk;
1491 	start_blk = last_blk - num_scan_bblks;
1492 
1493 	/*
1494 	 * We search for any instances of cycle number 0 that occur before
1495 	 * our current estimate of the head.  What we're trying to detect is
1496 	 *        1 ... | 0 | 1 | 0...
1497 	 *                       ^ binary search ends here
1498 	 */
1499 	if ((error = xlog_find_verify_cycle(log, start_blk,
1500 					 (int)num_scan_bblks, 0, &new_blk)))
1501 		goto out_free_buffer;
1502 	if (new_blk != -1)
1503 		last_blk = new_blk;
1504 
1505 	/*
1506 	 * Potentially backup over partial log record write.  We don't need
1507 	 * to search the end of the log because we know it is zero.
1508 	 */
1509 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1510 	if (error == 1)
1511 		error = -EIO;
1512 	if (error)
1513 		goto out_free_buffer;
1514 
1515 	*blk_no = last_blk;
1516 out_free_buffer:
1517 	kmem_free(buffer);
1518 	if (error)
1519 		return error;
1520 	return 1;
1521 }
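
/*
 * A concrete (made-up) example of the partially zeroed case: in a 1000-block
 * log where blocks 0-499 carry cycle 1 and blocks 500-999 are still zero, the
 * first block has a non-zero cycle and the last block has cycle 0, so
 * xlog_find_cycle_start() narrows last_blk down to block 500.  The scan above
 * then looks for stray zeroed blocks before that estimate,
 * xlog_find_verify_log_record() backs up over a possible partial record
 * write, and the routine returns 1 with *blk_no == 500.
 */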
1522 
1523 /*
1524  * These are simple subroutines used by xlog_clear_stale_blocks() below
1525  * to initialize a buffer full of empty log record headers and write
1526  * them into the log.
1527  */
1528 STATIC void
1529 xlog_add_record(
1530 	struct xlog		*log,
1531 	char			*buf,
1532 	int			cycle,
1533 	int			block,
1534 	int			tail_cycle,
1535 	int			tail_block)
1536 {
1537 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1538 
1539 	memset(buf, 0, BBSIZE);
1540 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1541 	recp->h_cycle = cpu_to_be32(cycle);
1542 	recp->h_version = cpu_to_be32(
1543 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1544 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1545 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1546 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1547 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1548 }
1549 
1550 STATIC int
1551 xlog_write_log_records(
1552 	struct xlog	*log,
1553 	int		cycle,
1554 	int		start_block,
1555 	int		blocks,
1556 	int		tail_cycle,
1557 	int		tail_block)
1558 {
1559 	char		*offset;
1560 	char		*buffer;
1561 	int		balign, ealign;
1562 	int		sectbb = log->l_sectBBsize;
1563 	int		end_block = start_block + blocks;
1564 	int		bufblks;
1565 	int		error = 0;
1566 	int		i, j = 0;
1567 
1568 	/*
1569 	 * Greedily allocate a buffer big enough to handle the full
1570 	 * range of basic blocks to be written.  If that fails, try
1571 	 * a smaller size.  We need to be able to write at least a
1572 	 * log sector, or we're out of luck.
1573 	 */
1574 	bufblks = 1 << ffs(blocks);
1575 	while (bufblks > log->l_logBBsize)
1576 		bufblks >>= 1;
1577 	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1578 		bufblks >>= 1;
1579 		if (bufblks < sectbb)
1580 			return -ENOMEM;
1581 	}
1582 
1583 	/* We may need to do a read at the start to fill in part of
1584 	 * the buffer in the starting sector not covered by the first
1585 	 * write below.
1586 	 */
1587 	balign = round_down(start_block, sectbb);
1588 	if (balign != start_block) {
1589 		error = xlog_bread_noalign(log, start_block, 1, buffer);
1590 		if (error)
1591 			goto out_free_buffer;
1592 
1593 		j = start_block - balign;
1594 	}
1595 
1596 	for (i = start_block; i < end_block; i += bufblks) {
1597 		int		bcount, endcount;
1598 
1599 		bcount = min(bufblks, end_block - start_block);
1600 		endcount = bcount - j;
1601 
1602 		/* We may need to do a read at the end to fill in part of
1603 		 * the buffer in the final sector not covered by the write.
1604 		 * If this is the same sector as the above read, skip it.
1605 		 */
1606 		ealign = round_down(end_block, sectbb);
1607 		if (j == 0 && (start_block + endcount > ealign)) {
1608 			error = xlog_bread_noalign(log, ealign, sectbb,
1609 					buffer + BBTOB(ealign - start_block));
1610 			if (error)
1611 				break;
1612 
1613 		}
1614 
1615 		offset = buffer + xlog_align(log, start_block);
1616 		for (; j < endcount; j++) {
1617 			xlog_add_record(log, offset, cycle, i+j,
1618 					tail_cycle, tail_block);
1619 			offset += BBSIZE;
1620 		}
1621 		error = xlog_bwrite(log, start_block, endcount, buffer);
1622 		if (error)
1623 			break;
1624 		start_block += endcount;
1625 		j = 0;
1626 	}
1627 
1628 out_free_buffer:
1629 	kmem_free(buffer);
1630 	return error;
1631 }
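
/*
 * To make the partial-sector handling above concrete (hypothetical numbers,
 * sectbb == 8): clearing from start_block == 10 gives balign == 8, so the
 * single-block read is widened by xlog_do_io() to the whole sector (blocks
 * 8-15) and j is set to 2.  The first two buffer slots therefore keep the
 * on-disk contents of blocks 8 and 9, and only the slots from block 10
 * onwards are stamped with dummy records before the sector is written back.
 */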
1632 
1633 /*
1634  * This routine is called to blow away any incomplete log writes out
1635  * in front of the log head.  We do this so that we won't become confused
1636  * if we come up, write only a little bit more, and then crash again.
1637  * If we leave the partial log records out there, this situation could
1638  * cause us to think those partial writes are valid blocks since they
1639  * have the current cycle number.  We get rid of them by overwriting them
1640  * with empty log records with the old cycle number rather than the
1641  * current one.
1642  *
1643  * The tail lsn is passed in rather than taken from
1644  * the log so that we will not write over the unmount record after a
1645  * clean unmount in a 512 block log.  Doing so would leave the log without
1646  * any valid log records in it until a new one was written.  If we crashed
1647  * during that time we would not be able to recover.
1648  */
1649 STATIC int
1650 xlog_clear_stale_blocks(
1651 	struct xlog	*log,
1652 	xfs_lsn_t	tail_lsn)
1653 {
1654 	int		tail_cycle, head_cycle;
1655 	int		tail_block, head_block;
1656 	int		tail_distance, max_distance;
1657 	int		distance;
1658 	int		error;
1659 
1660 	tail_cycle = CYCLE_LSN(tail_lsn);
1661 	tail_block = BLOCK_LSN(tail_lsn);
1662 	head_cycle = log->l_curr_cycle;
1663 	head_block = log->l_curr_block;
1664 
1665 	/*
1666 	 * Figure out the distance between the new head of the log
1667 	 * and the tail.  We want to write over any blocks beyond the
1668 	 * head that we may have written just before the crash, but
1669 	 * we don't want to overwrite the tail of the log.
1670 	 */
1671 	if (head_cycle == tail_cycle) {
1672 		/*
1673 		 * The tail is behind the head in the physical log,
1674 		 * so the distance from the head to the tail is the
1675 		 * distance from the head to the end of the log plus
1676 		 * the distance from the beginning of the log to the
1677 		 * tail.
1678 		 */
1679 		if (XFS_IS_CORRUPT(log->l_mp,
1680 				   head_block < tail_block ||
1681 				   head_block >= log->l_logBBsize))
1682 			return -EFSCORRUPTED;
1683 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1684 	} else {
1685 		/*
1686 		 * The head is behind the tail in the physical log,
1687 		 * so the distance from the head to the tail is just
1688 		 * the tail block minus the head block.
1689 		 */
1690 		if (XFS_IS_CORRUPT(log->l_mp,
1691 				   head_block >= tail_block ||
1692 				   head_cycle != tail_cycle + 1))
1693 			return -EFSCORRUPTED;
1694 		tail_distance = tail_block - head_block;
1695 	}
1696 
1697 	/*
1698 	 * If the head is right up against the tail, we can't clear
1699 	 * anything.
1700 	 */
1701 	if (tail_distance <= 0) {
1702 		ASSERT(tail_distance == 0);
1703 		return 0;
1704 	}
1705 
1706 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1707 	/*
1708 	 * Take the smaller of the maximum amount of outstanding I/O
1709 	 * we could have and the distance to the tail to clear out.
1710 	 * We take the smaller so that we don't overwrite the tail and
1711 	 * we don't waste all day writing from the head to the tail
1712 	 * for no reason.
1713 	 */
1714 	max_distance = min(max_distance, tail_distance);
1715 
1716 	if ((head_block + max_distance) <= log->l_logBBsize) {
1717 		/*
1718 		 * We can stomp all the blocks we need to without
1719 		 * wrapping around the end of the log.  Just do it
1720 		 * in a single write.  Use the cycle number of the
1721 		 * current cycle minus one so that the log will look like:
1722 		 *     n ... | n - 1 ...
1723 		 */
1724 		error = xlog_write_log_records(log, (head_cycle - 1),
1725 				head_block, max_distance, tail_cycle,
1726 				tail_block);
1727 		if (error)
1728 			return error;
1729 	} else {
1730 		/*
1731 		 * We need to wrap around the end of the physical log in
1732 		 * order to clear all the blocks.  Do it in two separate
1733 		 * I/Os.  The first write should be from the head to the
1734 		 * end of the physical log, and it should use the current
1735 		 * cycle number minus one just like above.
1736 		 */
1737 		distance = log->l_logBBsize - head_block;
1738 		error = xlog_write_log_records(log, (head_cycle - 1),
1739 				head_block, distance, tail_cycle,
1740 				tail_block);
1741 
1742 		if (error)
1743 			return error;
1744 
1745 		/*
1746 		 * Now write the blocks at the start of the physical log.
1747 		 * This writes the remainder of the blocks we want to clear.
1748 		 * It uses the current cycle number since we're now on the
1749 		 * same cycle as the head so that we get:
1750 		 *    n ... n ... | n - 1 ...
1751 		 *    ^^^^^ blocks we're writing
1752 		 */
1753 		distance = max_distance - (log->l_logBBsize - head_block);
1754 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1755 				tail_cycle, tail_block);
1756 		if (error)
1757 			return error;
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 /*
1764  * Release the recovered intent item in the AIL that matches the given intent
1765  * type and intent id.
1766  */
1767 void
1768 xlog_recover_release_intent(
1769 	struct xlog		*log,
1770 	unsigned short		intent_type,
1771 	uint64_t		intent_id)
1772 {
1773 	struct xfs_ail_cursor	cur;
1774 	struct xfs_log_item	*lip;
1775 	struct xfs_ail		*ailp = log->l_ailp;
1776 
1777 	spin_lock(&ailp->ail_lock);
1778 	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1779 	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1780 		if (lip->li_type != intent_type)
1781 			continue;
1782 		if (!lip->li_ops->iop_match(lip, intent_id))
1783 			continue;
1784 
1785 		spin_unlock(&ailp->ail_lock);
1786 		lip->li_ops->iop_release(lip);
1787 		spin_lock(&ailp->ail_lock);
1788 		break;
1789 	}
1790 
1791 	xfs_trans_ail_cursor_done(&cur);
1792 	spin_unlock(&ailp->ail_lock);
1793 }
1794 
1795 /******************************************************************************
1796  *
1797  *		Log recover routines
1798  *
1799  ******************************************************************************
1800  */
1801 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1802 	&xlog_buf_item_ops,
1803 	&xlog_inode_item_ops,
1804 	&xlog_dquot_item_ops,
1805 	&xlog_quotaoff_item_ops,
1806 	&xlog_icreate_item_ops,
1807 	&xlog_efi_item_ops,
1808 	&xlog_efd_item_ops,
1809 	&xlog_rui_item_ops,
1810 	&xlog_rud_item_ops,
1811 	&xlog_cui_item_ops,
1812 	&xlog_cud_item_ops,
1813 	&xlog_bui_item_ops,
1814 	&xlog_bud_item_ops,
1815 };
1816 
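/*
 * Map a recovery item to the type-specific recovery ops vector that knows how
 * to replay it, or return NULL if the item type is not recognised.
 */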
1817 static const struct xlog_recover_item_ops *
1818 xlog_find_item_ops(
1819 	struct xlog_recover_item		*item)
1820 {
1821 	unsigned int				i;
1822 
1823 	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1824 		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1825 			return xlog_recover_item_ops[i];
1826 
1827 	return NULL;
1828 }
1829 
1830 /*
1831  * Sort the log items in the transaction.
1832  *
1833  * The ordering constraints are defined by the inode allocation and unlink
1834  * behaviour. The rules are:
1835  *
1836  *	1. Every item is only logged once in a given transaction. Hence it
1837  *	   represents the last logged state of the item. Hence ordering is
1838  *	   dependent on the order in which operations need to be performed so
1839  *	   required initial conditions are always met.
1840  *
1841  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1842  *	   there's nothing to replay from them so we can simply cull them
1843  *	   from the transaction. However, we can't do that until after we've
1844  *	   replayed all the other items because they may be dependent on the
1845  *	   cancelled buffer and replaying the cancelled buffer can remove it
1846  *	   from the cancelled buffer table. Hence they have to be done last.
1847  *
1848  *	3. Inode allocation buffers must be replayed before inode items that
1849  *	   read the buffer and replay changes into it. For filesystems using the
1850  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1851  *	   treated the same as inode allocation buffers as they create and
1852  *	   initialise the buffers directly.
1853  *
1854  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1855  *	   This ensures that inodes are completely flushed to the inode buffer
1856  *	   in a "free" state before we remove the unlinked inode list pointer.
1857  *
1858  * Hence the ordering needs to be inode allocation buffers first, inode items
1859  * second, inode unlink buffers third and cancelled buffers last.
1860  *
1861  * But there's a problem with that - we can't tell an inode allocation buffer
1862  * apart from a regular buffer, so we can't separate them. We can, however,
1863  * tell an inode unlink buffer from the others, and so we can separate them out
1864  * from all the other buffers and move them to last.
1865  *
1866  * Hence, 4 lists, in order from head to tail:
1867  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1868  *	- item_list for all non-buffer items
1869  *	- inode_buffer_list for inode unlink buffers
1870  *	- cancel_list for the cancelled buffers
1871  *
1872  * Note that we add objects to the tail of the lists so that first-to-last
1873  * ordering is preserved within the lists. Adding objects to the head of the
1874  * list means when we traverse from the head we walk them in last-to-first
1875  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1876  * but for all other items there may be specific ordering that we need to
1877  * preserve.
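 *
 * For example, a transaction holding a regular buffer, an inode item, an
 * inode unlink buffer and a cancelled buffer is spliced back onto r_itemq in
 * exactly that order: buffer first, then the inode item, then the unlink
 * buffer, then the cancelled buffer.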
1878  */
1879 STATIC int
1880 xlog_recover_reorder_trans(
1881 	struct xlog		*log,
1882 	struct xlog_recover	*trans,
1883 	int			pass)
1884 {
1885 	struct xlog_recover_item *item, *n;
1886 	int			error = 0;
1887 	LIST_HEAD(sort_list);
1888 	LIST_HEAD(cancel_list);
1889 	LIST_HEAD(buffer_list);
1890 	LIST_HEAD(inode_buffer_list);
1891 	LIST_HEAD(item_list);
1892 
1893 	list_splice_init(&trans->r_itemq, &sort_list);
1894 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1895 		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;
1896 
1897 		item->ri_ops = xlog_find_item_ops(item);
1898 		if (!item->ri_ops) {
1899 			xfs_warn(log->l_mp,
1900 				"%s: unrecognized type of log operation (%d)",
1901 				__func__, ITEM_TYPE(item));
1902 			ASSERT(0);
1903 			/*
1904 			 * return the remaining items back to the transaction
1905 			 * item list so they can be freed by the caller.
1906 			 */
1907 			if (!list_empty(&sort_list))
1908 				list_splice_init(&sort_list, &trans->r_itemq);
1909 			error = -EFSCORRUPTED;
1910 			break;
1911 		}
1912 
1913 		if (item->ri_ops->reorder)
1914 			fate = item->ri_ops->reorder(item);
1915 
1916 		switch (fate) {
1917 		case XLOG_REORDER_BUFFER_LIST:
1918 			list_move_tail(&item->ri_list, &buffer_list);
1919 			break;
1920 		case XLOG_REORDER_CANCEL_LIST:
1921 			trace_xfs_log_recover_item_reorder_head(log,
1922 					trans, item, pass);
1923 			list_move(&item->ri_list, &cancel_list);
1924 			break;
1925 		case XLOG_REORDER_INODE_BUFFER_LIST:
1926 			list_move(&item->ri_list, &inode_buffer_list);
1927 			break;
1928 		case XLOG_REORDER_ITEM_LIST:
1929 			trace_xfs_log_recover_item_reorder_tail(log,
1930 							trans, item, pass);
1931 			list_move_tail(&item->ri_list, &item_list);
1932 			break;
1933 		}
1934 	}
1935 
1936 	ASSERT(list_empty(&sort_list));
1937 	if (!list_empty(&buffer_list))
1938 		list_splice(&buffer_list, &trans->r_itemq);
1939 	if (!list_empty(&item_list))
1940 		list_splice_tail(&item_list, &trans->r_itemq);
1941 	if (!list_empty(&inode_buffer_list))
1942 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1943 	if (!list_empty(&cancel_list))
1944 		list_splice_tail(&cancel_list, &trans->r_itemq);
1945 	return error;
1946 }
1947 
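/* Start readahead for a recovered buffer unless a cancel record covers it. */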
1948 void
1949 xlog_buf_readahead(
1950 	struct xlog		*log,
1951 	xfs_daddr_t		blkno,
1952 	uint			len,
1953 	const struct xfs_buf_ops *ops)
1954 {
1955 	if (!xlog_is_buffer_cancelled(log, blkno, len))
1956 		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1957 }
1958 
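/*
 * Run the pass-2 commit handler for each item on @item_list.  Recovered
 * buffers are queued onto @buffer_list for delayed write submission by the
 * caller.
 */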
1959 STATIC int
1960 xlog_recover_items_pass2(
1961 	struct xlog                     *log,
1962 	struct xlog_recover             *trans,
1963 	struct list_head                *buffer_list,
1964 	struct list_head                *item_list)
1965 {
1966 	struct xlog_recover_item	*item;
1967 	int				error = 0;
1968 
1969 	list_for_each_entry(item, item_list, ri_list) {
1970 		trace_xfs_log_recover_item_recover(log, trans, item,
1971 				XLOG_RECOVER_PASS2);
1972 
1973 		if (item->ri_ops->commit_pass2)
1974 			error = item->ri_ops->commit_pass2(log, buffer_list,
1975 					item, trans->r_lsn);
1976 		if (error)
1977 			return error;
1978 	}
1979 
1980 	return error;
1981 }
1982 
1983 /*
1984  * Perform the transaction.
1985  *
1986  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
1987  * EFIs and EFDs get queued up by adding entries into the AIL for them.
1988  */
1989 STATIC int
1990 xlog_recover_commit_trans(
1991 	struct xlog		*log,
1992 	struct xlog_recover	*trans,
1993 	int			pass,
1994 	struct list_head	*buffer_list)
1995 {
1996 	int				error = 0;
1997 	int				items_queued = 0;
1998 	struct xlog_recover_item	*item;
1999 	struct xlog_recover_item	*next;
2000 	LIST_HEAD			(ra_list);
2001 	LIST_HEAD			(done_list);
2002 
2003 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
2004 
2005 	hlist_del_init(&trans->r_list);
2006 
2007 	error = xlog_recover_reorder_trans(log, trans, pass);
2008 	if (error)
2009 		return error;
2010 
2011 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
2012 		trace_xfs_log_recover_item_recover(log, trans, item, pass);
2013 
2014 		switch (pass) {
2015 		case XLOG_RECOVER_PASS1:
2016 			if (item->ri_ops->commit_pass1)
2017 				error = item->ri_ops->commit_pass1(log, item);
2018 			break;
2019 		case XLOG_RECOVER_PASS2:
2020 			if (item->ri_ops->ra_pass2)
2021 				item->ri_ops->ra_pass2(log, item);
2022 			list_move_tail(&item->ri_list, &ra_list);
2023 			items_queued++;
2024 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2025 				error = xlog_recover_items_pass2(log, trans,
2026 						buffer_list, &ra_list);
2027 				list_splice_tail_init(&ra_list, &done_list);
2028 				items_queued = 0;
2029 			}
2030 
2031 			break;
2032 		default:
2033 			ASSERT(0);
2034 		}
2035 
2036 		if (error)
2037 			goto out;
2038 	}
2039 
2040 out:
2041 	if (!list_empty(&ra_list)) {
2042 		if (!error)
2043 			error = xlog_recover_items_pass2(log, trans,
2044 					buffer_list, &ra_list);
2045 		list_splice_tail_init(&ra_list, &done_list);
2046 	}
2047 
2048 	if (!list_empty(&done_list))
2049 		list_splice_init(&done_list, &trans->r_itemq);
2050 
2051 	return error;
2052 }
2053 
2054 STATIC void
2055 xlog_recover_add_item(
2056 	struct list_head	*head)
2057 {
2058 	struct xlog_recover_item *item;
2059 
2060 	item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2061 	INIT_LIST_HEAD(&item->ri_list);
2062 	list_add_tail(&item->ri_list, head);
2063 }
2064 
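/*
 * A log operation was split across log records.  Append the continuation data
 * either to the partially-copied transaction header (if the item list is
 * still empty) or to the last region of the last item added to the
 * transaction.
 */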
2065 STATIC int
2066 xlog_recover_add_to_cont_trans(
2067 	struct xlog		*log,
2068 	struct xlog_recover	*trans,
2069 	char			*dp,
2070 	int			len)
2071 {
2072 	struct xlog_recover_item *item;
2073 	char			*ptr, *old_ptr;
2074 	int			old_len;
2075 
2076 	/*
2077 	 * If the transaction is empty, the header was split across this and the
2078 	 * previous record. Copy the rest of the header.
2079 	 */
2080 	if (list_empty(&trans->r_itemq)) {
2081 		ASSERT(len <= sizeof(struct xfs_trans_header));
2082 		if (len > sizeof(struct xfs_trans_header)) {
2083 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2084 			return -EFSCORRUPTED;
2085 		}
2086 
2087 		xlog_recover_add_item(&trans->r_itemq);
2088 		ptr = (char *)&trans->r_theader +
2089 				sizeof(struct xfs_trans_header) - len;
2090 		memcpy(ptr, dp, len);
2091 		return 0;
2092 	}
2093 
2094 	/* take the tail entry */
2095 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2096 			  ri_list);
2097 
2098 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2099 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
2100 
2101 	ptr = kmem_realloc(old_ptr, len + old_len, 0);
2102 	memcpy(&ptr[old_len], dp, len);
2103 	item->ri_buf[item->ri_cnt-1].i_len += len;
2104 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2105 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2106 	return 0;
2107 }
2108 
2109 /*
2110  * The next region to add is the start of a new region.  It could be
2111  * a whole region or it could be the first part of a new region.  Because
2112  * of this, the assumption here is that the type and size fields of all
2113  * format structures fit into the first 32 bits of the structure.
2114  *
2115  * This works because all regions must be 32 bit aligned.  Therefore, we
2116  * either have both fields or we have neither field.  In the case we have
2117  * neither field, the data part of the region is zero length.  We only have
2118  * a log_op_header and can throw away the header since a new one will appear
2119  * later.  If we have at least 4 bytes, then we can determine how many regions
2120  * will appear in the current log item.
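 *
 * For example, struct xfs_inode_log_format begins with a 16-bit ilf_type
 * followed by a 16-bit ilf_size, so reading the first 4 bytes of a region is
 * enough to identify both the item type and its total region count.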
2121  */
2122 STATIC int
2123 xlog_recover_add_to_trans(
2124 	struct xlog		*log,
2125 	struct xlog_recover	*trans,
2126 	char			*dp,
2127 	int			len)
2128 {
2129 	struct xfs_inode_log_format	*in_f;			/* any will do */
2130 	struct xlog_recover_item *item;
2131 	char			*ptr;
2132 
2133 	if (!len)
2134 		return 0;
2135 	if (list_empty(&trans->r_itemq)) {
2136 		/* we need to catch log corruptions here */
2137 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2138 			xfs_warn(log->l_mp, "%s: bad header magic number",
2139 				__func__);
2140 			ASSERT(0);
2141 			return -EFSCORRUPTED;
2142 		}
2143 
2144 		if (len > sizeof(struct xfs_trans_header)) {
2145 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
2146 			ASSERT(0);
2147 			return -EFSCORRUPTED;
2148 		}
2149 
2150 		/*
2151 		 * The transaction header can be arbitrarily split across op
2152 		 * records. If we don't have the whole thing here, copy what we
2153 		 * do have and handle the rest in the next record.
2154 		 */
2155 		if (len == sizeof(struct xfs_trans_header))
2156 			xlog_recover_add_item(&trans->r_itemq);
2157 		memcpy(&trans->r_theader, dp, len);
2158 		return 0;
2159 	}
2160 
2161 	ptr = kmem_alloc(len, 0);
2162 	memcpy(ptr, dp, len);
2163 	in_f = (struct xfs_inode_log_format *)ptr;
2164 
2165 	/* take the tail entry */
2166 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2167 			  ri_list);
2168 	if (item->ri_total != 0 &&
2169 	     item->ri_total == item->ri_cnt) {
2170 		/* tail item is in use, get a new one */
2171 		xlog_recover_add_item(&trans->r_itemq);
2172 		item = list_entry(trans->r_itemq.prev,
2173 					struct xlog_recover_item, ri_list);
2174 	}
2175 
2176 	if (item->ri_total == 0) {		/* first region to be added */
2177 		if (in_f->ilf_size == 0 ||
2178 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2179 			xfs_warn(log->l_mp,
2180 		"bad number of regions (%d) in inode log format",
2181 				  in_f->ilf_size);
2182 			ASSERT(0);
2183 			kmem_free(ptr);
2184 			return -EFSCORRUPTED;
2185 		}
2186 
2187 		item->ri_total = in_f->ilf_size;
2188 		item->ri_buf =
2189 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2190 				    0);
2191 	}
2192 
2193 	if (item->ri_total <= item->ri_cnt) {
2194 		xfs_warn(log->l_mp,
2195 	"log item region count (%d) overflowed size (%d)",
2196 				item->ri_cnt, item->ri_total);
2197 		ASSERT(0);
2198 		kmem_free(ptr);
2199 		return -EFSCORRUPTED;
2200 	}
2201 
2202 	/* Description region is ri_buf[0] */
2203 	item->ri_buf[item->ri_cnt].i_addr = ptr;
2204 	item->ri_buf[item->ri_cnt].i_len  = len;
2205 	item->ri_cnt++;
2206 	trace_xfs_log_recover_item_add(log, trans, item, 0);
2207 	return 0;
2208 }
2209 
2210 /*
2211  * Free up any resources allocated by the transaction
2212  *
2213  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2214  */
2215 STATIC void
2216 xlog_recover_free_trans(
2217 	struct xlog_recover	*trans)
2218 {
2219 	struct xlog_recover_item *item, *n;
2220 	int			i;
2221 
2222 	hlist_del_init(&trans->r_list);
2223 
2224 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2225 		/* Free the regions in the item. */
2226 		list_del(&item->ri_list);
2227 		for (i = 0; i < item->ri_cnt; i++)
2228 			kmem_free(item->ri_buf[i].i_addr);
2229 		/* Free the item itself */
2230 		kmem_free(item->ri_buf);
2231 		kmem_free(item);
2232 	}
2233 	/* Free the transaction recover structure */
2234 	kmem_free(trans);
2235 }
2236 
2237 /*
2238  * On error or completion, trans is freed.
2239  */
2240 STATIC int
2241 xlog_recovery_process_trans(
2242 	struct xlog		*log,
2243 	struct xlog_recover	*trans,
2244 	char			*dp,
2245 	unsigned int		len,
2246 	unsigned int		flags,
2247 	int			pass,
2248 	struct list_head	*buffer_list)
2249 {
2250 	int			error = 0;
2251 	bool			freeit = false;
2252 
2253 	/* mask off ophdr transaction container flags */
2254 	flags &= ~XLOG_END_TRANS;
2255 	if (flags & XLOG_WAS_CONT_TRANS)
2256 		flags &= ~XLOG_CONTINUE_TRANS;
2257 
2258 	/*
2259 	 * Callees must not free the trans structure. We'll decide if we need to
2260 	 * free it or not based on the operation being done and its result.
2261 	 */
2262 	switch (flags) {
2263 	/* expected flag values */
2264 	case 0:
2265 	case XLOG_CONTINUE_TRANS:
2266 		error = xlog_recover_add_to_trans(log, trans, dp, len);
2267 		break;
2268 	case XLOG_WAS_CONT_TRANS:
2269 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2270 		break;
2271 	case XLOG_COMMIT_TRANS:
2272 		error = xlog_recover_commit_trans(log, trans, pass,
2273 						  buffer_list);
2274 		/* success or fail, we are now done with this transaction. */
2275 		freeit = true;
2276 		break;
2277 
2278 	/* unexpected flag values */
2279 	case XLOG_UNMOUNT_TRANS:
2280 		/* just skip trans */
2281 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2282 		freeit = true;
2283 		break;
2284 	case XLOG_START_TRANS:
2285 	default:
2286 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2287 		ASSERT(0);
2288 		error = -EFSCORRUPTED;
2289 		break;
2290 	}
2291 	if (error || freeit)
2292 		xlog_recover_free_trans(trans);
2293 	return error;
2294 }
2295 
2296 /*
2297  * Lookup the transaction recovery structure associated with the ID in the
2298  * current ophdr. If the transaction doesn't exist and the start flag is set in
2299  * the ophdr, then allocate a new transaction for future ID matches to find.
2300  * Either way, return what we found during the lookup - an existing transaction
2301  * or nothing.
2302  */
2303 STATIC struct xlog_recover *
2304 xlog_recover_ophdr_to_trans(
2305 	struct hlist_head	rhash[],
2306 	struct xlog_rec_header	*rhead,
2307 	struct xlog_op_header	*ohead)
2308 {
2309 	struct xlog_recover	*trans;
2310 	xlog_tid_t		tid;
2311 	struct hlist_head	*rhp;
2312 
2313 	tid = be32_to_cpu(ohead->oh_tid);
2314 	rhp = &rhash[XLOG_RHASH(tid)];
2315 	hlist_for_each_entry(trans, rhp, r_list) {
2316 		if (trans->r_log_tid == tid)
2317 			return trans;
2318 	}
2319 
2320 	/*
2321 	 * skip over non-start transaction headers - we could be
2322 	 * processing slack space before the next transaction starts
2323 	 */
2324 	if (!(ohead->oh_flags & XLOG_START_TRANS))
2325 		return NULL;
2326 
2327 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2328 
2329 	/*
2330 	 * This is a new transaction so allocate a new recovery container to
2331 	 * hold the recovery ops that will follow.
2332 	 */
2333 	trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2334 	trans->r_log_tid = tid;
2335 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2336 	INIT_LIST_HEAD(&trans->r_itemq);
2337 	INIT_HLIST_NODE(&trans->r_list);
2338 	hlist_add_head(&trans->r_list, rhp);
2339 
2340 	/*
2341 	 * Nothing more to do for this ophdr. Items to be added to this new
2342 	 * transaction will be in subsequent ophdr containers.
2343 	 */
2344 	return NULL;
2345 }
2346 
2347 STATIC int
2348 xlog_recover_process_ophdr(
2349 	struct xlog		*log,
2350 	struct hlist_head	rhash[],
2351 	struct xlog_rec_header	*rhead,
2352 	struct xlog_op_header	*ohead,
2353 	char			*dp,
2354 	char			*end,
2355 	int			pass,
2356 	struct list_head	*buffer_list)
2357 {
2358 	struct xlog_recover	*trans;
2359 	unsigned int		len;
2360 	int			error;
2361 
2362 	/* Do we understand who wrote this op? */
2363 	if (ohead->oh_clientid != XFS_TRANSACTION &&
2364 	    ohead->oh_clientid != XFS_LOG) {
2365 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2366 			__func__, ohead->oh_clientid);
2367 		ASSERT(0);
2368 		return -EFSCORRUPTED;
2369 	}
2370 
2371 	/*
2372 	 * Check the ophdr contains all the data it is supposed to contain.
2373 	 */
2374 	len = be32_to_cpu(ohead->oh_len);
2375 	if (dp + len > end) {
2376 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2377 		WARN_ON(1);
2378 		return -EFSCORRUPTED;
2379 	}
2380 
2381 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2382 	if (!trans) {
2383 		/* nothing to do, so skip over this ophdr */
2384 		return 0;
2385 	}
2386 
2387 	/*
2388 	 * The recovered buffer queue is drained only once we know that all
2389 	 * recovery items for the current LSN have been processed. This is
2390 	 * required because:
2391 	 *
2392 	 * - Buffer write submission updates the metadata LSN of the buffer.
2393 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
2394 	 *   the recovery item.
2395 	 * - Separate recovery items against the same metadata buffer can share
2396 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
2397 	 *   defined as the starting LSN of the first record in which its
2398 	 *   transaction appears, that a record can hold multiple transactions,
2399 	 *   and/or that a transaction can span multiple records.
2400 	 *
2401 	 * In other words, we are allowed to submit a buffer from log recovery
2402 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
2403 	 * items and cause corruption.
2404 	 *
2405 	 * We don't know up front whether buffers are updated multiple times per
2406 	 * LSN. Therefore, track the current LSN of each commit log record as it
2407 	 * is processed and drain the queue when it changes. Use commit records
2408 	 * because they are ordered correctly by the logging code.
2409 	 */
2410 	if (log->l_recovery_lsn != trans->r_lsn &&
2411 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
2412 		error = xfs_buf_delwri_submit(buffer_list);
2413 		if (error)
2414 			return error;
2415 		log->l_recovery_lsn = trans->r_lsn;
2416 	}
2417 
2418 	return xlog_recovery_process_trans(log, trans, dp, len,
2419 					   ohead->oh_flags, pass, buffer_list);
2420 }
2421 
2422 /*
2423  * There are two valid states of the r_state field.  0 indicates that the
2424  * transaction structure is in a normal state.  We have either seen the
2425  * start of the transaction or the last operation we added was not a partial
2426  * operation.  If the last operation we added to the transaction was a
2427  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2428  *
2429  * NOTE: skip LRs with 0 data length.
2430  */
2431 STATIC int
2432 xlog_recover_process_data(
2433 	struct xlog		*log,
2434 	struct hlist_head	rhash[],
2435 	struct xlog_rec_header	*rhead,
2436 	char			*dp,
2437 	int			pass,
2438 	struct list_head	*buffer_list)
2439 {
2440 	struct xlog_op_header	*ohead;
2441 	char			*end;
2442 	int			num_logops;
2443 	int			error;
2444 
2445 	end = dp + be32_to_cpu(rhead->h_len);
2446 	num_logops = be32_to_cpu(rhead->h_num_logops);
2447 
2448 	/* check the log format matches our own - else we can't recover */
2449 	if (xlog_header_check_recover(log->l_mp, rhead))
2450 		return -EIO;
2451 
2452 	trace_xfs_log_recover_record(log, rhead, pass);
2453 	while ((dp < end) && num_logops) {
2454 
2455 		ohead = (struct xlog_op_header *)dp;
2456 		dp += sizeof(*ohead);
2457 		ASSERT(dp <= end);
2458 
2459 		/* errors will abort recovery */
2460 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2461 						   dp, end, pass, buffer_list);
2462 		if (error)
2463 			return error;
2464 
2465 		dp += be32_to_cpu(ohead->oh_len);
2466 		num_logops--;
2467 	}
2468 	return 0;
2469 }
2470 
2471 /* Take all the collected deferred ops and finish them in order. */
2472 static int
2473 xlog_finish_defer_ops(
2474 	struct xfs_trans	*parent_tp)
2475 {
2476 	struct xfs_mount	*mp = parent_tp->t_mountp;
2477 	struct xfs_trans	*tp;
2478 	int64_t			freeblks;
2479 	uint			resblks;
2480 	int			error;
2481 
2482 	/*
2483 	 * We're finishing the defer_ops that accumulated as a result of
2484 	 * recovering unfinished intent items during log recovery.  We
2485 	 * reserve an itruncate transaction because it is the largest
2486 	 * permanent transaction type.  Since we're the only user of the fs
2487 	 * right now, take 15/16 (93.75%) of the available free blocks.  Use
2488 	 * weird math to avoid a 64-bit division.
2489 	 */
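	/*
	 * Worked example (illustrative numbers only): with freeblks = 1048576
	 * blocks, resblks = (1048576 * 15) >> 4 = 983040, i.e. 15/16 of the
	 * free space, computed with a multiply and a shift instead of a
	 * 64-bit division.
	 */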
2490 	freeblks = percpu_counter_sum(&mp->m_fdblocks);
2491 	if (freeblks <= 0)
2492 		return -ENOSPC;
2493 	resblks = min_t(int64_t, UINT_MAX, freeblks);
2494 	resblks = (resblks * 15) >> 4;
2495 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
2496 			0, XFS_TRANS_RESERVE, &tp);
2497 	if (error)
2498 		return error;
2499 	/* transfer all collected dfops to this transaction */
2500 	xfs_defer_move(tp, parent_tp);
2501 
2502 	return xfs_trans_commit(tp);
2503 }
2504 
2505 /* Is this log item a deferred action intent? */
2506 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
2507 {
2508 	return lip->li_ops->iop_recover != NULL &&
2509 	       lip->li_ops->iop_match != NULL;
2510 }
2511 
2512 /*
2513  * When this is called, all of the log intent items which did not have
2514  * corresponding log done items should be in the AIL.  What we do now
2515  * is update the data structures associated with each one.
2516  *
2517  * Since we process the log intent items in normal transactions, they
2518  * will be removed at some point after the commit.  This prevents us
2519  * from just walking down the list processing each one.  We'll use a
2520  * flag in the intent item to skip those that we've already processed
2521  * and use the AIL iteration mechanism's generation count to try to
2522  * speed this up at least a bit.
2523  *
2524  * When we start, we know that the intents are the only things in the
2525  * AIL.  As we process them, however, other items are added to the
2526  * AIL.
2527  */
2528 STATIC int
2529 xlog_recover_process_intents(
2530 	struct xlog		*log)
2531 {
2532 	struct xfs_trans	*parent_tp;
2533 	struct xfs_ail_cursor	cur;
2534 	struct xfs_log_item	*lip;
2535 	struct xfs_ail		*ailp;
2536 	int			error;
2537 #if defined(DEBUG) || defined(XFS_WARN)
2538 	xfs_lsn_t		last_lsn;
2539 #endif
2540 
2541 	/*
2542 	 * The intent recovery handlers commit transactions to complete recovery
2543 	 * for individual intents, but any new deferred operations that are
2544 	 * queued during that process are held off until the very end. The
2545 	 * purpose of this transaction is to serve as a container for deferred
2546 	 * operations. Each intent recovery handler must transfer dfops here
2547 	 * before its local transaction commits, and we'll finish the entire
2548 	 * list below.
2549 	 */
2550 	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
2551 	if (error)
2552 		return error;
2553 
2554 	ailp = log->l_ailp;
2555 	spin_lock(&ailp->ail_lock);
2556 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2557 #if defined(DEBUG) || defined(XFS_WARN)
2558 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2559 #endif
2560 	while (lip != NULL) {
2561 		/*
2562 		 * We're done when we see something other than an intent.
2563 		 * There should be no intents left in the AIL now.
2564 		 */
2565 		if (!xlog_item_is_intent(lip)) {
2566 #ifdef DEBUG
2567 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2568 				ASSERT(!xlog_item_is_intent(lip));
2569 #endif
2570 			break;
2571 		}
2572 
2573 		/*
2574 		 * We should never see a redo item with a LSN higher than
2575 		 * the last transaction we found in the log at the start
2576 		 * of recovery.
2577 		 */
2578 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2579 
2580 		/*
2581 		 * NOTE: If your intent processing routine can create more
2582 		 * deferred ops, you /must/ attach them to the transaction in
2583 		 * this routine or else those subsequent intents will get
2584 		 * replayed in the wrong order!
2585 		 */
2586 		if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) {
2587 			spin_unlock(&ailp->ail_lock);
2588 			error = lip->li_ops->iop_recover(lip, parent_tp);
2589 			spin_lock(&ailp->ail_lock);
2590 		}
2591 		if (error)
2592 			goto out;
2593 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2594 	}
2595 out:
2596 	xfs_trans_ail_cursor_done(&cur);
2597 	spin_unlock(&ailp->ail_lock);
2598 	if (!error)
2599 		error = xlog_finish_defer_ops(parent_tp);
2600 	xfs_trans_cancel(parent_tp);
2601 
2602 	return error;
2603 }
2604 
2605 /*
2606  * A cancel occurs when the mount has failed and we're bailing out.
2607  * Release all pending log intent items so they don't pin the AIL.
2608  */
2609 STATIC void
2610 xlog_recover_cancel_intents(
2611 	struct xlog		*log)
2612 {
2613 	struct xfs_log_item	*lip;
2614 	struct xfs_ail_cursor	cur;
2615 	struct xfs_ail		*ailp;
2616 
2617 	ailp = log->l_ailp;
2618 	spin_lock(&ailp->ail_lock);
2619 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2620 	while (lip != NULL) {
2621 		/*
2622 		 * We're done when we see something other than an intent.
2623 		 * There should be no intents left in the AIL now.
2624 		 */
2625 		if (!xlog_item_is_intent(lip)) {
2626 #ifdef DEBUG
2627 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2628 				ASSERT(!xlog_item_is_intent(lip));
2629 #endif
2630 			break;
2631 		}
2632 
2633 		spin_unlock(&ailp->ail_lock);
2634 		lip->li_ops->iop_release(lip);
2635 		spin_lock(&ailp->ail_lock);
2636 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2637 	}
2638 
2639 	xfs_trans_ail_cursor_done(&cur);
2640 	spin_unlock(&ailp->ail_lock);
2641 }
2642 
2643 /*
2644  * This routine performs a transaction to null out a bad inode pointer
2645  * in an agi unlinked inode hash bucket.
2646  */
2647 STATIC void
2648 xlog_recover_clear_agi_bucket(
2649 	xfs_mount_t	*mp,
2650 	xfs_agnumber_t	agno,
2651 	int		bucket)
2652 {
2653 	xfs_trans_t	*tp;
2654 	xfs_agi_t	*agi;
2655 	xfs_buf_t	*agibp;
2656 	int		offset;
2657 	int		error;
2658 
2659 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2660 	if (error)
2661 		goto out_error;
2662 
2663 	error = xfs_read_agi(mp, tp, agno, &agibp);
2664 	if (error)
2665 		goto out_abort;
2666 
2667 	agi = agibp->b_addr;
2668 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2669 	offset = offsetof(xfs_agi_t, agi_unlinked) +
2670 		 (sizeof(xfs_agino_t) * bucket);
2671 	xfs_trans_log_buf(tp, agibp, offset,
2672 			  (offset + sizeof(xfs_agino_t) - 1));
2673 
2674 	error = xfs_trans_commit(tp);
2675 	if (error)
2676 		goto out_error;
2677 	return;
2678 
2679 out_abort:
2680 	xfs_trans_cancel(tp);
2681 out_error:
2682 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
2683 	return;
2684 }
2685 
2686 STATIC xfs_agino_t
2687 xlog_recover_process_one_iunlink(
2688 	struct xfs_mount		*mp,
2689 	xfs_agnumber_t			agno,
2690 	xfs_agino_t			agino,
2691 	int				bucket)
2692 {
2693 	struct xfs_buf			*ibp;
2694 	struct xfs_dinode		*dip;
2695 	struct xfs_inode		*ip;
2696 	xfs_ino_t			ino;
2697 	int				error;
2698 
2699 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
2700 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
2701 	if (error)
2702 		goto fail;
2703 
2704 	/*
2705 	 * Get the on disk inode to find the next inode in the bucket.
2706 	 */
2707 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
2708 	if (error)
2709 		goto fail_iput;
2710 
2711 	xfs_iflags_clear(ip, XFS_IRECOVERY);
2712 	ASSERT(VFS_I(ip)->i_nlink == 0);
2713 	ASSERT(VFS_I(ip)->i_mode != 0);
2714 
2715 	/* setup for the next pass */
2716 	agino = be32_to_cpu(dip->di_next_unlinked);
2717 	xfs_buf_relse(ibp);
2718 
2719 	/*
2720 	 * Prevent any DMAPI event from being sent when the reference on
2721 	 * the inode is dropped.
2722 	 */
2723 	ip->i_d.di_dmevmask = 0;
2724 
2725 	xfs_irele(ip);
2726 	return agino;
2727 
2728  fail_iput:
2729 	xfs_irele(ip);
2730  fail:
2731 	/*
2732 	 * We can't read in the inode this bucket points to, or this inode
2733 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
2734 	 * some inodes and space, but at least we won't hang.
2735 	 *
2736 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2737 	 * clear the inode pointer in the bucket.
2738 	 */
2739 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
2740 	return NULLAGINO;
2741 }
2742 
2743 /*
2744  * Recover AGI unlinked lists
2745  *
2746  * This is called during recovery to process any inodes which we unlinked but
2747  * had not freed when the system crashed.  These inodes will be on the lists in the
2748  * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2749  * any inodes found on the lists. Each inode is removed from the lists when it
2750  * has been fully truncated and is freed. The freeing of the inode and its
2751  * removal from the list must be atomic.
2752  *
2753  * If everything we touch in the agi processing loop is already in memory, this
2754  * loop can hold the CPU for a long time. It runs without lock contention,
2755  * memory allocation contention, the need to wait for IO, etc., and so will run
2756  * until we either run out of inodes to process, run low on memory, or run out
2757  * of log space.
2758  *
2759  * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2760  * and can prevent other filesystem work (such as CIL pushes) from running. This
2761  * can lead to deadlocks if the recovery process runs out of log reservation
2762  * space. Hence we need to yield the CPU when there is other kernel work
2763  * scheduled on this CPU to ensure other scheduled work can run without undue
2764  * latency.
2765  */
2766 STATIC void
2767 xlog_recover_process_iunlinks(
2768 	struct xlog	*log)
2769 {
2770 	xfs_mount_t	*mp;
2771 	xfs_agnumber_t	agno;
2772 	xfs_agi_t	*agi;
2773 	xfs_buf_t	*agibp;
2774 	xfs_agino_t	agino;
2775 	int		bucket;
2776 	int		error;
2777 
2778 	mp = log->l_mp;
2779 
2780 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2781 		/*
2782 		 * Find the agi for this ag.
2783 		 */
2784 		error = xfs_read_agi(mp, NULL, agno, &agibp);
2785 		if (error) {
2786 			/*
2787 			 * AGI is b0rked. Don't process it.
2788 			 *
2789 			 * We should probably mark the filesystem as corrupt
2790 			 * after we've recovered all the ag's we can....
2791 			 */
2792 			continue;
2793 		}
2794 		/*
2795 		 * Unlock the buffer so that it can be acquired in the normal
2796 		 * course of the transaction to truncate and free each inode.
2797 		 * Because we are not racing with anyone else here for the AGI
2798 		 * buffer, we don't even need to hold it locked to read the
2799 		 * initial unlinked bucket entries out of the buffer. We keep the
2800 		 * buffer reference, though, so that it stays pinned in memory
2801 		 * while we need the buffer.
2802 		 */
2803 		agi = agibp->b_addr;
2804 		xfs_buf_unlock(agibp);
2805 
2806 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2807 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2808 			while (agino != NULLAGINO) {
2809 				agino = xlog_recover_process_one_iunlink(mp,
2810 							agno, agino, bucket);
2811 				cond_resched();
2812 			}
2813 		}
2814 		xfs_buf_rele(agibp);
2815 	}
2816 }
2817 
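/*
 * When a log record is written, the first word (cycle number) of each basic
 * block in the record body is stashed in the record header and overwritten
 * with the record's cycle number.  Undo that here: restore the saved words
 * from h_cycle_data[] in the main header and, for v2 logs with records larger
 * than XLOG_HEADER_CYCLE_SIZE, from the extended headers that follow it.
 */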
2818 STATIC void
2819 xlog_unpack_data(
2820 	struct xlog_rec_header	*rhead,
2821 	char			*dp,
2822 	struct xlog		*log)
2823 {
2824 	int			i, j, k;
2825 
2826 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2827 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2828 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2829 		dp += BBSIZE;
2830 	}
2831 
2832 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2833 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2834 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2835 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2836 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2837 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2838 			dp += BBSIZE;
2839 		}
2840 	}
2841 }
2842 
2843 /*
2844  * CRC check, unpack and process a log record.
2845  */
2846 STATIC int
2847 xlog_recover_process(
2848 	struct xlog		*log,
2849 	struct hlist_head	rhash[],
2850 	struct xlog_rec_header	*rhead,
2851 	char			*dp,
2852 	int			pass,
2853 	struct list_head	*buffer_list)
2854 {
2855 	__le32			old_crc = rhead->h_crc;
2856 	__le32			crc;
2857 
2858 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2859 
2860 	/*
2861 	 * Nothing else to do if this is a CRC verification pass. Just return
2862 	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
2863 	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
2864 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2865 	 * know precisely what failed.
2866 	 */
2867 	if (pass == XLOG_RECOVER_CRCPASS) {
2868 		if (old_crc && crc != old_crc)
2869 			return -EFSBADCRC;
2870 		return 0;
2871 	}
2872 
2873 	/*
2874 	 * We're in the normal recovery path. Issue a warning if and only if the
2875 	 * CRC in the header is non-zero. This is an advisory warning and the
2876 	 * zero CRC check prevents warnings from being emitted when upgrading
2877 	 * the kernel from one that does not add CRCs by default.
2878 	 */
2879 	if (crc != old_crc) {
2880 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2881 			xfs_alert(log->l_mp,
2882 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
2883 					le32_to_cpu(old_crc),
2884 					le32_to_cpu(crc));
2885 			xfs_hex_dump(dp, 32);
2886 		}
2887 
2888 		/*
2889 		 * If the filesystem is CRC enabled, this mismatch becomes a
2890 		 * fatal log corruption failure.
2891 		 */
2892 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2893 			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2894 			return -EFSCORRUPTED;
2895 		}
2896 	}
2897 
2898 	xlog_unpack_data(rhead, dp, log);
2899 
2900 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2901 					 buffer_list);
2902 }
2903 
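/*
 * Basic sanity checks on a log record header (magic, version, length and
 * block number) before we trust its contents.
 */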
2904 STATIC int
2905 xlog_valid_rec_header(
2906 	struct xlog		*log,
2907 	struct xlog_rec_header	*rhead,
2908 	xfs_daddr_t		blkno)
2909 {
2910 	int			hlen;
2911 
2912 	if (XFS_IS_CORRUPT(log->l_mp,
2913 			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2914 		return -EFSCORRUPTED;
2915 	if (XFS_IS_CORRUPT(log->l_mp,
2916 			   (!rhead->h_version ||
2917 			   (be32_to_cpu(rhead->h_version) &
2918 			    (~XLOG_VERSION_OKBITS))))) {
2919 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2920 			__func__, be32_to_cpu(rhead->h_version));
2921 		return -EFSCORRUPTED;
2922 	}
2923 
2924 	/* LR body must have data or it wouldn't have been written */
2925 	hlen = be32_to_cpu(rhead->h_len);
2926 	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
2927 		return -EFSCORRUPTED;
2928 	if (XFS_IS_CORRUPT(log->l_mp,
2929 			   blkno > log->l_logBBsize || blkno > INT_MAX))
2930 		return -EFSCORRUPTED;
2931 	return 0;
2932 }
2933 
2934 /*
2935  * Read the log from tail to head and process the log records found.
2936  * Handle the two cases where the tail and head are in the same cycle
2937  * and where the active portion of the log wraps around the end of
2938  * the physical log separately.  The pass parameter is passed through
2939  * to the routines called to process the data and is not looked at
2940  * here.
2941  */
2942 STATIC int
2943 xlog_do_recovery_pass(
2944 	struct xlog		*log,
2945 	xfs_daddr_t		head_blk,
2946 	xfs_daddr_t		tail_blk,
2947 	int			pass,
2948 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
2949 {
2950 	xlog_rec_header_t	*rhead;
2951 	xfs_daddr_t		blk_no, rblk_no;
2952 	xfs_daddr_t		rhead_blk;
2953 	char			*offset;
2954 	char			*hbp, *dbp;
2955 	int			error = 0, h_size, h_len;
2956 	int			error2 = 0;
2957 	int			bblks, split_bblks;
2958 	int			hblks, split_hblks, wrapped_hblks;
2959 	int			i;
2960 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
2961 	LIST_HEAD		(buffer_list);
2962 
2963 	ASSERT(head_blk != tail_blk);
2964 	blk_no = rhead_blk = tail_blk;
2965 
2966 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
2967 		INIT_HLIST_HEAD(&rhash[i]);
2968 
2969 	/*
2970 	 * Read the header of the tail block and get the iclog buffer size from
2971 	 * h_size.  Use this to tell how many sectors make up the log header.
2972 	 */
2973 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2974 		/*
2975 		 * When using variable length iclogs, read first sector of
2976 		 * iclog header and extract the header size from it.  Get a
2977 		 * new hbp that is the correct size.
2978 		 */
2979 		hbp = xlog_alloc_buffer(log, 1);
2980 		if (!hbp)
2981 			return -ENOMEM;
2982 
2983 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2984 		if (error)
2985 			goto bread_err1;
2986 
2987 		rhead = (xlog_rec_header_t *)offset;
2988 		error = xlog_valid_rec_header(log, rhead, tail_blk);
2989 		if (error)
2990 			goto bread_err1;
2991 
2992 		/*
2993 		 * xfsprogs has a bug where record length is based on lsunit but
2994 		 * h_size (iclog size) is hardcoded to 32k. Now that we
2995 		 * unconditionally CRC verify the unmount record, this means the
2996 		 * log buffer can be too small for the record and cause an
2997 		 * overrun.
2998 		 *
2999 		 * Detect this condition here. Use lsunit for the buffer size as
3000 		 * long as this looks like the mkfs case. Otherwise, return an
3001 		 * error to avoid a buffer overrun.
3002 		 */
3003 		h_size = be32_to_cpu(rhead->h_size);
3004 		h_len = be32_to_cpu(rhead->h_len);
3005 		if (h_len > h_size) {
3006 			if (h_len <= log->l_mp->m_logbsize &&
3007 			    be32_to_cpu(rhead->h_num_logops) == 1) {
3008 				xfs_warn(log->l_mp,
3009 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
3010 					 h_size, log->l_mp->m_logbsize);
3011 				h_size = log->l_mp->m_logbsize;
3012 			} else {
3013 				XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
3014 						log->l_mp);
3015 				error = -EFSCORRUPTED;
3016 				goto bread_err1;
3017 			}
3018 		}
3019 
3020 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3021 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3022 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3023 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3024 				hblks++;
3025 			kmem_free(hbp);
3026 			hbp = xlog_alloc_buffer(log, hblks);
3027 		} else {
3028 			hblks = 1;
3029 		}
3030 	} else {
3031 		ASSERT(log->l_sectBBsize == 1);
3032 		hblks = 1;
3033 		hbp = xlog_alloc_buffer(log, 1);
3034 		h_size = XLOG_BIG_RECORD_BSIZE;
3035 	}
3036 
3037 	if (!hbp)
3038 		return -ENOMEM;
3039 	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3040 	if (!dbp) {
3041 		kmem_free(hbp);
3042 		return -ENOMEM;
3043 	}
3044 
3045 	memset(rhash, 0, sizeof(rhash));
3046 	if (tail_blk > head_blk) {
3047 		/*
3048 		 * Perform recovery around the end of the physical log.
3049 		 * When the head is not on the same cycle number as the tail,
3050 		 * we can't do a sequential recovery.
3051 		 */
3052 		while (blk_no < log->l_logBBsize) {
3053 			/*
3054 			 * Check for header wrapping around physical end-of-log
3055 			 */
3056 			offset = hbp;
3057 			split_hblks = 0;
3058 			wrapped_hblks = 0;
3059 			if (blk_no + hblks <= log->l_logBBsize) {
3060 				/* Read header in one read */
3061 				error = xlog_bread(log, blk_no, hblks, hbp,
3062 						   &offset);
3063 				if (error)
3064 					goto bread_err2;
3065 			} else {
3066 				/* This LR is split across physical log end */
3067 				if (blk_no != log->l_logBBsize) {
3068 					/* some data before physical log end */
3069 					ASSERT(blk_no <= INT_MAX);
3070 					split_hblks = log->l_logBBsize - (int)blk_no;
3071 					ASSERT(split_hblks > 0);
3072 					error = xlog_bread(log, blk_no,
3073 							   split_hblks, hbp,
3074 							   &offset);
3075 					if (error)
3076 						goto bread_err2;
3077 				}
3078 
3079 				/*
3080 				 * Note: this black magic still works with
3081 				 * large sector sizes (non-512) only because:
3082 				 * - we increased the buffer size originally
3083 				 *   by 1 sector giving us enough extra space
3084 				 *   for the second read;
3085 				 * - the log start is guaranteed to be sector
3086 				 *   aligned;
3087 				 * - we read the log end (LR header start)
3088 				 *   _first_, then the log start (LR header end)
3089 				 *   - order is important.
3090 				 */
3091 				wrapped_hblks = hblks - split_hblks;
3092 				error = xlog_bread_noalign(log, 0,
3093 						wrapped_hblks,
3094 						offset + BBTOB(split_hblks));
3095 				if (error)
3096 					goto bread_err2;
3097 			}
3098 			rhead = (xlog_rec_header_t *)offset;
3099 			error = xlog_valid_rec_header(log, rhead,
3100 						split_hblks ? blk_no : 0);
3101 			if (error)
3102 				goto bread_err2;
3103 
3104 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3105 			blk_no += hblks;
3106 
3107 			/*
3108 			 * Read the log record data in multiple reads if it
3109 			 * wraps around the end of the log. Note that if the
3110 			 * header already wrapped, blk_no could point past the
3111 			 * end of the log. The record data is contiguous in
3112 			 * that case.
3113 			 */
3114 			if (blk_no + bblks <= log->l_logBBsize ||
3115 			    blk_no >= log->l_logBBsize) {
3116 				rblk_no = xlog_wrap_logbno(log, blk_no);
3117 				error = xlog_bread(log, rblk_no, bblks, dbp,
3118 						   &offset);
3119 				if (error)
3120 					goto bread_err2;
3121 			} else {
3122 				/* This log record is split across the
3123 				 * physical end of log */
3124 				offset = dbp;
3125 				split_bblks = 0;
3126 				if (blk_no != log->l_logBBsize) {
3127 					/* some data is before the physical
3128 					 * end of log */
3129 					ASSERT(!wrapped_hblks);
3130 					ASSERT(blk_no <= INT_MAX);
3131 					split_bblks =
3132 						log->l_logBBsize - (int)blk_no;
3133 					ASSERT(split_bblks > 0);
3134 					error = xlog_bread(log, blk_no,
3135 							split_bblks, dbp,
3136 							&offset);
3137 					if (error)
3138 						goto bread_err2;
3139 				}
3140 
3141 				/*
3142 				 * Note: this black magic still works with
3143 				 * large sector sizes (non-512) only because:
3144 				 * - we increased the buffer size originally
3145 				 *   by 1 sector giving us enough extra space
3146 				 *   for the second read;
3147 				 * - the log start is guaranteed to be sector
3148 				 *   aligned;
3149 				 * - we read the log end (LR header start)
3150 				 *   _first_, then the log start (LR header end)
3151 				 *   - order is important.
3152 				 */
3153 				error = xlog_bread_noalign(log, 0,
3154 						bblks - split_bblks,
3155 						offset + BBTOB(split_bblks));
3156 				if (error)
3157 					goto bread_err2;
3158 			}
3159 
3160 			error = xlog_recover_process(log, rhash, rhead, offset,
3161 						     pass, &buffer_list);
3162 			if (error)
3163 				goto bread_err2;
3164 
3165 			blk_no += bblks;
3166 			rhead_blk = blk_no;
3167 		}
3168 
3169 		ASSERT(blk_no >= log->l_logBBsize);
3170 		blk_no -= log->l_logBBsize;
3171 		rhead_blk = blk_no;
3172 	}
3173 
3174 	/* read first part of physical log */
3175 	while (blk_no < head_blk) {
3176 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3177 		if (error)
3178 			goto bread_err2;
3179 
3180 		rhead = (xlog_rec_header_t *)offset;
3181 		error = xlog_valid_rec_header(log, rhead, blk_no);
3182 		if (error)
3183 			goto bread_err2;
3184 
3185 		/* blocks in data section */
3186 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3187 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3188 				   &offset);
3189 		if (error)
3190 			goto bread_err2;
3191 
3192 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
3193 					     &buffer_list);
3194 		if (error)
3195 			goto bread_err2;
3196 
3197 		blk_no += bblks + hblks;
3198 		rhead_blk = blk_no;
3199 	}
3200 
3201  bread_err2:
3202 	kmem_free(dbp);
3203  bread_err1:
3204 	kmem_free(hbp);
3205 
3206 	/*
3207 	 * Submit buffers that have been added from the last record processed,
3208 	 * regardless of error status.
3209 	 */
3210 	if (!list_empty(&buffer_list))
3211 		error2 = xfs_buf_delwri_submit(&buffer_list);
3212 
3213 	if (error && first_bad)
3214 		*first_bad = rhead_blk;
3215 
3216 	/*
3217 	 * Transactions are freed at commit time but transactions without commit
3218 	 * records on disk are never committed. Free any that may be left in the
3219 	 * hash table.
3220 	 */
3221 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3222 		struct hlist_node	*tmp;
3223 		struct xlog_recover	*trans;
3224 
3225 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3226 			xlog_recover_free_trans(trans);
3227 	}
3228 
3229 	return error ? error : error2;
3230 }
3231 
3232 /*
3233  * Do the recovery of the log.  We actually do this in two phases.
3234  * The two passes are necessary in order to implement the function
3235  * of cancelling a record written into the log.  The first pass
3236  * determines those things which have been cancelled, and the
3237  * second pass replays log items normally except for those which
3238  * have been cancelled.  The handling of the replay and cancellations
3239  * takes place in the log item type specific routines.
3240  *
3241  * The table of items which have cancel records in the log is allocated
3242  * and freed at this level, since only here do we know when all of
3243  * the log recovery has been completed.
3244  */
3245 STATIC int
3246 xlog_do_log_recovery(
3247 	struct xlog	*log,
3248 	xfs_daddr_t	head_blk,
3249 	xfs_daddr_t	tail_blk)
3250 {
3251 	int		error, i;
3252 
3253 	ASSERT(head_blk != tail_blk);
3254 
3255 	/*
3256 	 * First do a pass to find all of the cancelled buf log items.
3257 	 * Store them in the buf_cancel_table for use in the second pass.
3258 	 */
3259 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3260 						 sizeof(struct list_head),
3261 						 0);
3262 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3263 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3264 
3265 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3266 				      XLOG_RECOVER_PASS1, NULL);
3267 	if (error != 0) {
3268 		kmem_free(log->l_buf_cancel_table);
3269 		log->l_buf_cancel_table = NULL;
3270 		return error;
3271 	}
3272 	/*
3273 	 * Then do a second pass to actually recover the items in the log.
3274 	 * When it is complete free the table of buf cancel items.
3275 	 */
3276 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3277 				      XLOG_RECOVER_PASS2, NULL);
3278 #ifdef DEBUG
3279 	if (!error) {
3280 		int	i;
3281 
3282 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3283 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3284 	}
3285 #endif	/* DEBUG */
3286 
3287 	kmem_free(log->l_buf_cancel_table);
3288 	log->l_buf_cancel_table = NULL;
3289 
3290 	return error;
3291 }
3292 
3293 /*
3294  * Do the actual recovery
3295  */
3296 STATIC int
3297 xlog_do_recover(
3298 	struct xlog	*log,
3299 	xfs_daddr_t	head_blk,
3300 	xfs_daddr_t	tail_blk)
3301 {
3302 	struct xfs_mount *mp = log->l_mp;
3303 	int		error;
3304 	xfs_buf_t	*bp;
3305 	xfs_sb_t	*sbp;
3306 
3307 	trace_xfs_log_recover(log, head_blk, tail_blk);
3308 
3309 	/*
3310 	 * First replay the images in the log.
3311 	 */
3312 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3313 	if (error)
3314 		return error;
3315 
3316 	/*
3317 	 * If IO errors happened during recovery, bail out.
3318 	 */
3319 	if (XFS_FORCED_SHUTDOWN(mp)) {
3320 		return -EIO;
3321 	}
3322 
3323 	/*
3324 	 * We now update the tail_lsn since much of the recovery has completed
3325 	 * and there may be space available to use.  If there were no extent
3326 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
3327 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3328 	 * lsn of the last known good LR on disk.  If there are extent frees
3329 	 * or iunlinks they will have some entries in the AIL; so we look at
3330 	 * the AIL to determine how to set the tail_lsn.
3331 	 */
3332 	xlog_assign_tail_lsn(mp);
3333 
3334 	/*
3335 	 * Now that we've finished replaying all buffer and inode
3336 	 * updates, re-read in the superblock and reverify it.
3337 	 */
3338 	bp = xfs_getsb(mp);
3339 	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
3340 	ASSERT(!(bp->b_flags & XBF_WRITE));
3341 	bp->b_flags |= XBF_READ;
3342 	bp->b_ops = &xfs_sb_buf_ops;
3343 
3344 	error = xfs_buf_submit(bp);
3345 	if (error) {
3346 		if (!XFS_FORCED_SHUTDOWN(mp)) {
3347 			xfs_buf_ioerror_alert(bp, __this_address);
3348 			ASSERT(0);
3349 		}
3350 		xfs_buf_relse(bp);
3351 		return error;
3352 	}
3353 
3354 	/* Convert superblock from on-disk format */
3355 	sbp = &mp->m_sb;
3356 	xfs_sb_from_disk(sbp, bp->b_addr);
3357 	xfs_buf_relse(bp);
3358 
3359 	/* re-initialise in-core superblock and geometry structures */
3360 	xfs_reinit_percpu_counters(mp);
3361 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3362 	if (error) {
3363 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3364 		return error;
3365 	}
3366 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3367 
3368 	xlog_recover_check_summary(log);
3369 
3370 	/* Normal transactions can now occur */
3371 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3372 	return 0;
3373 }
3374 
3375 /*
3376  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3377  *
3378  * Return error or zero.
3379  */
3380 int
3381 xlog_recover(
3382 	struct xlog	*log)
3383 {
3384 	xfs_daddr_t	head_blk, tail_blk;
3385 	int		error;
3386 
3387 	/* find the tail of the log */
3388 	error = xlog_find_tail(log, &head_blk, &tail_blk);
3389 	if (error)
3390 		return error;
3391 
3392 	/*
3393 	 * The superblock was read before the log was available and thus the LSN
3394 	 * could not be verified. Check the superblock LSN against the current
3395 	 * LSN now that it's known.
3396 	 */
3397 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
3398 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3399 		return -EINVAL;
3400 
3401 	if (tail_blk != head_blk) {
3402 		/* There used to be a comment here:
3403 		 *
3404 		 * disallow recovery on read-only mounts.  note -- mount
3405 		 * checks for ENOSPC and turns it into an intelligent
3406 		 * error message.
3407 		 * ...but this is no longer true.  Now, unless you specify
3408 		 * NORECOVERY (in which case this function would never be
3409 		 * called), we just go ahead and recover.  We do this all
3410 		 * under the vfs layer, so we can get away with it unless
3411 		 * the device itself is read-only, in which case we fail.
3412 		 */
3413 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3414 			return error;
3415 		}
3416 
3417 		/*
3418 		 * Version 5 superblock log feature mask validation. We know the
3419 		 * log is dirty so check if there are any unknown log features
3420 		 * in what we need to recover. If there are unknown features
3421 		 * (e.g. unsupported transactions), then simply reject the
3422 		 * attempt at recovery before touching anything.
3423 		 */
3424 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
3425 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3426 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3427 			xfs_warn(log->l_mp,
3428 "Superblock has unknown incompatible log features (0x%x) enabled.",
3429 				(log->l_mp->m_sb.sb_features_log_incompat &
3430 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3431 			xfs_warn(log->l_mp,
3432 "The log can not be fully and/or safely recovered by this kernel.");
3433 			xfs_warn(log->l_mp,
3434 "Please recover the log on a kernel that supports the unknown features.");
3435 			return -EINVAL;
3436 		}
3437 
3438 		/*
3439 		 * Delay log recovery if the debug hook is set. This is debug
3440 		 * instrumentation to coordinate simulation of I/O failures with
3441 		 * log recovery.
3442 		 */
3443 		if (xfs_globals.log_recovery_delay) {
3444 			xfs_notice(log->l_mp,
3445 				"Delaying log recovery for %d seconds.",
3446 				xfs_globals.log_recovery_delay);
3447 			msleep(xfs_globals.log_recovery_delay * 1000);
3448 		}
3449 
3450 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3451 				log->l_mp->m_logname ? log->l_mp->m_logname
3452 						     : "internal");
3453 
3454 		error = xlog_do_recover(log, head_blk, tail_blk);
3455 		log->l_flags |= XLOG_RECOVERY_NEEDED;
3456 	}
3457 	return error;
3458 }
3459 
3460 /*
3461  * In the first part of recovery we replay inodes and buffers and build
3462  * up the list of extent free items which need to be processed.  Here
3463  * we process the extent free items and clean up the on disk unlinked
3464  * inode lists.  This is separated from the first part of recovery so
3465  * that the root and real-time bitmap inodes can be read in from disk in
3466  * between the two stages.  This is necessary so that we can free space
3467  * in the real-time portion of the file system.
3468  */
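/*
 * Rough sketch of how the mount path is expected to interleave these two
 * stages (assuming the usual xfs_mountfs() flow; the names outside this
 * file are shown for orientation only):
 *
 *	xfs_log_mount()
 *	    xlog_recover()		stage 1: replay buffers/inodes,
 *					collect intent items
 *	... read in root and realtime bitmap inodes ...
 *	xfs_log_mount_finish()
 *	    xlog_recover_finish()	stage 2: process intents, clean up
 *					unlinked inode lists
 */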
3469 int
3470 xlog_recover_finish(
3471 	struct xlog	*log)
3472 {
3473 	/*
3474 	 * Now we're ready to do the transactions needed for the
3475 	 * rest of recovery.  Start with completing all the extent
3476 	 * free intent records and then process the unlinked inode
3477 	 * lists.  At this point, we essentially run in normal mode
3478 	 * except that we're still performing recovery actions
3479 	 * rather than accepting new requests.
3480 	 */
3481 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3482 		int	error;
3483 		error = xlog_recover_process_intents(log);
3484 		if (error) {
3485 			xfs_alert(log->l_mp, "Failed to recover intents");
3486 			return error;
3487 		}
3488 
3489 		/*
3490 		 * Sync the log to get all the intents out of the AIL.
3491 		 * This isn't absolutely necessary, but it helps in case
3492 		 * the unlink transactions have trouble pushing the
3493 		 * intents out of the way.
3494 		 */
3495 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3496 
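		/*
		 * Now clean up the on-disk unlinked inode lists: inodes
		 * that were unlinked but still open at crash time get
		 * freed here.
		 */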
3497 		xlog_recover_process_iunlinks(log);
3498 
3499 		xlog_recover_check_summary(log);
3500 
3501 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3502 				log->l_mp->m_logname ? log->l_mp->m_logname
3503 						     : "internal");
3504 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3505 	} else {
3506 		xfs_info(log->l_mp, "Ending clean mount");
3507 	}
3508 	return 0;
3509 }
3510 
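/*
 * Cancel the intent items collected during the first phase of recovery,
 * typically when a mount is being torn down before xlog_recover_finish()
 * has run.
 */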
3511 void
3512 xlog_recover_cancel(
3513 	struct xlog	*log)
3514 {
3515 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
3516 		xlog_recover_cancel_intents(log);
3517 }
3518 
3519 #if defined(DEBUG)
3520 /*
3521  * Read all of the agf and agi counters and tally up the free block and
3522  * inode counts; the totals are no longer compared against the superblock.
3523  */
3524 STATIC void
3525 xlog_recover_check_summary(
3526 	struct xlog	*log)
3527 {
3528 	struct xfs_mount	*mp;
3529 	struct xfs_buf		*agfbp;
3530 	struct xfs_buf		*agibp;
3531 	xfs_agnumber_t	agno;
3532 	uint64_t	freeblks;
3533 	uint64_t	itotal;
3534 	uint64_t	ifree;
3535 	int		error;
3536 
3537 	mp = log->l_mp;
3538 
3539 	freeblks = 0LL;
3540 	itotal = 0LL;
3541 	ifree = 0LL;
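	/* Walk every AG and accumulate its free space and inode counts. */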
3542 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3543 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3544 		if (error) {
3545 			xfs_alert(mp, "%s agf read failed agno %d error %d",
3546 						__func__, agno, error);
3547 		} else {
3548 			struct xfs_agf	*agfp = agfbp->b_addr;
3549 
3550 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
3551 				    be32_to_cpu(agfp->agf_flcount);
3552 			xfs_buf_relse(agfbp);
3553 		}
3554 
3555 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3556 		if (error) {
3557 			xfs_alert(mp, "%s agi read failed agno %d error %d",
3558 						__func__, agno, error);
3559 		} else {
3560 			struct xfs_agi	*agi = agibp->b_addr;
3561 
3562 			itotal += be32_to_cpu(agi->agi_count);
3563 			ifree += be32_to_cpu(agi->agi_freecount);
3564 			xfs_buf_relse(agibp);
3565 		}
3566 	}
3567 }
3568 #endif /* DEBUG */
3569