xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision 6724ed7f)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_inode.h"
31 #include "xfs_trans.h"
32 #include "xfs_log.h"
33 #include "xfs_log_priv.h"
34 #include "xfs_log_recover.h"
35 #include "xfs_inode_item.h"
36 #include "xfs_extfree_item.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_alloc.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_quota.h"
41 #include "xfs_cksum.h"
42 #include "xfs_trace.h"
43 #include "xfs_icache.h"
44 #include "xfs_bmap_btree.h"
45 #include "xfs_error.h"
46 #include "xfs_dir2.h"
47 #include "xfs_rmap_item.h"
48 #include "xfs_buf_item.h"
49 #include "xfs_refcount_item.h"
50 #include "xfs_bmap_item.h"
51 
52 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
53 
54 STATIC int
55 xlog_find_zeroed(
56 	struct xlog	*,
57 	xfs_daddr_t	*);
58 STATIC int
59 xlog_clear_stale_blocks(
60 	struct xlog	*,
61 	xfs_lsn_t);
62 #if defined(DEBUG)
63 STATIC void
64 xlog_recover_check_summary(
65 	struct xlog *);
66 #else
67 #define	xlog_recover_check_summary(log)
68 #endif
69 STATIC int
70 xlog_do_recovery_pass(
71         struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
72 
73 /*
74  * This structure is used during recovery to record the buf log items which
75  * have been canceled and should not be replayed.
76  */
77 struct xfs_buf_cancel {
78 	xfs_daddr_t		bc_blkno;
79 	uint			bc_len;
80 	int			bc_refcount;
81 	struct list_head	bc_list;
82 };
83 
84 /*
85  * Sector aligned buffer routines for buffer create/read/write/access
86  */
87 
88 /*
89  * Verify the log-relative block number and length in basic blocks are valid for
90  * an operation involving the given XFS log buffer. Returns true if the fields
91  * are valid, false otherwise.
92  */
93 static inline bool
94 xlog_verify_bp(
95 	struct xlog	*log,
96 	xfs_daddr_t	blk_no,
97 	int		bbcount)
98 {
99 	if (blk_no < 0 || blk_no >= log->l_logBBsize)
100 		return false;
101 	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
102 		return false;
103 	return true;
104 }
105 
106 /*
107  * Allocate a buffer to hold log data.  The buffer needs to be able
108  * to map to a range of nbblks basic blocks at any valid (basic
109  * block) offset within the log.
110  */
111 STATIC xfs_buf_t *
112 xlog_get_bp(
113 	struct xlog	*log,
114 	int		nbblks)
115 {
116 	struct xfs_buf	*bp;
117 
118 	/*
119 	 * Pass log block 0 since we don't have an addr yet, buffer will be
120 	 * verified on read.
121 	 */
122 	if (!xlog_verify_bp(log, 0, nbblks)) {
123 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
124 			nbblks);
125 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
126 		return NULL;
127 	}
128 
129 	/*
130 	 * We do log I/O in units of log sectors (a power-of-2
131 	 * multiple of the basic block size), so we round up the
132 	 * requested size to accommodate the basic blocks required
133 	 * for complete log sectors.
134 	 *
135 	 * In addition, the buffer may be used for a non-sector-
136 	 * aligned block offset, in which case an I/O of the
137 	 * requested size could extend beyond the end of the
138 	 * buffer.  If the requested size is only 1 basic block it
139 	 * will never straddle a sector boundary, so this won't be
140 	 * an issue.  Nor will this be a problem if the log I/O is
141 	 * done in basic blocks (sector size 1).  But otherwise we
142 	 * extend the buffer by one extra log sector to ensure
143 	 * there's space to accommodate this possibility.
144 	 */
145 	if (nbblks > 1 && log->l_sectBBsize > 1)
146 		nbblks += log->l_sectBBsize;
147 	nbblks = round_up(nbblks, log->l_sectBBsize);
148 
149 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
150 	if (bp)
151 		xfs_buf_unlock(bp);
152 	return bp;
153 }
154 
155 STATIC void
156 xlog_put_bp(
157 	xfs_buf_t	*bp)
158 {
159 	xfs_buf_free(bp);
160 }
161 
162 /*
163  * Return the address of the start of the given block number's data
164  * in a log buffer.  The buffer covers a log sector-aligned region.
165  */
166 STATIC char *
167 xlog_align(
168 	struct xlog	*log,
169 	xfs_daddr_t	blk_no,
170 	int		nbblks,
171 	struct xfs_buf	*bp)
172 {
173 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
174 
175 	ASSERT(offset + nbblks <= bp->b_length);
176 	return bp->b_addr + BBTOB(offset);
177 }
178 
179 
180 /*
181  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
182  */
183 STATIC int
184 xlog_bread_noalign(
185 	struct xlog	*log,
186 	xfs_daddr_t	blk_no,
187 	int		nbblks,
188 	struct xfs_buf	*bp)
189 {
190 	int		error;
191 
192 	if (!xlog_verify_bp(log, blk_no, nbblks)) {
193 		xfs_warn(log->l_mp,
194 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
195 			 blk_no, nbblks);
196 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
197 		return -EFSCORRUPTED;
198 	}
199 
200 	blk_no = round_down(blk_no, log->l_sectBBsize);
201 	nbblks = round_up(nbblks, log->l_sectBBsize);
202 
203 	ASSERT(nbblks > 0);
204 	ASSERT(nbblks <= bp->b_length);
205 
206 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
207 	bp->b_flags |= XBF_READ;
208 	bp->b_io_length = nbblks;
209 	bp->b_error = 0;
210 
211 	error = xfs_buf_submit_wait(bp);
212 	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
213 		xfs_buf_ioerror_alert(bp, __func__);
214 	return error;
215 }
216 
217 STATIC int
218 xlog_bread(
219 	struct xlog	*log,
220 	xfs_daddr_t	blk_no,
221 	int		nbblks,
222 	struct xfs_buf	*bp,
223 	char		**offset)
224 {
225 	int		error;
226 
227 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
228 	if (error)
229 		return error;
230 
231 	*offset = xlog_align(log, blk_no, nbblks, bp);
232 	return 0;
233 }
234 
235 /*
236  * Read at an offset into the buffer. Returns with the buffer in it's original
237  * state regardless of the result of the read.
238  */
239 STATIC int
240 xlog_bread_offset(
241 	struct xlog	*log,
242 	xfs_daddr_t	blk_no,		/* block to read from */
243 	int		nbblks,		/* blocks to read */
244 	struct xfs_buf	*bp,
245 	char		*offset)
246 {
247 	char		*orig_offset = bp->b_addr;
248 	int		orig_len = BBTOB(bp->b_length);
249 	int		error, error2;
250 
251 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
252 	if (error)
253 		return error;
254 
255 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
256 
257 	/* must reset buffer pointer even on error */
258 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
259 	if (error)
260 		return error;
261 	return error2;
262 }
263 
264 /*
265  * Write out the buffer at the given block for the given number of blocks.
266  * The buffer is kept locked across the write and is returned locked.
267  * This can only be used for synchronous log writes.
268  */
269 STATIC int
270 xlog_bwrite(
271 	struct xlog	*log,
272 	xfs_daddr_t	blk_no,
273 	int		nbblks,
274 	struct xfs_buf	*bp)
275 {
276 	int		error;
277 
278 	if (!xlog_verify_bp(log, blk_no, nbblks)) {
279 		xfs_warn(log->l_mp,
280 			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
281 			 blk_no, nbblks);
282 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
283 		return -EFSCORRUPTED;
284 	}
285 
286 	blk_no = round_down(blk_no, log->l_sectBBsize);
287 	nbblks = round_up(nbblks, log->l_sectBBsize);
288 
289 	ASSERT(nbblks > 0);
290 	ASSERT(nbblks <= bp->b_length);
291 
292 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
293 	xfs_buf_hold(bp);
294 	xfs_buf_lock(bp);
295 	bp->b_io_length = nbblks;
296 	bp->b_error = 0;
297 
298 	error = xfs_bwrite(bp);
299 	if (error)
300 		xfs_buf_ioerror_alert(bp, __func__);
301 	xfs_buf_relse(bp);
302 	return error;
303 }
304 
305 #ifdef DEBUG
306 /*
307  * dump debug superblock and log record information
308  */
309 STATIC void
310 xlog_header_check_dump(
311 	xfs_mount_t		*mp,
312 	xlog_rec_header_t	*head)
313 {
314 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
315 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
316 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
317 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
318 }
319 #else
320 #define xlog_header_check_dump(mp, head)
321 #endif
322 
323 /*
324  * check log record header for recovery
325  */
326 STATIC int
327 xlog_header_check_recover(
328 	xfs_mount_t		*mp,
329 	xlog_rec_header_t	*head)
330 {
331 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
332 
333 	/*
334 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
335 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
336 	 * a dirty log created in IRIX.
337 	 */
338 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
339 		xfs_warn(mp,
340 	"dirty log written in incompatible format - can't recover");
341 		xlog_header_check_dump(mp, head);
342 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
343 				 XFS_ERRLEVEL_HIGH, mp);
344 		return -EFSCORRUPTED;
345 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
346 		xfs_warn(mp,
347 	"dirty log entry has mismatched uuid - can't recover");
348 		xlog_header_check_dump(mp, head);
349 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
350 				 XFS_ERRLEVEL_HIGH, mp);
351 		return -EFSCORRUPTED;
352 	}
353 	return 0;
354 }
355 
356 /*
357  * read the head block of the log and check the header
358  */
359 STATIC int
360 xlog_header_check_mount(
361 	xfs_mount_t		*mp,
362 	xlog_rec_header_t	*head)
363 {
364 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
365 
366 	if (uuid_is_null(&head->h_fs_uuid)) {
367 		/*
368 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
369 		 * h_fs_uuid is null, we assume this log was last mounted
370 		 * by IRIX and continue.
371 		 */
372 		xfs_warn(mp, "null uuid in log - IRIX style log");
373 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
374 		xfs_warn(mp, "log has mismatched uuid - can't recover");
375 		xlog_header_check_dump(mp, head);
376 		XFS_ERROR_REPORT("xlog_header_check_mount",
377 				 XFS_ERRLEVEL_HIGH, mp);
378 		return -EFSCORRUPTED;
379 	}
380 	return 0;
381 }
382 
383 STATIC void
384 xlog_recover_iodone(
385 	struct xfs_buf	*bp)
386 {
387 	if (bp->b_error) {
388 		/*
389 		 * We're not going to bother about retrying
390 		 * this during recovery. One strike!
391 		 */
392 		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
393 			xfs_buf_ioerror_alert(bp, __func__);
394 			xfs_force_shutdown(bp->b_target->bt_mount,
395 						SHUTDOWN_META_IO_ERROR);
396 		}
397 	}
398 
399 	/*
400 	 * On v5 supers, a bli could be attached to update the metadata LSN.
401 	 * Clean it up.
402 	 */
403 	if (bp->b_fspriv)
404 		xfs_buf_item_relse(bp);
405 	ASSERT(bp->b_fspriv == NULL);
406 
407 	bp->b_iodone = NULL;
408 	xfs_buf_ioend(bp);
409 }
410 
411 /*
412  * This routine finds (to an approximation) the first block in the physical
413  * log which contains the given cycle.  It uses a binary search algorithm.
414  * Note that the algorithm can not be perfect because the disk will not
415  * necessarily be perfect.
416  */
417 STATIC int
418 xlog_find_cycle_start(
419 	struct xlog	*log,
420 	struct xfs_buf	*bp,
421 	xfs_daddr_t	first_blk,
422 	xfs_daddr_t	*last_blk,
423 	uint		cycle)
424 {
425 	char		*offset;
426 	xfs_daddr_t	mid_blk;
427 	xfs_daddr_t	end_blk;
428 	uint		mid_cycle;
429 	int		error;
430 
431 	end_blk = *last_blk;
432 	mid_blk = BLK_AVG(first_blk, end_blk);
433 	while (mid_blk != first_blk && mid_blk != end_blk) {
434 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
435 		if (error)
436 			return error;
437 		mid_cycle = xlog_get_cycle(offset);
438 		if (mid_cycle == cycle)
439 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
440 		else
441 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
442 		mid_blk = BLK_AVG(first_blk, end_blk);
443 	}
444 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
445 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
446 
447 	*last_blk = end_blk;
448 
449 	return 0;
450 }
451 
452 /*
453  * Check that a range of blocks does not contain stop_on_cycle_no.
454  * Fill in *new_blk with the block offset where such a block is
455  * found, or with -1 (an invalid block number) if there is no such
456  * block in the range.  The scan needs to occur from front to back
457  * and the pointer into the region must be updated since a later
458  * routine will need to perform another test.
459  */
460 STATIC int
461 xlog_find_verify_cycle(
462 	struct xlog	*log,
463 	xfs_daddr_t	start_blk,
464 	int		nbblks,
465 	uint		stop_on_cycle_no,
466 	xfs_daddr_t	*new_blk)
467 {
468 	xfs_daddr_t	i, j;
469 	uint		cycle;
470 	xfs_buf_t	*bp;
471 	xfs_daddr_t	bufblks;
472 	char		*buf = NULL;
473 	int		error = 0;
474 
475 	/*
476 	 * Greedily allocate a buffer big enough to handle the full
477 	 * range of basic blocks we'll be examining.  If that fails,
478 	 * try a smaller size.  We need to be able to read at least
479 	 * a log sector, or we're out of luck.
480 	 */
481 	bufblks = 1 << ffs(nbblks);
482 	while (bufblks > log->l_logBBsize)
483 		bufblks >>= 1;
484 	while (!(bp = xlog_get_bp(log, bufblks))) {
485 		bufblks >>= 1;
486 		if (bufblks < log->l_sectBBsize)
487 			return -ENOMEM;
488 	}
489 
490 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
491 		int	bcount;
492 
493 		bcount = min(bufblks, (start_blk + nbblks - i));
494 
495 		error = xlog_bread(log, i, bcount, bp, &buf);
496 		if (error)
497 			goto out;
498 
499 		for (j = 0; j < bcount; j++) {
500 			cycle = xlog_get_cycle(buf);
501 			if (cycle == stop_on_cycle_no) {
502 				*new_blk = i+j;
503 				goto out;
504 			}
505 
506 			buf += BBSIZE;
507 		}
508 	}
509 
510 	*new_blk = -1;
511 
512 out:
513 	xlog_put_bp(bp);
514 	return error;
515 }
516 
517 /*
518  * Potentially backup over partial log record write.
519  *
520  * In the typical case, last_blk is the number of the block directly after
521  * a good log record.  Therefore, we subtract one to get the block number
522  * of the last block in the given buffer.  extra_bblks contains the number
523  * of blocks we would have read on a previous read.  This happens when the
524  * last log record is split over the end of the physical log.
525  *
526  * extra_bblks is the number of blocks potentially verified on a previous
527  * call to this routine.
528  */
529 STATIC int
530 xlog_find_verify_log_record(
531 	struct xlog		*log,
532 	xfs_daddr_t		start_blk,
533 	xfs_daddr_t		*last_blk,
534 	int			extra_bblks)
535 {
536 	xfs_daddr_t		i;
537 	xfs_buf_t		*bp;
538 	char			*offset = NULL;
539 	xlog_rec_header_t	*head = NULL;
540 	int			error = 0;
541 	int			smallmem = 0;
542 	int			num_blks = *last_blk - start_blk;
543 	int			xhdrs;
544 
545 	ASSERT(start_blk != 0 || *last_blk != start_blk);
546 
547 	if (!(bp = xlog_get_bp(log, num_blks))) {
548 		if (!(bp = xlog_get_bp(log, 1)))
549 			return -ENOMEM;
550 		smallmem = 1;
551 	} else {
552 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
553 		if (error)
554 			goto out;
555 		offset += ((num_blks - 1) << BBSHIFT);
556 	}
557 
558 	for (i = (*last_blk) - 1; i >= 0; i--) {
559 		if (i < start_blk) {
560 			/* valid log record not found */
561 			xfs_warn(log->l_mp,
562 		"Log inconsistent (didn't find previous header)");
563 			ASSERT(0);
564 			error = -EIO;
565 			goto out;
566 		}
567 
568 		if (smallmem) {
569 			error = xlog_bread(log, i, 1, bp, &offset);
570 			if (error)
571 				goto out;
572 		}
573 
574 		head = (xlog_rec_header_t *)offset;
575 
576 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
577 			break;
578 
579 		if (!smallmem)
580 			offset -= BBSIZE;
581 	}
582 
583 	/*
584 	 * We hit the beginning of the physical log & still no header.  Return
585 	 * to caller.  If caller can handle a return of -1, then this routine
586 	 * will be called again for the end of the physical log.
587 	 */
588 	if (i == -1) {
589 		error = 1;
590 		goto out;
591 	}
592 
593 	/*
594 	 * We have the final block of the good log (the first block
595 	 * of the log record _before_ the head. So we check the uuid.
596 	 */
597 	if ((error = xlog_header_check_mount(log->l_mp, head)))
598 		goto out;
599 
600 	/*
601 	 * We may have found a log record header before we expected one.
602 	 * last_blk will be the 1st block # with a given cycle #.  We may end
603 	 * up reading an entire log record.  In this case, we don't want to
604 	 * reset last_blk.  Only when last_blk points in the middle of a log
605 	 * record do we update last_blk.
606 	 */
607 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
608 		uint	h_size = be32_to_cpu(head->h_size);
609 
610 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
611 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
612 			xhdrs++;
613 	} else {
614 		xhdrs = 1;
615 	}
616 
617 	if (*last_blk - i + extra_bblks !=
618 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
619 		*last_blk = i;
620 
621 out:
622 	xlog_put_bp(bp);
623 	return error;
624 }
625 
626 /*
627  * Head is defined to be the point of the log where the next log write
628  * could go.  This means that incomplete LR writes at the end are
629  * eliminated when calculating the head.  We aren't guaranteed that previous
630  * LR have complete transactions.  We only know that a cycle number of
631  * current cycle number -1 won't be present in the log if we start writing
632  * from our current block number.
633  *
634  * last_blk contains the block number of the first block with a given
635  * cycle number.
636  *
637  * Return: zero if normal, non-zero if error.
638  */
639 STATIC int
640 xlog_find_head(
641 	struct xlog	*log,
642 	xfs_daddr_t	*return_head_blk)
643 {
644 	xfs_buf_t	*bp;
645 	char		*offset;
646 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
647 	int		num_scan_bblks;
648 	uint		first_half_cycle, last_half_cycle;
649 	uint		stop_on_cycle;
650 	int		error, log_bbnum = log->l_logBBsize;
651 
652 	/* Is the end of the log device zeroed? */
653 	error = xlog_find_zeroed(log, &first_blk);
654 	if (error < 0) {
655 		xfs_warn(log->l_mp, "empty log check failed");
656 		return error;
657 	}
658 	if (error == 1) {
659 		*return_head_blk = first_blk;
660 
661 		/* Is the whole lot zeroed? */
662 		if (!first_blk) {
663 			/* Linux XFS shouldn't generate totally zeroed logs -
664 			 * mkfs etc write a dummy unmount record to a fresh
665 			 * log so we can store the uuid in there
666 			 */
667 			xfs_warn(log->l_mp, "totally zeroed log");
668 		}
669 
670 		return 0;
671 	}
672 
673 	first_blk = 0;			/* get cycle # of 1st block */
674 	bp = xlog_get_bp(log, 1);
675 	if (!bp)
676 		return -ENOMEM;
677 
678 	error = xlog_bread(log, 0, 1, bp, &offset);
679 	if (error)
680 		goto bp_err;
681 
682 	first_half_cycle = xlog_get_cycle(offset);
683 
684 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
685 	error = xlog_bread(log, last_blk, 1, bp, &offset);
686 	if (error)
687 		goto bp_err;
688 
689 	last_half_cycle = xlog_get_cycle(offset);
690 	ASSERT(last_half_cycle != 0);
691 
692 	/*
693 	 * If the 1st half cycle number is equal to the last half cycle number,
694 	 * then the entire log is stamped with the same cycle number.  In this
695 	 * case, head_blk can't be set to zero (which makes sense).  The below
696 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
697 	 * we set it to log_bbnum which is an invalid block number, but this
698 	 * value makes the math correct.  If head_blk doesn't changed through
699 	 * all the tests below, *head_blk is set to zero at the very end rather
700 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
701 	 * in a circular file.
702 	 */
703 	if (first_half_cycle == last_half_cycle) {
704 		/*
705 		 * In this case we believe that the entire log should have
706 		 * cycle number last_half_cycle.  We need to scan backwards
707 		 * from the end verifying that there are no holes still
708 		 * containing last_half_cycle - 1.  If we find such a hole,
709 		 * then the start of that hole will be the new head.  The
710 		 * simple case looks like
711 		 *        x | x ... | x - 1 | x
712 		 * Another case that fits this picture would be
713 		 *        x | x + 1 | x ... | x
714 		 * In this case the head really is somewhere at the end of the
715 		 * log, as one of the latest writes at the beginning was
716 		 * incomplete.
717 		 * One more case is
718 		 *        x | x + 1 | x ... | x - 1 | x
719 		 * This is really the combination of the above two cases, and
720 		 * the head has to end up at the start of the x-1 hole at the
721 		 * end of the log.
722 		 *
723 		 * In the 256k log case, we will read from the beginning to the
724 		 * end of the log and search for cycle numbers equal to x-1.
725 		 * We don't worry about the x+1 blocks that we encounter,
726 		 * because we know that they cannot be the head since the log
727 		 * started with x.
728 		 */
729 		head_blk = log_bbnum;
730 		stop_on_cycle = last_half_cycle - 1;
731 	} else {
732 		/*
733 		 * In this case we want to find the first block with cycle
734 		 * number matching last_half_cycle.  We expect the log to be
735 		 * some variation on
736 		 *        x + 1 ... | x ... | x
737 		 * The first block with cycle number x (last_half_cycle) will
738 		 * be where the new head belongs.  First we do a binary search
739 		 * for the first occurrence of last_half_cycle.  The binary
740 		 * search may not be totally accurate, so then we scan back
741 		 * from there looking for occurrences of last_half_cycle before
742 		 * us.  If that backwards scan wraps around the beginning of
743 		 * the log, then we look for occurrences of last_half_cycle - 1
744 		 * at the end of the log.  The cases we're looking for look
745 		 * like
746 		 *                               v binary search stopped here
747 		 *        x + 1 ... | x | x + 1 | x ... | x
748 		 *                   ^ but we want to locate this spot
749 		 * or
750 		 *        <---------> less than scan distance
751 		 *        x + 1 ... | x ... | x - 1 | x
752 		 *                           ^ we want to locate this spot
753 		 */
754 		stop_on_cycle = last_half_cycle;
755 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
756 						&head_blk, last_half_cycle)))
757 			goto bp_err;
758 	}
759 
760 	/*
761 	 * Now validate the answer.  Scan back some number of maximum possible
762 	 * blocks and make sure each one has the expected cycle number.  The
763 	 * maximum is determined by the total possible amount of buffering
764 	 * in the in-core log.  The following number can be made tighter if
765 	 * we actually look at the block size of the filesystem.
766 	 */
767 	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
768 	if (head_blk >= num_scan_bblks) {
769 		/*
770 		 * We are guaranteed that the entire check can be performed
771 		 * in one buffer.
772 		 */
773 		start_blk = head_blk - num_scan_bblks;
774 		if ((error = xlog_find_verify_cycle(log,
775 						start_blk, num_scan_bblks,
776 						stop_on_cycle, &new_blk)))
777 			goto bp_err;
778 		if (new_blk != -1)
779 			head_blk = new_blk;
780 	} else {		/* need to read 2 parts of log */
781 		/*
782 		 * We are going to scan backwards in the log in two parts.
783 		 * First we scan the physical end of the log.  In this part
784 		 * of the log, we are looking for blocks with cycle number
785 		 * last_half_cycle - 1.
786 		 * If we find one, then we know that the log starts there, as
787 		 * we've found a hole that didn't get written in going around
788 		 * the end of the physical log.  The simple case for this is
789 		 *        x + 1 ... | x ... | x - 1 | x
790 		 *        <---------> less than scan distance
791 		 * If all of the blocks at the end of the log have cycle number
792 		 * last_half_cycle, then we check the blocks at the start of
793 		 * the log looking for occurrences of last_half_cycle.  If we
794 		 * find one, then our current estimate for the location of the
795 		 * first occurrence of last_half_cycle is wrong and we move
796 		 * back to the hole we've found.  This case looks like
797 		 *        x + 1 ... | x | x + 1 | x ...
798 		 *                               ^ binary search stopped here
799 		 * Another case we need to handle that only occurs in 256k
800 		 * logs is
801 		 *        x + 1 ... | x ... | x+1 | x ...
802 		 *                   ^ binary search stops here
803 		 * In a 256k log, the scan at the end of the log will see the
804 		 * x + 1 blocks.  We need to skip past those since that is
805 		 * certainly not the head of the log.  By searching for
806 		 * last_half_cycle-1 we accomplish that.
807 		 */
808 		ASSERT(head_blk <= INT_MAX &&
809 			(xfs_daddr_t) num_scan_bblks >= head_blk);
810 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
811 		if ((error = xlog_find_verify_cycle(log, start_blk,
812 					num_scan_bblks - (int)head_blk,
813 					(stop_on_cycle - 1), &new_blk)))
814 			goto bp_err;
815 		if (new_blk != -1) {
816 			head_blk = new_blk;
817 			goto validate_head;
818 		}
819 
820 		/*
821 		 * Scan beginning of log now.  The last part of the physical
822 		 * log is good.  This scan needs to verify that it doesn't find
823 		 * the last_half_cycle.
824 		 */
825 		start_blk = 0;
826 		ASSERT(head_blk <= INT_MAX);
827 		if ((error = xlog_find_verify_cycle(log,
828 					start_blk, (int)head_blk,
829 					stop_on_cycle, &new_blk)))
830 			goto bp_err;
831 		if (new_blk != -1)
832 			head_blk = new_blk;
833 	}
834 
835 validate_head:
836 	/*
837 	 * Now we need to make sure head_blk is not pointing to a block in
838 	 * the middle of a log record.
839 	 */
840 	num_scan_bblks = XLOG_REC_SHIFT(log);
841 	if (head_blk >= num_scan_bblks) {
842 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
843 
844 		/* start ptr at last block ptr before head_blk */
845 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
846 		if (error == 1)
847 			error = -EIO;
848 		if (error)
849 			goto bp_err;
850 	} else {
851 		start_blk = 0;
852 		ASSERT(head_blk <= INT_MAX);
853 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
854 		if (error < 0)
855 			goto bp_err;
856 		if (error == 1) {
857 			/* We hit the beginning of the log during our search */
858 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
859 			new_blk = log_bbnum;
860 			ASSERT(start_blk <= INT_MAX &&
861 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
862 			ASSERT(head_blk <= INT_MAX);
863 			error = xlog_find_verify_log_record(log, start_blk,
864 							&new_blk, (int)head_blk);
865 			if (error == 1)
866 				error = -EIO;
867 			if (error)
868 				goto bp_err;
869 			if (new_blk != log_bbnum)
870 				head_blk = new_blk;
871 		} else if (error)
872 			goto bp_err;
873 	}
874 
875 	xlog_put_bp(bp);
876 	if (head_blk == log_bbnum)
877 		*return_head_blk = 0;
878 	else
879 		*return_head_blk = head_blk;
880 	/*
881 	 * When returning here, we have a good block number.  Bad block
882 	 * means that during a previous crash, we didn't have a clean break
883 	 * from cycle number N to cycle number N-1.  In this case, we need
884 	 * to find the first block with cycle number N-1.
885 	 */
886 	return 0;
887 
888  bp_err:
889 	xlog_put_bp(bp);
890 
891 	if (error)
892 		xfs_warn(log->l_mp, "failed to find log head");
893 	return error;
894 }
895 
896 /*
897  * Seek backwards in the log for log record headers.
898  *
899  * Given a starting log block, walk backwards until we find the provided number
900  * of records or hit the provided tail block. The return value is the number of
901  * records encountered or a negative error code. The log block and buffer
902  * pointer of the last record seen are returned in rblk and rhead respectively.
903  */
904 STATIC int
905 xlog_rseek_logrec_hdr(
906 	struct xlog		*log,
907 	xfs_daddr_t		head_blk,
908 	xfs_daddr_t		tail_blk,
909 	int			count,
910 	struct xfs_buf		*bp,
911 	xfs_daddr_t		*rblk,
912 	struct xlog_rec_header	**rhead,
913 	bool			*wrapped)
914 {
915 	int			i;
916 	int			error;
917 	int			found = 0;
918 	char			*offset = NULL;
919 	xfs_daddr_t		end_blk;
920 
921 	*wrapped = false;
922 
923 	/*
924 	 * Walk backwards from the head block until we hit the tail or the first
925 	 * block in the log.
926 	 */
927 	end_blk = head_blk > tail_blk ? tail_blk : 0;
928 	for (i = (int) head_blk - 1; i >= end_blk; i--) {
929 		error = xlog_bread(log, i, 1, bp, &offset);
930 		if (error)
931 			goto out_error;
932 
933 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
934 			*rblk = i;
935 			*rhead = (struct xlog_rec_header *) offset;
936 			if (++found == count)
937 				break;
938 		}
939 	}
940 
941 	/*
942 	 * If we haven't hit the tail block or the log record header count,
943 	 * start looking again from the end of the physical log. Note that
944 	 * callers can pass head == tail if the tail is not yet known.
945 	 */
946 	if (tail_blk >= head_blk && found != count) {
947 		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
948 			error = xlog_bread(log, i, 1, bp, &offset);
949 			if (error)
950 				goto out_error;
951 
952 			if (*(__be32 *)offset ==
953 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
954 				*wrapped = true;
955 				*rblk = i;
956 				*rhead = (struct xlog_rec_header *) offset;
957 				if (++found == count)
958 					break;
959 			}
960 		}
961 	}
962 
963 	return found;
964 
965 out_error:
966 	return error;
967 }
968 
969 /*
970  * Seek forward in the log for log record headers.
971  *
972  * Given head and tail blocks, walk forward from the tail block until we find
973  * the provided number of records or hit the head block. The return value is the
974  * number of records encountered or a negative error code. The log block and
975  * buffer pointer of the last record seen are returned in rblk and rhead
976  * respectively.
977  */
978 STATIC int
979 xlog_seek_logrec_hdr(
980 	struct xlog		*log,
981 	xfs_daddr_t		head_blk,
982 	xfs_daddr_t		tail_blk,
983 	int			count,
984 	struct xfs_buf		*bp,
985 	xfs_daddr_t		*rblk,
986 	struct xlog_rec_header	**rhead,
987 	bool			*wrapped)
988 {
989 	int			i;
990 	int			error;
991 	int			found = 0;
992 	char			*offset = NULL;
993 	xfs_daddr_t		end_blk;
994 
995 	*wrapped = false;
996 
997 	/*
998 	 * Walk forward from the tail block until we hit the head or the last
999 	 * block in the log.
1000 	 */
1001 	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
1002 	for (i = (int) tail_blk; i <= end_blk; i++) {
1003 		error = xlog_bread(log, i, 1, bp, &offset);
1004 		if (error)
1005 			goto out_error;
1006 
1007 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
1008 			*rblk = i;
1009 			*rhead = (struct xlog_rec_header *) offset;
1010 			if (++found == count)
1011 				break;
1012 		}
1013 	}
1014 
1015 	/*
1016 	 * If we haven't hit the head block or the log record header count,
1017 	 * start looking again from the start of the physical log.
1018 	 */
1019 	if (tail_blk > head_blk && found != count) {
1020 		for (i = 0; i < (int) head_blk; i++) {
1021 			error = xlog_bread(log, i, 1, bp, &offset);
1022 			if (error)
1023 				goto out_error;
1024 
1025 			if (*(__be32 *)offset ==
1026 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
1027 				*wrapped = true;
1028 				*rblk = i;
1029 				*rhead = (struct xlog_rec_header *) offset;
1030 				if (++found == count)
1031 					break;
1032 			}
1033 		}
1034 	}
1035 
1036 	return found;
1037 
1038 out_error:
1039 	return error;
1040 }
1041 
1042 /*
1043  * Calculate distance from head to tail (i.e., unused space in the log).
1044  */
1045 static inline int
1046 xlog_tail_distance(
1047 	struct xlog	*log,
1048 	xfs_daddr_t	head_blk,
1049 	xfs_daddr_t	tail_blk)
1050 {
1051 	if (head_blk < tail_blk)
1052 		return tail_blk - head_blk;
1053 
1054 	return tail_blk + (log->l_logBBsize - head_blk);
1055 }
1056 
1057 /*
1058  * Verify the log tail. This is particularly important when torn or incomplete
1059  * writes have been detected near the front of the log and the head has been
1060  * walked back accordingly.
1061  *
1062  * We also have to handle the case where the tail was pinned and the head
1063  * blocked behind the tail right before a crash. If the tail had been pushed
1064  * immediately prior to the crash and the subsequent checkpoint was only
1065  * partially written, it's possible it overwrote the last referenced tail in the
1066  * log with garbage. This is not a coherency problem because the tail must have
1067  * been pushed before it can be overwritten, but appears as log corruption to
1068  * recovery because we have no way to know the tail was updated if the
1069  * subsequent checkpoint didn't write successfully.
1070  *
1071  * Therefore, CRC check the log from tail to head. If a failure occurs and the
1072  * offending record is within max iclog bufs from the head, walk the tail
1073  * forward and retry until a valid tail is found or corruption is detected out
1074  * of the range of a possible overwrite.
1075  */
1076 STATIC int
1077 xlog_verify_tail(
1078 	struct xlog		*log,
1079 	xfs_daddr_t		head_blk,
1080 	xfs_daddr_t		*tail_blk,
1081 	int			hsize)
1082 {
1083 	struct xlog_rec_header	*thead;
1084 	struct xfs_buf		*bp;
1085 	xfs_daddr_t		first_bad;
1086 	int			error = 0;
1087 	bool			wrapped;
1088 	xfs_daddr_t		tmp_tail;
1089 	xfs_daddr_t		orig_tail = *tail_blk;
1090 
1091 	bp = xlog_get_bp(log, 1);
1092 	if (!bp)
1093 		return -ENOMEM;
1094 
1095 	/*
1096 	 * Make sure the tail points to a record (returns positive count on
1097 	 * success).
1098 	 */
1099 	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
1100 			&tmp_tail, &thead, &wrapped);
1101 	if (error < 0)
1102 		goto out;
1103 	if (*tail_blk != tmp_tail)
1104 		*tail_blk = tmp_tail;
1105 
1106 	/*
1107 	 * Run a CRC check from the tail to the head. We can't just check
1108 	 * MAX_ICLOGS records past the tail because the tail may point to stale
1109 	 * blocks cleared during the search for the head/tail. These blocks are
1110 	 * overwritten with zero-length records and thus record count is not a
1111 	 * reliable indicator of the iclog state before a crash.
1112 	 */
1113 	first_bad = 0;
1114 	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1115 				      XLOG_RECOVER_CRCPASS, &first_bad);
1116 	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1117 		int	tail_distance;
1118 
1119 		/*
1120 		 * Is corruption within range of the head? If so, retry from
1121 		 * the next record. Otherwise return an error.
1122 		 */
1123 		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
1124 		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
1125 			break;
1126 
1127 		/* skip to the next record; returns positive count on success */
1128 		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
1129 				&tmp_tail, &thead, &wrapped);
1130 		if (error < 0)
1131 			goto out;
1132 
1133 		*tail_blk = tmp_tail;
1134 		first_bad = 0;
1135 		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1136 					      XLOG_RECOVER_CRCPASS, &first_bad);
1137 	}
1138 
1139 	if (!error && *tail_blk != orig_tail)
1140 		xfs_warn(log->l_mp,
1141 		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1142 			 orig_tail, *tail_blk);
1143 out:
1144 	xlog_put_bp(bp);
1145 	return error;
1146 }
1147 
1148 /*
1149  * Detect and trim torn writes from the head of the log.
1150  *
1151  * Storage without sector atomicity guarantees can result in torn writes in the
1152  * log in the event of a crash. Our only means to detect this scenario is via
1153  * CRC verification. While we can't always be certain that CRC verification
1154  * failure is due to a torn write vs. an unrelated corruption, we do know that
1155  * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1156  * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1157  * the log and treat failures in this range as torn writes as a matter of
1158  * policy. In the event of CRC failure, the head is walked back to the last good
1159  * record in the log and the tail is updated from that record and verified.
1160  */
1161 STATIC int
1162 xlog_verify_head(
1163 	struct xlog		*log,
1164 	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
1165 	xfs_daddr_t		*tail_blk,	/* out: tail block */
1166 	struct xfs_buf		*bp,
1167 	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
1168 	struct xlog_rec_header	**rhead,	/* ptr to last record */
1169 	bool			*wrapped)	/* last rec. wraps phys. log */
1170 {
1171 	struct xlog_rec_header	*tmp_rhead;
1172 	struct xfs_buf		*tmp_bp;
1173 	xfs_daddr_t		first_bad;
1174 	xfs_daddr_t		tmp_rhead_blk;
1175 	int			found;
1176 	int			error;
1177 	bool			tmp_wrapped;
1178 
1179 	/*
1180 	 * Check the head of the log for torn writes. Search backwards from the
1181 	 * head until we hit the tail or the maximum number of log record I/Os
1182 	 * that could have been in flight at one time. Use a temporary buffer so
1183 	 * we don't trash the rhead/bp pointers from the caller.
1184 	 */
1185 	tmp_bp = xlog_get_bp(log, 1);
1186 	if (!tmp_bp)
1187 		return -ENOMEM;
1188 	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1189 				      XLOG_MAX_ICLOGS, tmp_bp, &tmp_rhead_blk,
1190 				      &tmp_rhead, &tmp_wrapped);
1191 	xlog_put_bp(tmp_bp);
1192 	if (error < 0)
1193 		return error;
1194 
1195 	/*
1196 	 * Now run a CRC verification pass over the records starting at the
1197 	 * block found above to the current head. If a CRC failure occurs, the
1198 	 * log block of the first bad record is saved in first_bad.
1199 	 */
1200 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1201 				      XLOG_RECOVER_CRCPASS, &first_bad);
1202 	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1203 		/*
1204 		 * We've hit a potential torn write. Reset the error and warn
1205 		 * about it.
1206 		 */
1207 		error = 0;
1208 		xfs_warn(log->l_mp,
1209 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1210 			 first_bad, *head_blk);
1211 
1212 		/*
1213 		 * Get the header block and buffer pointer for the last good
1214 		 * record before the bad record.
1215 		 *
1216 		 * Note that xlog_find_tail() clears the blocks at the new head
1217 		 * (i.e., the records with invalid CRC) if the cycle number
1218 		 * matches the the current cycle.
1219 		 */
1220 		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, bp,
1221 					      rhead_blk, rhead, wrapped);
1222 		if (found < 0)
1223 			return found;
1224 		if (found == 0)		/* XXX: right thing to do here? */
1225 			return -EIO;
1226 
1227 		/*
1228 		 * Reset the head block to the starting block of the first bad
1229 		 * log record and set the tail block based on the last good
1230 		 * record.
1231 		 *
1232 		 * Bail out if the updated head/tail match as this indicates
1233 		 * possible corruption outside of the acceptable
1234 		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1235 		 */
1236 		*head_blk = first_bad;
1237 		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1238 		if (*head_blk == *tail_blk) {
1239 			ASSERT(0);
1240 			return 0;
1241 		}
1242 	}
1243 	if (error)
1244 		return error;
1245 
1246 	return xlog_verify_tail(log, *head_blk, tail_blk,
1247 				be32_to_cpu((*rhead)->h_size));
1248 }
1249 
1250 /*
1251  * Check whether the head of the log points to an unmount record. In other
1252  * words, determine whether the log is clean. If so, update the in-core state
1253  * appropriately.
1254  */
1255 static int
1256 xlog_check_unmount_rec(
1257 	struct xlog		*log,
1258 	xfs_daddr_t		*head_blk,
1259 	xfs_daddr_t		*tail_blk,
1260 	struct xlog_rec_header	*rhead,
1261 	xfs_daddr_t		rhead_blk,
1262 	struct xfs_buf		*bp,
1263 	bool			*clean)
1264 {
1265 	struct xlog_op_header	*op_head;
1266 	xfs_daddr_t		umount_data_blk;
1267 	xfs_daddr_t		after_umount_blk;
1268 	int			hblks;
1269 	int			error;
1270 	char			*offset;
1271 
1272 	*clean = false;
1273 
1274 	/*
1275 	 * Look for unmount record. If we find it, then we know there was a
1276 	 * clean unmount. Since 'i' could be the last block in the physical
1277 	 * log, we convert to a log block before comparing to the head_blk.
1278 	 *
1279 	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1280 	 * below. We won't want to clear the unmount record if there is one, so
1281 	 * we pass the lsn of the unmount record rather than the block after it.
1282 	 */
1283 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1284 		int	h_size = be32_to_cpu(rhead->h_size);
1285 		int	h_version = be32_to_cpu(rhead->h_version);
1286 
1287 		if ((h_version & XLOG_VERSION_2) &&
1288 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1289 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1290 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1291 				hblks++;
1292 		} else {
1293 			hblks = 1;
1294 		}
1295 	} else {
1296 		hblks = 1;
1297 	}
1298 	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
1299 	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
1300 	if (*head_blk == after_umount_blk &&
1301 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1302 		umount_data_blk = rhead_blk + hblks;
1303 		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
1304 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1305 		if (error)
1306 			return error;
1307 
1308 		op_head = (struct xlog_op_header *)offset;
1309 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1310 			/*
1311 			 * Set tail and last sync so that newly written log
1312 			 * records will point recovery to after the current
1313 			 * unmount record.
1314 			 */
1315 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1316 					log->l_curr_cycle, after_umount_blk);
1317 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1318 					log->l_curr_cycle, after_umount_blk);
1319 			*tail_blk = after_umount_blk;
1320 
1321 			*clean = true;
1322 		}
1323 	}
1324 
1325 	return 0;
1326 }
1327 
1328 static void
1329 xlog_set_state(
1330 	struct xlog		*log,
1331 	xfs_daddr_t		head_blk,
1332 	struct xlog_rec_header	*rhead,
1333 	xfs_daddr_t		rhead_blk,
1334 	bool			bump_cycle)
1335 {
1336 	/*
1337 	 * Reset log values according to the state of the log when we
1338 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
1339 	 * one because the next write starts a new cycle rather than
1340 	 * continuing the cycle of the last good log record.  At this
1341 	 * point we have guaranteed that all partial log records have been
1342 	 * accounted for.  Therefore, we know that the last good log record
1343 	 * written was complete and ended exactly on the end boundary
1344 	 * of the physical log.
1345 	 */
1346 	log->l_prev_block = rhead_blk;
1347 	log->l_curr_block = (int)head_blk;
1348 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1349 	if (bump_cycle)
1350 		log->l_curr_cycle++;
1351 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1352 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1353 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1354 					BBTOB(log->l_curr_block));
1355 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1356 					BBTOB(log->l_curr_block));
1357 }
1358 
1359 /*
1360  * Find the sync block number or the tail of the log.
1361  *
1362  * This will be the block number of the last record to have its
1363  * associated buffers synced to disk.  Every log record header has
1364  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1365  * to get a sync block number.  The only concern is to figure out which
1366  * log record header to believe.
1367  *
1368  * The following algorithm uses the log record header with the largest
1369  * lsn.  The entire log record does not need to be valid.  We only care
1370  * that the header is valid.
1371  *
1372  * We could speed up search by using current head_blk buffer, but it is not
1373  * available.
1374  */
1375 STATIC int
1376 xlog_find_tail(
1377 	struct xlog		*log,
1378 	xfs_daddr_t		*head_blk,
1379 	xfs_daddr_t		*tail_blk)
1380 {
1381 	xlog_rec_header_t	*rhead;
1382 	char			*offset = NULL;
1383 	xfs_buf_t		*bp;
1384 	int			error;
1385 	xfs_daddr_t		rhead_blk;
1386 	xfs_lsn_t		tail_lsn;
1387 	bool			wrapped = false;
1388 	bool			clean = false;
1389 
1390 	/*
1391 	 * Find previous log record
1392 	 */
1393 	if ((error = xlog_find_head(log, head_blk)))
1394 		return error;
1395 	ASSERT(*head_blk < INT_MAX);
1396 
1397 	bp = xlog_get_bp(log, 1);
1398 	if (!bp)
1399 		return -ENOMEM;
1400 	if (*head_blk == 0) {				/* special case */
1401 		error = xlog_bread(log, 0, 1, bp, &offset);
1402 		if (error)
1403 			goto done;
1404 
1405 		if (xlog_get_cycle(offset) == 0) {
1406 			*tail_blk = 0;
1407 			/* leave all other log inited values alone */
1408 			goto done;
1409 		}
1410 	}
1411 
1412 	/*
1413 	 * Search backwards through the log looking for the log record header
1414 	 * block. This wraps all the way back around to the head so something is
1415 	 * seriously wrong if we can't find it.
1416 	 */
1417 	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
1418 				      &rhead_blk, &rhead, &wrapped);
1419 	if (error < 0)
1420 		return error;
1421 	if (!error) {
1422 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1423 		return -EIO;
1424 	}
1425 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1426 
1427 	/*
1428 	 * Set the log state based on the current head record.
1429 	 */
1430 	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1431 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1432 
1433 	/*
1434 	 * Look for an unmount record at the head of the log. This sets the log
1435 	 * state to determine whether recovery is necessary.
1436 	 */
1437 	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1438 				       rhead_blk, bp, &clean);
1439 	if (error)
1440 		goto done;
1441 
1442 	/*
1443 	 * Verify the log head if the log is not clean (e.g., we have anything
1444 	 * but an unmount record at the head). This uses CRC verification to
1445 	 * detect and trim torn writes. If discovered, CRC failures are
1446 	 * considered torn writes and the log head is trimmed accordingly.
1447 	 *
1448 	 * Note that we can only run CRC verification when the log is dirty
1449 	 * because there's no guarantee that the log data behind an unmount
1450 	 * record is compatible with the current architecture.
1451 	 */
1452 	if (!clean) {
1453 		xfs_daddr_t	orig_head = *head_blk;
1454 
1455 		error = xlog_verify_head(log, head_blk, tail_blk, bp,
1456 					 &rhead_blk, &rhead, &wrapped);
1457 		if (error)
1458 			goto done;
1459 
1460 		/* update in-core state again if the head changed */
1461 		if (*head_blk != orig_head) {
1462 			xlog_set_state(log, *head_blk, rhead, rhead_blk,
1463 				       wrapped);
1464 			tail_lsn = atomic64_read(&log->l_tail_lsn);
1465 			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1466 						       rhead, rhead_blk, bp,
1467 						       &clean);
1468 			if (error)
1469 				goto done;
1470 		}
1471 	}
1472 
1473 	/*
1474 	 * Note that the unmount was clean. If the unmount was not clean, we
1475 	 * need to know this to rebuild the superblock counters from the perag
1476 	 * headers if we have a filesystem using non-persistent counters.
1477 	 */
1478 	if (clean)
1479 		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1480 
1481 	/*
1482 	 * Make sure that there are no blocks in front of the head
1483 	 * with the same cycle number as the head.  This can happen
1484 	 * because we allow multiple outstanding log writes concurrently,
1485 	 * and the later writes might make it out before earlier ones.
1486 	 *
1487 	 * We use the lsn from before modifying it so that we'll never
1488 	 * overwrite the unmount record after a clean unmount.
1489 	 *
1490 	 * Do this only if we are going to recover the filesystem
1491 	 *
1492 	 * NOTE: This used to say "if (!readonly)"
1493 	 * However on Linux, we can & do recover a read-only filesystem.
1494 	 * We only skip recovery if NORECOVERY is specified on mount,
1495 	 * in which case we would not be here.
1496 	 *
1497 	 * But... if the -device- itself is readonly, just skip this.
1498 	 * We can't recover this device anyway, so it won't matter.
1499 	 */
1500 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1501 		error = xlog_clear_stale_blocks(log, tail_lsn);
1502 
1503 done:
1504 	xlog_put_bp(bp);
1505 
1506 	if (error)
1507 		xfs_warn(log->l_mp, "failed to locate log tail");
1508 	return error;
1509 }
1510 
1511 /*
1512  * Is the log zeroed at all?
1513  *
1514  * The last binary search should be changed to perform an X block read
1515  * once X becomes small enough.  You can then search linearly through
1516  * the X blocks.  This will cut down on the number of reads we need to do.
1517  *
1518  * If the log is partially zeroed, this routine will pass back the blkno
1519  * of the first block with cycle number 0.  It won't have a complete LR
1520  * preceding it.
1521  *
1522  * Return:
1523  *	0  => the log is completely written to
1524  *	1 => use *blk_no as the first block of the log
1525  *	<0 => error has occurred
1526  */
1527 STATIC int
1528 xlog_find_zeroed(
1529 	struct xlog	*log,
1530 	xfs_daddr_t	*blk_no)
1531 {
1532 	xfs_buf_t	*bp;
1533 	char		*offset;
1534 	uint	        first_cycle, last_cycle;
1535 	xfs_daddr_t	new_blk, last_blk, start_blk;
1536 	xfs_daddr_t     num_scan_bblks;
1537 	int	        error, log_bbnum = log->l_logBBsize;
1538 
1539 	*blk_no = 0;
1540 
1541 	/* check totally zeroed log */
1542 	bp = xlog_get_bp(log, 1);
1543 	if (!bp)
1544 		return -ENOMEM;
1545 	error = xlog_bread(log, 0, 1, bp, &offset);
1546 	if (error)
1547 		goto bp_err;
1548 
1549 	first_cycle = xlog_get_cycle(offset);
1550 	if (first_cycle == 0) {		/* completely zeroed log */
1551 		*blk_no = 0;
1552 		xlog_put_bp(bp);
1553 		return 1;
1554 	}
1555 
1556 	/* check partially zeroed log */
1557 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1558 	if (error)
1559 		goto bp_err;
1560 
1561 	last_cycle = xlog_get_cycle(offset);
1562 	if (last_cycle != 0) {		/* log completely written to */
1563 		xlog_put_bp(bp);
1564 		return 0;
1565 	} else if (first_cycle != 1) {
1566 		/*
1567 		 * If the cycle of the last block is zero, the cycle of
1568 		 * the first block must be 1. If it's not, maybe we're
1569 		 * not looking at a log... Bail out.
1570 		 */
1571 		xfs_warn(log->l_mp,
1572 			"Log inconsistent or not a log (last==0, first!=1)");
1573 		error = -EINVAL;
1574 		goto bp_err;
1575 	}
1576 
1577 	/* we have a partially zeroed log */
1578 	last_blk = log_bbnum-1;
1579 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1580 		goto bp_err;
1581 
1582 	/*
1583 	 * Validate the answer.  Because there is no way to guarantee that
1584 	 * the entire log is made up of log records which are the same size,
1585 	 * we scan over the defined maximum blocks.  At this point, the maximum
1586 	 * is not chosen to mean anything special.   XXXmiken
1587 	 */
1588 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1589 	ASSERT(num_scan_bblks <= INT_MAX);
1590 
1591 	if (last_blk < num_scan_bblks)
1592 		num_scan_bblks = last_blk;
1593 	start_blk = last_blk - num_scan_bblks;
1594 
1595 	/*
1596 	 * We search for any instances of cycle number 0 that occur before
1597 	 * our current estimate of the head.  What we're trying to detect is
1598 	 *        1 ... | 0 | 1 | 0...
1599 	 *                       ^ binary search ends here
1600 	 */
1601 	if ((error = xlog_find_verify_cycle(log, start_blk,
1602 					 (int)num_scan_bblks, 0, &new_blk)))
1603 		goto bp_err;
1604 	if (new_blk != -1)
1605 		last_blk = new_blk;
1606 
1607 	/*
1608 	 * Potentially backup over partial log record write.  We don't need
1609 	 * to search the end of the log because we know it is zero.
1610 	 */
1611 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1612 	if (error == 1)
1613 		error = -EIO;
1614 	if (error)
1615 		goto bp_err;
1616 
1617 	*blk_no = last_blk;
1618 bp_err:
1619 	xlog_put_bp(bp);
1620 	if (error)
1621 		return error;
1622 	return 1;
1623 }
1624 
1625 /*
1626  * These are simple subroutines used by xlog_clear_stale_blocks() below
1627  * to initialize a buffer full of empty log record headers and write
1628  * them into the log.
1629  */
1630 STATIC void
1631 xlog_add_record(
1632 	struct xlog		*log,
1633 	char			*buf,
1634 	int			cycle,
1635 	int			block,
1636 	int			tail_cycle,
1637 	int			tail_block)
1638 {
1639 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1640 
1641 	memset(buf, 0, BBSIZE);
1642 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1643 	recp->h_cycle = cpu_to_be32(cycle);
1644 	recp->h_version = cpu_to_be32(
1645 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1646 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1647 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1648 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1649 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1650 }
1651 
1652 STATIC int
1653 xlog_write_log_records(
1654 	struct xlog	*log,
1655 	int		cycle,
1656 	int		start_block,
1657 	int		blocks,
1658 	int		tail_cycle,
1659 	int		tail_block)
1660 {
1661 	char		*offset;
1662 	xfs_buf_t	*bp;
1663 	int		balign, ealign;
1664 	int		sectbb = log->l_sectBBsize;
1665 	int		end_block = start_block + blocks;
1666 	int		bufblks;
1667 	int		error = 0;
1668 	int		i, j = 0;
1669 
1670 	/*
1671 	 * Greedily allocate a buffer big enough to handle the full
1672 	 * range of basic blocks to be written.  If that fails, try
1673 	 * a smaller size.  We need to be able to write at least a
1674 	 * log sector, or we're out of luck.
1675 	 */
1676 	bufblks = 1 << ffs(blocks);
1677 	while (bufblks > log->l_logBBsize)
1678 		bufblks >>= 1;
1679 	while (!(bp = xlog_get_bp(log, bufblks))) {
1680 		bufblks >>= 1;
1681 		if (bufblks < sectbb)
1682 			return -ENOMEM;
1683 	}
1684 
1685 	/* We may need to do a read at the start to fill in part of
1686 	 * the buffer in the starting sector not covered by the first
1687 	 * write below.
1688 	 */
1689 	balign = round_down(start_block, sectbb);
1690 	if (balign != start_block) {
1691 		error = xlog_bread_noalign(log, start_block, 1, bp);
1692 		if (error)
1693 			goto out_put_bp;
1694 
1695 		j = start_block - balign;
1696 	}
1697 
1698 	for (i = start_block; i < end_block; i += bufblks) {
1699 		int		bcount, endcount;
1700 
1701 		bcount = min(bufblks, end_block - start_block);
1702 		endcount = bcount - j;
1703 
1704 		/* We may need to do a read at the end to fill in part of
1705 		 * the buffer in the final sector not covered by the write.
1706 		 * If this is the same sector as the above read, skip it.
1707 		 */
1708 		ealign = round_down(end_block, sectbb);
1709 		if (j == 0 && (start_block + endcount > ealign)) {
1710 			offset = bp->b_addr + BBTOB(ealign - start_block);
1711 			error = xlog_bread_offset(log, ealign, sectbb,
1712 							bp, offset);
1713 			if (error)
1714 				break;
1715 
1716 		}
1717 
1718 		offset = xlog_align(log, start_block, endcount, bp);
1719 		for (; j < endcount; j++) {
1720 			xlog_add_record(log, offset, cycle, i+j,
1721 					tail_cycle, tail_block);
1722 			offset += BBSIZE;
1723 		}
1724 		error = xlog_bwrite(log, start_block, endcount, bp);
1725 		if (error)
1726 			break;
1727 		start_block += endcount;
1728 		j = 0;
1729 	}
1730 
1731  out_put_bp:
1732 	xlog_put_bp(bp);
1733 	return error;
1734 }
1735 
1736 /*
1737  * This routine is called to blow away any incomplete log writes out
1738  * in front of the log head.  We do this so that we won't become confused
1739  * if we come up, write only a little bit more, and then crash again.
1740  * If we leave the partial log records out there, this situation could
1741  * cause us to think those partial writes are valid blocks since they
1742  * have the current cycle number.  We get rid of them by overwriting them
1743  * with empty log records with the old cycle number rather than the
1744  * current one.
1745  *
1746  * The tail lsn is passed in rather than taken from
1747  * the log so that we will not write over the unmount record after a
1748  * clean unmount in a 512 block log.  Doing so would leave the log without
1749  * any valid log records in it until a new one was written.  If we crashed
1750  * during that time we would not be able to recover.
1751  */
1752 STATIC int
1753 xlog_clear_stale_blocks(
1754 	struct xlog	*log,
1755 	xfs_lsn_t	tail_lsn)
1756 {
1757 	int		tail_cycle, head_cycle;
1758 	int		tail_block, head_block;
1759 	int		tail_distance, max_distance;
1760 	int		distance;
1761 	int		error;
1762 
1763 	tail_cycle = CYCLE_LSN(tail_lsn);
1764 	tail_block = BLOCK_LSN(tail_lsn);
1765 	head_cycle = log->l_curr_cycle;
1766 	head_block = log->l_curr_block;
1767 
1768 	/*
1769 	 * Figure out the distance between the new head of the log
1770 	 * and the tail.  We want to write over any blocks beyond the
1771 	 * head that we may have written just before the crash, but
1772 	 * we don't want to overwrite the tail of the log.
1773 	 */
1774 	if (head_cycle == tail_cycle) {
1775 		/*
1776 		 * The tail is behind the head in the physical log,
1777 		 * so the distance from the head to the tail is the
1778 		 * distance from the head to the end of the log plus
1779 		 * the distance from the beginning of the log to the
1780 		 * tail.
1781 		 */
1782 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1783 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1784 					 XFS_ERRLEVEL_LOW, log->l_mp);
1785 			return -EFSCORRUPTED;
1786 		}
1787 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1788 	} else {
1789 		/*
1790 		 * The head is behind the tail in the physical log,
1791 		 * so the distance from the head to the tail is just
1792 		 * the tail block minus the head block.
1793 		 */
1794 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1795 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1796 					 XFS_ERRLEVEL_LOW, log->l_mp);
1797 			return -EFSCORRUPTED;
1798 		}
1799 		tail_distance = tail_block - head_block;
1800 	}
1801 
1802 	/*
1803 	 * If the head is right up against the tail, we can't clear
1804 	 * anything.
1805 	 */
1806 	if (tail_distance <= 0) {
1807 		ASSERT(tail_distance == 0);
1808 		return 0;
1809 	}
1810 
1811 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1812 	/*
1813 	 * Take the smaller of the maximum amount of outstanding I/O
1814 	 * we could have and the distance to the tail to clear out.
1815 	 * We take the smaller so that we don't overwrite the tail and
1816 	 * we don't waste all day writing from the head to the tail
1817 	 * for no reason.
1818 	 */
1819 	max_distance = MIN(max_distance, tail_distance);
1820 
1821 	if ((head_block + max_distance) <= log->l_logBBsize) {
1822 		/*
1823 		 * We can stomp all the blocks we need to without
1824 		 * wrapping around the end of the log.  Just do it
1825 		 * in a single write.  Use the cycle number of the
1826 		 * current cycle minus one so that the log will look like:
1827 		 *     n ... | n - 1 ...
1828 		 */
1829 		error = xlog_write_log_records(log, (head_cycle - 1),
1830 				head_block, max_distance, tail_cycle,
1831 				tail_block);
1832 		if (error)
1833 			return error;
1834 	} else {
1835 		/*
1836 		 * We need to wrap around the end of the physical log in
1837 		 * order to clear all the blocks.  Do it in two separate
1838 		 * I/Os.  The first write should be from the head to the
1839 		 * end of the physical log, and it should use the current
1840 		 * cycle number minus one just like above.
1841 		 */
1842 		distance = log->l_logBBsize - head_block;
1843 		error = xlog_write_log_records(log, (head_cycle - 1),
1844 				head_block, distance, tail_cycle,
1845 				tail_block);
1846 
1847 		if (error)
1848 			return error;
1849 
1850 		/*
1851 		 * Now write the blocks at the start of the physical log.
1852 		 * This writes the remainder of the blocks we want to clear.
1853 		 * It uses the current cycle number since we're now on the
1854 		 * same cycle as the head so that we get:
1855 		 *    n ... n ... | n - 1 ...
1856 		 *    ^^^^^ blocks we're writing
1857 		 */
1858 		distance = max_distance - (log->l_logBBsize - head_block);
1859 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1860 				tail_cycle, tail_block);
1861 		if (error)
1862 			return error;
1863 	}
1864 
1865 	return 0;
1866 }
1867 
1868 /******************************************************************************
1869  *
1870  *		Log recover routines
1871  *
1872  ******************************************************************************
1873  */
1874 
1875 /*
1876  * Sort the log items in the transaction.
1877  *
1878  * The ordering constraints are defined by the inode allocation and unlink
1879  * behaviour. The rules are:
1880  *
1881  *	1. Every item is only logged once in a given transaction. Hence it
1882  *	   represents the last logged state of the item. Hence ordering is
1883  *	   dependent on the order in which operations need to be performed so
1884  *	   required initial conditions are always met.
1885  *
1886  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1887  *	   there's nothing to replay from them so we can simply cull them
1888  *	   from the transaction. However, we can't do that until after we've
1889  *	   replayed all the other items because they may be dependent on the
1890  *	   cancelled buffer and replaying the cancelled buffer can remove it
1891  *	   form the cancelled buffer table. Hence they have tobe done last.
1892  *
1893  *	3. Inode allocation buffers must be replayed before inode items that
1894  *	   read the buffer and replay changes into it. For filesystems using the
1895  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1896  *	   treated the same as inode allocation buffers as they create and
1897  *	   initialise the buffers directly.
1898  *
1899  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1900  *	   This ensures that inodes are completely flushed to the inode buffer
1901  *	   in a "free" state before we remove the unlinked inode list pointer.
1902  *
1903  * Hence the ordering needs to be inode allocation buffers first, inode items
1904  * second, inode unlink buffers third and cancelled buffers last.
1905  *
1906  * But there's a problem with that - we can't tell an inode allocation buffer
1907  * apart from a regular buffer, so we can't separate them. We can, however,
1908  * tell an inode unlink buffer from the others, and so we can separate them out
1909  * from all the other buffers and move them to last.
1910  *
1911  * Hence, 4 lists, in order from head to tail:
1912  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1913  *	- item_list for all non-buffer items
1914  *	- inode_buffer_list for inode unlink buffers
1915  *	- cancel_list for the cancelled buffers
1916  *
1917  * Note that we add objects to the tail of the lists so that first-to-last
1918  * ordering is preserved within the lists. Adding objects to the head of the
1919  * list means when we traverse from the head we walk them in last-to-first
1920  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1921  * but for all other items there may be specific ordering that we need to
1922  * preserve.
1923  */
1924 STATIC int
1925 xlog_recover_reorder_trans(
1926 	struct xlog		*log,
1927 	struct xlog_recover	*trans,
1928 	int			pass)
1929 {
1930 	xlog_recover_item_t	*item, *n;
1931 	int			error = 0;
1932 	LIST_HEAD(sort_list);
1933 	LIST_HEAD(cancel_list);
1934 	LIST_HEAD(buffer_list);
1935 	LIST_HEAD(inode_buffer_list);
1936 	LIST_HEAD(inode_list);
1937 
1938 	list_splice_init(&trans->r_itemq, &sort_list);
1939 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1940 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1941 
1942 		switch (ITEM_TYPE(item)) {
1943 		case XFS_LI_ICREATE:
1944 			list_move_tail(&item->ri_list, &buffer_list);
1945 			break;
1946 		case XFS_LI_BUF:
1947 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1948 				trace_xfs_log_recover_item_reorder_head(log,
1949 							trans, item, pass);
1950 				list_move(&item->ri_list, &cancel_list);
1951 				break;
1952 			}
1953 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1954 				list_move(&item->ri_list, &inode_buffer_list);
1955 				break;
1956 			}
1957 			list_move_tail(&item->ri_list, &buffer_list);
1958 			break;
1959 		case XFS_LI_INODE:
1960 		case XFS_LI_DQUOT:
1961 		case XFS_LI_QUOTAOFF:
1962 		case XFS_LI_EFD:
1963 		case XFS_LI_EFI:
1964 		case XFS_LI_RUI:
1965 		case XFS_LI_RUD:
1966 		case XFS_LI_CUI:
1967 		case XFS_LI_CUD:
1968 		case XFS_LI_BUI:
1969 		case XFS_LI_BUD:
1970 			trace_xfs_log_recover_item_reorder_tail(log,
1971 							trans, item, pass);
1972 			list_move_tail(&item->ri_list, &inode_list);
1973 			break;
1974 		default:
1975 			xfs_warn(log->l_mp,
1976 				"%s: unrecognized type of log operation",
1977 				__func__);
1978 			ASSERT(0);
1979 			/*
1980 			 * return the remaining items back to the transaction
1981 			 * item list so they can be freed in caller.
1982 			 */
1983 			if (!list_empty(&sort_list))
1984 				list_splice_init(&sort_list, &trans->r_itemq);
1985 			error = -EIO;
1986 			goto out;
1987 		}
1988 	}
1989 out:
1990 	ASSERT(list_empty(&sort_list));
1991 	if (!list_empty(&buffer_list))
1992 		list_splice(&buffer_list, &trans->r_itemq);
1993 	if (!list_empty(&inode_list))
1994 		list_splice_tail(&inode_list, &trans->r_itemq);
1995 	if (!list_empty(&inode_buffer_list))
1996 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1997 	if (!list_empty(&cancel_list))
1998 		list_splice_tail(&cancel_list, &trans->r_itemq);
1999 	return error;
2000 }
2001 
2002 /*
2003  * Build up the table of buf cancel records so that we don't replay
2004  * cancelled data in the second pass.  For buffer records that are
2005  * not cancel records, there is nothing to do here so we just return.
2006  *
2007  * If we get a cancel record which is already in the table, this indicates
2008  * that the buffer was cancelled multiple times.  In order to ensure
2009  * that during pass 2 we keep the record in the table until we reach its
2010  * last occurrence in the log, we keep a reference count in the cancel
2011  * record in the table to tell us how many times we expect to see this
2012  * record during the second pass.
2013  */
2014 STATIC int
2015 xlog_recover_buffer_pass1(
2016 	struct xlog			*log,
2017 	struct xlog_recover_item	*item)
2018 {
2019 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2020 	struct list_head	*bucket;
2021 	struct xfs_buf_cancel	*bcp;
2022 
2023 	/*
2024 	 * If this isn't a cancel buffer item, then just return.
2025 	 */
2026 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
2027 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
2028 		return 0;
2029 	}
2030 
2031 	/*
2032 	 * Insert an xfs_buf_cancel record into the hash table of them.
2033 	 * If there is already an identical record, bump its reference count.
2034 	 */
2035 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
2036 	list_for_each_entry(bcp, bucket, bc_list) {
2037 		if (bcp->bc_blkno == buf_f->blf_blkno &&
2038 		    bcp->bc_len == buf_f->blf_len) {
2039 			bcp->bc_refcount++;
2040 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
2041 			return 0;
2042 		}
2043 	}
2044 
2045 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
2046 	bcp->bc_blkno = buf_f->blf_blkno;
2047 	bcp->bc_len = buf_f->blf_len;
2048 	bcp->bc_refcount = 1;
2049 	list_add_tail(&bcp->bc_list, bucket);
2050 
2051 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
2052 	return 0;
2053 }
2054 
2055 /*
2056  * Check to see whether the buffer being recovered has a corresponding
2057  * entry in the buffer cancel record table. If it is, return the cancel
2058  * buffer structure to the caller.
2059  */
2060 STATIC struct xfs_buf_cancel *
2061 xlog_peek_buffer_cancelled(
2062 	struct xlog		*log,
2063 	xfs_daddr_t		blkno,
2064 	uint			len,
2065 	unsigned short			flags)
2066 {
2067 	struct list_head	*bucket;
2068 	struct xfs_buf_cancel	*bcp;
2069 
2070 	if (!log->l_buf_cancel_table) {
2071 		/* empty table means no cancelled buffers in the log */
2072 		ASSERT(!(flags & XFS_BLF_CANCEL));
2073 		return NULL;
2074 	}
2075 
2076 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
2077 	list_for_each_entry(bcp, bucket, bc_list) {
2078 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
2079 			return bcp;
2080 	}
2081 
2082 	/*
2083 	 * We didn't find a corresponding entry in the table, so return 0 so
2084 	 * that the buffer is NOT cancelled.
2085 	 */
2086 	ASSERT(!(flags & XFS_BLF_CANCEL));
2087 	return NULL;
2088 }
2089 
2090 /*
2091  * If the buffer is being cancelled then return 1 so that it will be cancelled,
2092  * otherwise return 0.  If the buffer is actually a buffer cancel item
2093  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2094  * table and remove it from the table if this is the last reference.
2095  *
2096  * We remove the cancel record from the table when we encounter its last
2097  * occurrence in the log so that if the same buffer is re-used again after its
2098  * last cancellation we actually replay the changes made at that point.
2099  */
2100 STATIC int
2101 xlog_check_buffer_cancelled(
2102 	struct xlog		*log,
2103 	xfs_daddr_t		blkno,
2104 	uint			len,
2105 	unsigned short			flags)
2106 {
2107 	struct xfs_buf_cancel	*bcp;
2108 
2109 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2110 	if (!bcp)
2111 		return 0;
2112 
2113 	/*
2114 	 * We've go a match, so return 1 so that the recovery of this buffer
2115 	 * is cancelled.  If this buffer is actually a buffer cancel log
2116 	 * item, then decrement the refcount on the one in the table and
2117 	 * remove it if this is the last reference.
2118 	 */
2119 	if (flags & XFS_BLF_CANCEL) {
2120 		if (--bcp->bc_refcount == 0) {
2121 			list_del(&bcp->bc_list);
2122 			kmem_free(bcp);
2123 		}
2124 	}
2125 	return 1;
2126 }
2127 
2128 /*
2129  * Perform recovery for a buffer full of inodes.  In these buffers, the only
2130  * data which should be recovered is that which corresponds to the
2131  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2132  * data for the inodes is always logged through the inodes themselves rather
2133  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2134  *
2135  * The only time when buffers full of inodes are fully recovered is when the
2136  * buffer is full of newly allocated inodes.  In this case the buffer will
2137  * not be marked as an inode buffer and so will be sent to
2138  * xlog_recover_do_reg_buffer() below during recovery.
2139  */
2140 STATIC int
2141 xlog_recover_do_inode_buffer(
2142 	struct xfs_mount	*mp,
2143 	xlog_recover_item_t	*item,
2144 	struct xfs_buf		*bp,
2145 	xfs_buf_log_format_t	*buf_f)
2146 {
2147 	int			i;
2148 	int			item_index = 0;
2149 	int			bit = 0;
2150 	int			nbits = 0;
2151 	int			reg_buf_offset = 0;
2152 	int			reg_buf_bytes = 0;
2153 	int			next_unlinked_offset;
2154 	int			inodes_per_buf;
2155 	xfs_agino_t		*logged_nextp;
2156 	xfs_agino_t		*buffer_nextp;
2157 
2158 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2159 
2160 	/*
2161 	 * Post recovery validation only works properly on CRC enabled
2162 	 * filesystems.
2163 	 */
2164 	if (xfs_sb_version_hascrc(&mp->m_sb))
2165 		bp->b_ops = &xfs_inode_buf_ops;
2166 
2167 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
2168 	for (i = 0; i < inodes_per_buf; i++) {
2169 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2170 			offsetof(xfs_dinode_t, di_next_unlinked);
2171 
2172 		while (next_unlinked_offset >=
2173 		       (reg_buf_offset + reg_buf_bytes)) {
2174 			/*
2175 			 * The next di_next_unlinked field is beyond
2176 			 * the current logged region.  Find the next
2177 			 * logged region that contains or is beyond
2178 			 * the current di_next_unlinked field.
2179 			 */
2180 			bit += nbits;
2181 			bit = xfs_next_bit(buf_f->blf_data_map,
2182 					   buf_f->blf_map_size, bit);
2183 
2184 			/*
2185 			 * If there are no more logged regions in the
2186 			 * buffer, then we're done.
2187 			 */
2188 			if (bit == -1)
2189 				return 0;
2190 
2191 			nbits = xfs_contig_bits(buf_f->blf_data_map,
2192 						buf_f->blf_map_size, bit);
2193 			ASSERT(nbits > 0);
2194 			reg_buf_offset = bit << XFS_BLF_SHIFT;
2195 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2196 			item_index++;
2197 		}
2198 
2199 		/*
2200 		 * If the current logged region starts after the current
2201 		 * di_next_unlinked field, then move on to the next
2202 		 * di_next_unlinked field.
2203 		 */
2204 		if (next_unlinked_offset < reg_buf_offset)
2205 			continue;
2206 
2207 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
2208 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2209 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
2210 							BBTOB(bp->b_io_length));
2211 
2212 		/*
2213 		 * The current logged region contains a copy of the
2214 		 * current di_next_unlinked field.  Extract its value
2215 		 * and copy it to the buffer copy.
2216 		 */
2217 		logged_nextp = item->ri_buf[item_index].i_addr +
2218 				next_unlinked_offset - reg_buf_offset;
2219 		if (unlikely(*logged_nextp == 0)) {
2220 			xfs_alert(mp,
2221 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
2222 		"Trying to replay bad (0) inode di_next_unlinked field.",
2223 				item, bp);
2224 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2225 					 XFS_ERRLEVEL_LOW, mp);
2226 			return -EFSCORRUPTED;
2227 		}
2228 
2229 		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2230 		*buffer_nextp = *logged_nextp;
2231 
2232 		/*
2233 		 * If necessary, recalculate the CRC in the on-disk inode. We
2234 		 * have to leave the inode in a consistent state for whoever
2235 		 * reads it next....
2236 		 */
2237 		xfs_dinode_calc_crc(mp,
2238 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2239 
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 /*
2246  * V5 filesystems know the age of the buffer on disk being recovered. We can
2247  * have newer objects on disk than we are replaying, and so for these cases we
2248  * don't want to replay the current change as that will make the buffer contents
2249  * temporarily invalid on disk.
2250  *
2251  * The magic number might not match the buffer type we are going to recover
2252  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2253  * extract the LSN of the existing object in the buffer based on it's current
2254  * magic number.  If we don't recognise the magic number in the buffer, then
2255  * return a LSN of -1 so that the caller knows it was an unrecognised block and
2256  * so can recover the buffer.
2257  *
2258  * Note: we cannot rely solely on magic number matches to determine that the
2259  * buffer has a valid LSN - we also need to verify that it belongs to this
2260  * filesystem, so we need to extract the object's LSN and compare it to that
2261  * which we read from the superblock. If the UUIDs don't match, then we've got a
2262  * stale metadata block from an old filesystem instance that we need to recover
2263  * over the top of.
2264  */
2265 static xfs_lsn_t
2266 xlog_recover_get_buf_lsn(
2267 	struct xfs_mount	*mp,
2268 	struct xfs_buf		*bp)
2269 {
2270 	uint32_t		magic32;
2271 	uint16_t		magic16;
2272 	uint16_t		magicda;
2273 	void			*blk = bp->b_addr;
2274 	uuid_t			*uuid;
2275 	xfs_lsn_t		lsn = -1;
2276 
2277 	/* v4 filesystems always recover immediately */
2278 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2279 		goto recover_immediately;
2280 
2281 	magic32 = be32_to_cpu(*(__be32 *)blk);
2282 	switch (magic32) {
2283 	case XFS_ABTB_CRC_MAGIC:
2284 	case XFS_ABTC_CRC_MAGIC:
2285 	case XFS_ABTB_MAGIC:
2286 	case XFS_ABTC_MAGIC:
2287 	case XFS_RMAP_CRC_MAGIC:
2288 	case XFS_REFC_CRC_MAGIC:
2289 	case XFS_IBT_CRC_MAGIC:
2290 	case XFS_IBT_MAGIC: {
2291 		struct xfs_btree_block *btb = blk;
2292 
2293 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2294 		uuid = &btb->bb_u.s.bb_uuid;
2295 		break;
2296 	}
2297 	case XFS_BMAP_CRC_MAGIC:
2298 	case XFS_BMAP_MAGIC: {
2299 		struct xfs_btree_block *btb = blk;
2300 
2301 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2302 		uuid = &btb->bb_u.l.bb_uuid;
2303 		break;
2304 	}
2305 	case XFS_AGF_MAGIC:
2306 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2307 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2308 		break;
2309 	case XFS_AGFL_MAGIC:
2310 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2311 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2312 		break;
2313 	case XFS_AGI_MAGIC:
2314 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2315 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2316 		break;
2317 	case XFS_SYMLINK_MAGIC:
2318 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2319 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2320 		break;
2321 	case XFS_DIR3_BLOCK_MAGIC:
2322 	case XFS_DIR3_DATA_MAGIC:
2323 	case XFS_DIR3_FREE_MAGIC:
2324 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2325 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2326 		break;
2327 	case XFS_ATTR3_RMT_MAGIC:
2328 		/*
2329 		 * Remote attr blocks are written synchronously, rather than
2330 		 * being logged. That means they do not contain a valid LSN
2331 		 * (i.e. transactionally ordered) in them, and hence any time we
2332 		 * see a buffer to replay over the top of a remote attribute
2333 		 * block we should simply do so.
2334 		 */
2335 		goto recover_immediately;
2336 	case XFS_SB_MAGIC:
2337 		/*
2338 		 * superblock uuids are magic. We may or may not have a
2339 		 * sb_meta_uuid on disk, but it will be set in the in-core
2340 		 * superblock. We set the uuid pointer for verification
2341 		 * according to the superblock feature mask to ensure we check
2342 		 * the relevant UUID in the superblock.
2343 		 */
2344 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2345 		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2346 			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2347 		else
2348 			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2349 		break;
2350 	default:
2351 		break;
2352 	}
2353 
2354 	if (lsn != (xfs_lsn_t)-1) {
2355 		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2356 			goto recover_immediately;
2357 		return lsn;
2358 	}
2359 
2360 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2361 	switch (magicda) {
2362 	case XFS_DIR3_LEAF1_MAGIC:
2363 	case XFS_DIR3_LEAFN_MAGIC:
2364 	case XFS_DA3_NODE_MAGIC:
2365 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2366 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2367 		break;
2368 	default:
2369 		break;
2370 	}
2371 
2372 	if (lsn != (xfs_lsn_t)-1) {
2373 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2374 			goto recover_immediately;
2375 		return lsn;
2376 	}
2377 
2378 	/*
2379 	 * We do individual object checks on dquot and inode buffers as they
2380 	 * have their own individual LSN records. Also, we could have a stale
2381 	 * buffer here, so we have to at least recognise these buffer types.
2382 	 *
2383 	 * A notd complexity here is inode unlinked list processing - it logs
2384 	 * the inode directly in the buffer, but we don't know which inodes have
2385 	 * been modified, and there is no global buffer LSN. Hence we need to
2386 	 * recover all inode buffer types immediately. This problem will be
2387 	 * fixed by logical logging of the unlinked list modifications.
2388 	 */
2389 	magic16 = be16_to_cpu(*(__be16 *)blk);
2390 	switch (magic16) {
2391 	case XFS_DQUOT_MAGIC:
2392 	case XFS_DINODE_MAGIC:
2393 		goto recover_immediately;
2394 	default:
2395 		break;
2396 	}
2397 
2398 	/* unknown buffer contents, recover immediately */
2399 
2400 recover_immediately:
2401 	return (xfs_lsn_t)-1;
2402 
2403 }
2404 
2405 /*
2406  * Validate the recovered buffer is of the correct type and attach the
2407  * appropriate buffer operations to them for writeback. Magic numbers are in a
2408  * few places:
2409  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2410  *	the first 32 bits of the buffer (most blocks),
2411  *	inside a struct xfs_da_blkinfo at the start of the buffer.
2412  */
2413 static void
2414 xlog_recover_validate_buf_type(
2415 	struct xfs_mount	*mp,
2416 	struct xfs_buf		*bp,
2417 	xfs_buf_log_format_t	*buf_f,
2418 	xfs_lsn_t		current_lsn)
2419 {
2420 	struct xfs_da_blkinfo	*info = bp->b_addr;
2421 	uint32_t		magic32;
2422 	uint16_t		magic16;
2423 	uint16_t		magicda;
2424 	char			*warnmsg = NULL;
2425 
2426 	/*
2427 	 * We can only do post recovery validation on items on CRC enabled
2428 	 * fielsystems as we need to know when the buffer was written to be able
2429 	 * to determine if we should have replayed the item. If we replay old
2430 	 * metadata over a newer buffer, then it will enter a temporarily
2431 	 * inconsistent state resulting in verification failures. Hence for now
2432 	 * just avoid the verification stage for non-crc filesystems
2433 	 */
2434 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2435 		return;
2436 
2437 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2438 	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2439 	magicda = be16_to_cpu(info->magic);
2440 	switch (xfs_blft_from_flags(buf_f)) {
2441 	case XFS_BLFT_BTREE_BUF:
2442 		switch (magic32) {
2443 		case XFS_ABTB_CRC_MAGIC:
2444 		case XFS_ABTC_CRC_MAGIC:
2445 		case XFS_ABTB_MAGIC:
2446 		case XFS_ABTC_MAGIC:
2447 			bp->b_ops = &xfs_allocbt_buf_ops;
2448 			break;
2449 		case XFS_IBT_CRC_MAGIC:
2450 		case XFS_FIBT_CRC_MAGIC:
2451 		case XFS_IBT_MAGIC:
2452 		case XFS_FIBT_MAGIC:
2453 			bp->b_ops = &xfs_inobt_buf_ops;
2454 			break;
2455 		case XFS_BMAP_CRC_MAGIC:
2456 		case XFS_BMAP_MAGIC:
2457 			bp->b_ops = &xfs_bmbt_buf_ops;
2458 			break;
2459 		case XFS_RMAP_CRC_MAGIC:
2460 			bp->b_ops = &xfs_rmapbt_buf_ops;
2461 			break;
2462 		case XFS_REFC_CRC_MAGIC:
2463 			bp->b_ops = &xfs_refcountbt_buf_ops;
2464 			break;
2465 		default:
2466 			warnmsg = "Bad btree block magic!";
2467 			break;
2468 		}
2469 		break;
2470 	case XFS_BLFT_AGF_BUF:
2471 		if (magic32 != XFS_AGF_MAGIC) {
2472 			warnmsg = "Bad AGF block magic!";
2473 			break;
2474 		}
2475 		bp->b_ops = &xfs_agf_buf_ops;
2476 		break;
2477 	case XFS_BLFT_AGFL_BUF:
2478 		if (magic32 != XFS_AGFL_MAGIC) {
2479 			warnmsg = "Bad AGFL block magic!";
2480 			break;
2481 		}
2482 		bp->b_ops = &xfs_agfl_buf_ops;
2483 		break;
2484 	case XFS_BLFT_AGI_BUF:
2485 		if (magic32 != XFS_AGI_MAGIC) {
2486 			warnmsg = "Bad AGI block magic!";
2487 			break;
2488 		}
2489 		bp->b_ops = &xfs_agi_buf_ops;
2490 		break;
2491 	case XFS_BLFT_UDQUOT_BUF:
2492 	case XFS_BLFT_PDQUOT_BUF:
2493 	case XFS_BLFT_GDQUOT_BUF:
2494 #ifdef CONFIG_XFS_QUOTA
2495 		if (magic16 != XFS_DQUOT_MAGIC) {
2496 			warnmsg = "Bad DQUOT block magic!";
2497 			break;
2498 		}
2499 		bp->b_ops = &xfs_dquot_buf_ops;
2500 #else
2501 		xfs_alert(mp,
2502 	"Trying to recover dquots without QUOTA support built in!");
2503 		ASSERT(0);
2504 #endif
2505 		break;
2506 	case XFS_BLFT_DINO_BUF:
2507 		if (magic16 != XFS_DINODE_MAGIC) {
2508 			warnmsg = "Bad INODE block magic!";
2509 			break;
2510 		}
2511 		bp->b_ops = &xfs_inode_buf_ops;
2512 		break;
2513 	case XFS_BLFT_SYMLINK_BUF:
2514 		if (magic32 != XFS_SYMLINK_MAGIC) {
2515 			warnmsg = "Bad symlink block magic!";
2516 			break;
2517 		}
2518 		bp->b_ops = &xfs_symlink_buf_ops;
2519 		break;
2520 	case XFS_BLFT_DIR_BLOCK_BUF:
2521 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2522 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2523 			warnmsg = "Bad dir block magic!";
2524 			break;
2525 		}
2526 		bp->b_ops = &xfs_dir3_block_buf_ops;
2527 		break;
2528 	case XFS_BLFT_DIR_DATA_BUF:
2529 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2530 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2531 			warnmsg = "Bad dir data magic!";
2532 			break;
2533 		}
2534 		bp->b_ops = &xfs_dir3_data_buf_ops;
2535 		break;
2536 	case XFS_BLFT_DIR_FREE_BUF:
2537 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2538 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2539 			warnmsg = "Bad dir3 free magic!";
2540 			break;
2541 		}
2542 		bp->b_ops = &xfs_dir3_free_buf_ops;
2543 		break;
2544 	case XFS_BLFT_DIR_LEAF1_BUF:
2545 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2546 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2547 			warnmsg = "Bad dir leaf1 magic!";
2548 			break;
2549 		}
2550 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2551 		break;
2552 	case XFS_BLFT_DIR_LEAFN_BUF:
2553 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2554 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2555 			warnmsg = "Bad dir leafn magic!";
2556 			break;
2557 		}
2558 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2559 		break;
2560 	case XFS_BLFT_DA_NODE_BUF:
2561 		if (magicda != XFS_DA_NODE_MAGIC &&
2562 		    magicda != XFS_DA3_NODE_MAGIC) {
2563 			warnmsg = "Bad da node magic!";
2564 			break;
2565 		}
2566 		bp->b_ops = &xfs_da3_node_buf_ops;
2567 		break;
2568 	case XFS_BLFT_ATTR_LEAF_BUF:
2569 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2570 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2571 			warnmsg = "Bad attr leaf magic!";
2572 			break;
2573 		}
2574 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2575 		break;
2576 	case XFS_BLFT_ATTR_RMT_BUF:
2577 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2578 			warnmsg = "Bad attr remote magic!";
2579 			break;
2580 		}
2581 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2582 		break;
2583 	case XFS_BLFT_SB_BUF:
2584 		if (magic32 != XFS_SB_MAGIC) {
2585 			warnmsg = "Bad SB block magic!";
2586 			break;
2587 		}
2588 		bp->b_ops = &xfs_sb_buf_ops;
2589 		break;
2590 #ifdef CONFIG_XFS_RT
2591 	case XFS_BLFT_RTBITMAP_BUF:
2592 	case XFS_BLFT_RTSUMMARY_BUF:
2593 		/* no magic numbers for verification of RT buffers */
2594 		bp->b_ops = &xfs_rtbuf_ops;
2595 		break;
2596 #endif /* CONFIG_XFS_RT */
2597 	default:
2598 		xfs_warn(mp, "Unknown buffer type %d!",
2599 			 xfs_blft_from_flags(buf_f));
2600 		break;
2601 	}
2602 
2603 	/*
2604 	 * Nothing else to do in the case of a NULL current LSN as this means
2605 	 * the buffer is more recent than the change in the log and will be
2606 	 * skipped.
2607 	 */
2608 	if (current_lsn == NULLCOMMITLSN)
2609 		return;
2610 
2611 	if (warnmsg) {
2612 		xfs_warn(mp, warnmsg);
2613 		ASSERT(0);
2614 	}
2615 
2616 	/*
2617 	 * We must update the metadata LSN of the buffer as it is written out to
2618 	 * ensure that older transactions never replay over this one and corrupt
2619 	 * the buffer. This can occur if log recovery is interrupted at some
2620 	 * point after the current transaction completes, at which point a
2621 	 * subsequent mount starts recovery from the beginning.
2622 	 *
2623 	 * Write verifiers update the metadata LSN from log items attached to
2624 	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
2625 	 * the verifier. We'll clean it up in our ->iodone() callback.
2626 	 */
2627 	if (bp->b_ops) {
2628 		struct xfs_buf_log_item	*bip;
2629 
2630 		ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2631 		bp->b_iodone = xlog_recover_iodone;
2632 		xfs_buf_item_init(bp, mp);
2633 		bip = bp->b_fspriv;
2634 		bip->bli_item.li_lsn = current_lsn;
2635 	}
2636 }
2637 
2638 /*
2639  * Perform a 'normal' buffer recovery.  Each logged region of the
2640  * buffer should be copied over the corresponding region in the
2641  * given buffer.  The bitmap in the buf log format structure indicates
2642  * where to place the logged data.
2643  */
2644 STATIC void
2645 xlog_recover_do_reg_buffer(
2646 	struct xfs_mount	*mp,
2647 	xlog_recover_item_t	*item,
2648 	struct xfs_buf		*bp,
2649 	xfs_buf_log_format_t	*buf_f,
2650 	xfs_lsn_t		current_lsn)
2651 {
2652 	int			i;
2653 	int			bit;
2654 	int			nbits;
2655 	int                     error;
2656 
2657 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2658 
2659 	bit = 0;
2660 	i = 1;  /* 0 is the buf format structure */
2661 	while (1) {
2662 		bit = xfs_next_bit(buf_f->blf_data_map,
2663 				   buf_f->blf_map_size, bit);
2664 		if (bit == -1)
2665 			break;
2666 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2667 					buf_f->blf_map_size, bit);
2668 		ASSERT(nbits > 0);
2669 		ASSERT(item->ri_buf[i].i_addr != NULL);
2670 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2671 		ASSERT(BBTOB(bp->b_io_length) >=
2672 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2673 
2674 		/*
2675 		 * The dirty regions logged in the buffer, even though
2676 		 * contiguous, may span multiple chunks. This is because the
2677 		 * dirty region may span a physical page boundary in a buffer
2678 		 * and hence be split into two separate vectors for writing into
2679 		 * the log. Hence we need to trim nbits back to the length of
2680 		 * the current region being copied out of the log.
2681 		 */
2682 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2683 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2684 
2685 		/*
2686 		 * Do a sanity check if this is a dquot buffer. Just checking
2687 		 * the first dquot in the buffer should do. XXXThis is
2688 		 * probably a good thing to do for other buf types also.
2689 		 */
2690 		error = 0;
2691 		if (buf_f->blf_flags &
2692 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2693 			if (item->ri_buf[i].i_addr == NULL) {
2694 				xfs_alert(mp,
2695 					"XFS: NULL dquot in %s.", __func__);
2696 				goto next;
2697 			}
2698 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2699 				xfs_alert(mp,
2700 					"XFS: dquot too small (%d) in %s.",
2701 					item->ri_buf[i].i_len, __func__);
2702 				goto next;
2703 			}
2704 			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2705 					       -1, 0, XFS_QMOPT_DOWARN,
2706 					       "dquot_buf_recover");
2707 			if (error)
2708 				goto next;
2709 		}
2710 
2711 		memcpy(xfs_buf_offset(bp,
2712 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2713 			item->ri_buf[i].i_addr,		/* source */
2714 			nbits<<XFS_BLF_SHIFT);		/* length */
2715  next:
2716 		i++;
2717 		bit += nbits;
2718 	}
2719 
2720 	/* Shouldn't be any more regions */
2721 	ASSERT(i == item->ri_total);
2722 
2723 	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2724 }
2725 
2726 /*
2727  * Perform a dquot buffer recovery.
2728  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2729  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2730  * Else, treat it as a regular buffer and do recovery.
2731  *
2732  * Return false if the buffer was tossed and true if we recovered the buffer to
2733  * indicate to the caller if the buffer needs writing.
2734  */
2735 STATIC bool
2736 xlog_recover_do_dquot_buffer(
2737 	struct xfs_mount		*mp,
2738 	struct xlog			*log,
2739 	struct xlog_recover_item	*item,
2740 	struct xfs_buf			*bp,
2741 	struct xfs_buf_log_format	*buf_f)
2742 {
2743 	uint			type;
2744 
2745 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2746 
2747 	/*
2748 	 * Filesystems are required to send in quota flags at mount time.
2749 	 */
2750 	if (!mp->m_qflags)
2751 		return false;
2752 
2753 	type = 0;
2754 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2755 		type |= XFS_DQ_USER;
2756 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2757 		type |= XFS_DQ_PROJ;
2758 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2759 		type |= XFS_DQ_GROUP;
2760 	/*
2761 	 * This type of quotas was turned off, so ignore this buffer
2762 	 */
2763 	if (log->l_quotaoffs_flag & type)
2764 		return false;
2765 
2766 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2767 	return true;
2768 }
2769 
2770 /*
2771  * This routine replays a modification made to a buffer at runtime.
2772  * There are actually two types of buffer, regular and inode, which
2773  * are handled differently.  Inode buffers are handled differently
2774  * in that we only recover a specific set of data from them, namely
2775  * the inode di_next_unlinked fields.  This is because all other inode
2776  * data is actually logged via inode records and any data we replay
2777  * here which overlaps that may be stale.
2778  *
2779  * When meta-data buffers are freed at run time we log a buffer item
2780  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2781  * of the buffer in the log should not be replayed at recovery time.
2782  * This is so that if the blocks covered by the buffer are reused for
2783  * file data before we crash we don't end up replaying old, freed
2784  * meta-data into a user's file.
2785  *
2786  * To handle the cancellation of buffer log items, we make two passes
2787  * over the log during recovery.  During the first we build a table of
2788  * those buffers which have been cancelled, and during the second we
2789  * only replay those buffers which do not have corresponding cancel
2790  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2791  * for more details on the implementation of the table of cancel records.
2792  */
2793 STATIC int
2794 xlog_recover_buffer_pass2(
2795 	struct xlog			*log,
2796 	struct list_head		*buffer_list,
2797 	struct xlog_recover_item	*item,
2798 	xfs_lsn_t			current_lsn)
2799 {
2800 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2801 	xfs_mount_t		*mp = log->l_mp;
2802 	xfs_buf_t		*bp;
2803 	int			error;
2804 	uint			buf_flags;
2805 	xfs_lsn_t		lsn;
2806 
2807 	/*
2808 	 * In this pass we only want to recover all the buffers which have
2809 	 * not been cancelled and are not cancellation buffers themselves.
2810 	 */
2811 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2812 			buf_f->blf_len, buf_f->blf_flags)) {
2813 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2814 		return 0;
2815 	}
2816 
2817 	trace_xfs_log_recover_buf_recover(log, buf_f);
2818 
2819 	buf_flags = 0;
2820 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2821 		buf_flags |= XBF_UNMAPPED;
2822 
2823 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2824 			  buf_flags, NULL);
2825 	if (!bp)
2826 		return -ENOMEM;
2827 	error = bp->b_error;
2828 	if (error) {
2829 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2830 		goto out_release;
2831 	}
2832 
2833 	/*
2834 	 * Recover the buffer only if we get an LSN from it and it's less than
2835 	 * the lsn of the transaction we are replaying.
2836 	 *
2837 	 * Note that we have to be extremely careful of readahead here.
2838 	 * Readahead does not attach verfiers to the buffers so if we don't
2839 	 * actually do any replay after readahead because of the LSN we found
2840 	 * in the buffer if more recent than that current transaction then we
2841 	 * need to attach the verifier directly. Failure to do so can lead to
2842 	 * future recovery actions (e.g. EFI and unlinked list recovery) can
2843 	 * operate on the buffers and they won't get the verifier attached. This
2844 	 * can lead to blocks on disk having the correct content but a stale
2845 	 * CRC.
2846 	 *
2847 	 * It is safe to assume these clean buffers are currently up to date.
2848 	 * If the buffer is dirtied by a later transaction being replayed, then
2849 	 * the verifier will be reset to match whatever recover turns that
2850 	 * buffer into.
2851 	 */
2852 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2853 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2854 		trace_xfs_log_recover_buf_skip(log, buf_f);
2855 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2856 		goto out_release;
2857 	}
2858 
2859 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2860 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2861 		if (error)
2862 			goto out_release;
2863 	} else if (buf_f->blf_flags &
2864 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2865 		bool	dirty;
2866 
2867 		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2868 		if (!dirty)
2869 			goto out_release;
2870 	} else {
2871 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2872 	}
2873 
2874 	/*
2875 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2876 	 * slower when taking into account all the buffers to be flushed.
2877 	 *
2878 	 * Also make sure that only inode buffers with good sizes stay in
2879 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2880 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
2881 	 * buffers in the log can be a different size if the log was generated
2882 	 * by an older kernel using unclustered inode buffers or a newer kernel
2883 	 * running with a different inode cluster size.  Regardless, if the
2884 	 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2885 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2886 	 * the buffer out of the buffer cache so that the buffer won't
2887 	 * overlap with future reads of those inodes.
2888 	 */
2889 	if (XFS_DINODE_MAGIC ==
2890 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2891 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2892 			(uint32_t)log->l_mp->m_inode_cluster_size))) {
2893 		xfs_buf_stale(bp);
2894 		error = xfs_bwrite(bp);
2895 	} else {
2896 		ASSERT(bp->b_target->bt_mount == mp);
2897 		bp->b_iodone = xlog_recover_iodone;
2898 		xfs_buf_delwri_queue(bp, buffer_list);
2899 	}
2900 
2901 out_release:
2902 	xfs_buf_relse(bp);
2903 	return error;
2904 }
2905 
2906 /*
2907  * Inode fork owner changes
2908  *
2909  * If we have been told that we have to reparent the inode fork, it's because an
2910  * extent swap operation on a CRC enabled filesystem has been done and we are
2911  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2912  * owners of it.
2913  *
2914  * The complexity here is that we don't have an inode context to work with, so
2915  * after we've replayed the inode we need to instantiate one.  This is where the
2916  * fun begins.
2917  *
2918  * We are in the middle of log recovery, so we can't run transactions. That
2919  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2920  * that will result in the corresponding iput() running the inode through
2921  * xfs_inactive(). If we've just replayed an inode core that changes the link
2922  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2923  * transactions (bad!).
2924  *
2925  * So, to avoid this, we instantiate an inode directly from the inode core we've
2926  * just recovered. We have the buffer still locked, and all we really need to
2927  * instantiate is the inode core and the forks being modified. We can do this
2928  * manually, then run the inode btree owner change, and then tear down the
2929  * xfs_inode without having to run any transactions at all.
2930  *
2931  * Also, because we don't have a transaction context available here but need to
2932  * gather all the buffers we modify for writeback so we pass the buffer_list
2933  * instead for the operation to use.
2934  */
2935 
2936 STATIC int
2937 xfs_recover_inode_owner_change(
2938 	struct xfs_mount	*mp,
2939 	struct xfs_dinode	*dip,
2940 	struct xfs_inode_log_format *in_f,
2941 	struct list_head	*buffer_list)
2942 {
2943 	struct xfs_inode	*ip;
2944 	int			error;
2945 
2946 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2947 
2948 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2949 	if (!ip)
2950 		return -ENOMEM;
2951 
2952 	/* instantiate the inode */
2953 	xfs_inode_from_disk(ip, dip);
2954 	ASSERT(ip->i_d.di_version >= 3);
2955 
2956 	error = xfs_iformat_fork(ip, dip);
2957 	if (error)
2958 		goto out_free_ip;
2959 
2960 
2961 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2962 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2963 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2964 					      ip->i_ino, buffer_list);
2965 		if (error)
2966 			goto out_free_ip;
2967 	}
2968 
2969 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2970 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2971 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2972 					      ip->i_ino, buffer_list);
2973 		if (error)
2974 			goto out_free_ip;
2975 	}
2976 
2977 out_free_ip:
2978 	xfs_inode_free(ip);
2979 	return error;
2980 }
2981 
2982 STATIC int
2983 xlog_recover_inode_pass2(
2984 	struct xlog			*log,
2985 	struct list_head		*buffer_list,
2986 	struct xlog_recover_item	*item,
2987 	xfs_lsn_t			current_lsn)
2988 {
2989 	struct xfs_inode_log_format	*in_f;
2990 	xfs_mount_t		*mp = log->l_mp;
2991 	xfs_buf_t		*bp;
2992 	xfs_dinode_t		*dip;
2993 	int			len;
2994 	char			*src;
2995 	char			*dest;
2996 	int			error;
2997 	int			attr_index;
2998 	uint			fields;
2999 	struct xfs_log_dinode	*ldip;
3000 	uint			isize;
3001 	int			need_free = 0;
3002 
3003 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3004 		in_f = item->ri_buf[0].i_addr;
3005 	} else {
3006 		in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
3007 		need_free = 1;
3008 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
3009 		if (error)
3010 			goto error;
3011 	}
3012 
3013 	/*
3014 	 * Inode buffers can be freed, look out for it,
3015 	 * and do not replay the inode.
3016 	 */
3017 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
3018 					in_f->ilf_len, 0)) {
3019 		error = 0;
3020 		trace_xfs_log_recover_inode_cancel(log, in_f);
3021 		goto error;
3022 	}
3023 	trace_xfs_log_recover_inode_recover(log, in_f);
3024 
3025 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
3026 			  &xfs_inode_buf_ops);
3027 	if (!bp) {
3028 		error = -ENOMEM;
3029 		goto error;
3030 	}
3031 	error = bp->b_error;
3032 	if (error) {
3033 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
3034 		goto out_release;
3035 	}
3036 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
3037 	dip = xfs_buf_offset(bp, in_f->ilf_boffset);
3038 
3039 	/*
3040 	 * Make sure the place we're flushing out to really looks
3041 	 * like an inode!
3042 	 */
3043 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
3044 		xfs_alert(mp,
3045 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
3046 			__func__, dip, bp, in_f->ilf_ino);
3047 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
3048 				 XFS_ERRLEVEL_LOW, mp);
3049 		error = -EFSCORRUPTED;
3050 		goto out_release;
3051 	}
3052 	ldip = item->ri_buf[1].i_addr;
3053 	if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
3054 		xfs_alert(mp,
3055 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
3056 			__func__, item, in_f->ilf_ino);
3057 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
3058 				 XFS_ERRLEVEL_LOW, mp);
3059 		error = -EFSCORRUPTED;
3060 		goto out_release;
3061 	}
3062 
3063 	/*
3064 	 * If the inode has an LSN in it, recover the inode only if it's less
3065 	 * than the lsn of the transaction we are replaying. Note: we still
3066 	 * need to replay an owner change even though the inode is more recent
3067 	 * than the transaction as there is no guarantee that all the btree
3068 	 * blocks are more recent than this transaction, too.
3069 	 */
3070 	if (dip->di_version >= 3) {
3071 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
3072 
3073 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3074 			trace_xfs_log_recover_inode_skip(log, in_f);
3075 			error = 0;
3076 			goto out_owner_change;
3077 		}
3078 	}
3079 
3080 	/*
3081 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3082 	 * are transactional and if ordering is necessary we can determine that
3083 	 * more accurately by the LSN field in the V3 inode core. Don't trust
3084 	 * the inode versions we might be changing them here - use the
3085 	 * superblock flag to determine whether we need to look at di_flushiter
3086 	 * to skip replay when the on disk inode is newer than the log one
3087 	 */
3088 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3089 	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3090 		/*
3091 		 * Deal with the wrap case, DI_MAX_FLUSH is less
3092 		 * than smaller numbers
3093 		 */
3094 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3095 		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3096 			/* do nothing */
3097 		} else {
3098 			trace_xfs_log_recover_inode_skip(log, in_f);
3099 			error = 0;
3100 			goto out_release;
3101 		}
3102 	}
3103 
3104 	/* Take the opportunity to reset the flush iteration count */
3105 	ldip->di_flushiter = 0;
3106 
3107 	if (unlikely(S_ISREG(ldip->di_mode))) {
3108 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3109 		    (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3110 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3111 					 XFS_ERRLEVEL_LOW, mp, ldip);
3112 			xfs_alert(mp,
3113 		"%s: Bad regular inode log record, rec ptr 0x%p, "
3114 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3115 				__func__, item, dip, bp, in_f->ilf_ino);
3116 			error = -EFSCORRUPTED;
3117 			goto out_release;
3118 		}
3119 	} else if (unlikely(S_ISDIR(ldip->di_mode))) {
3120 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3121 		    (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3122 		    (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3123 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3124 					     XFS_ERRLEVEL_LOW, mp, ldip);
3125 			xfs_alert(mp,
3126 		"%s: Bad dir inode log record, rec ptr 0x%p, "
3127 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
3128 				__func__, item, dip, bp, in_f->ilf_ino);
3129 			error = -EFSCORRUPTED;
3130 			goto out_release;
3131 		}
3132 	}
3133 	if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3134 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3135 				     XFS_ERRLEVEL_LOW, mp, ldip);
3136 		xfs_alert(mp,
3137 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3138 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
3139 			__func__, item, dip, bp, in_f->ilf_ino,
3140 			ldip->di_nextents + ldip->di_anextents,
3141 			ldip->di_nblocks);
3142 		error = -EFSCORRUPTED;
3143 		goto out_release;
3144 	}
3145 	if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3146 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3147 				     XFS_ERRLEVEL_LOW, mp, ldip);
3148 		xfs_alert(mp,
3149 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
3150 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
3151 			item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3152 		error = -EFSCORRUPTED;
3153 		goto out_release;
3154 	}
3155 	isize = xfs_log_dinode_size(ldip->di_version);
3156 	if (unlikely(item->ri_buf[1].i_len > isize)) {
3157 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3158 				     XFS_ERRLEVEL_LOW, mp, ldip);
3159 		xfs_alert(mp,
3160 			"%s: Bad inode log record length %d, rec ptr 0x%p",
3161 			__func__, item->ri_buf[1].i_len, item);
3162 		error = -EFSCORRUPTED;
3163 		goto out_release;
3164 	}
3165 
3166 	/* recover the log dinode inode into the on disk inode */
3167 	xfs_log_dinode_to_disk(ldip, dip);
3168 
3169 	/* the rest is in on-disk format */
3170 	if (item->ri_buf[1].i_len > isize) {
3171 		memcpy((char *)dip + isize,
3172 			item->ri_buf[1].i_addr + isize,
3173 			item->ri_buf[1].i_len - isize);
3174 	}
3175 
3176 	fields = in_f->ilf_fields;
3177 	if (fields & XFS_ILOG_DEV)
3178 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3179 
3180 	if (in_f->ilf_size == 2)
3181 		goto out_owner_change;
3182 	len = item->ri_buf[2].i_len;
3183 	src = item->ri_buf[2].i_addr;
3184 	ASSERT(in_f->ilf_size <= 4);
3185 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3186 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
3187 	       (len == in_f->ilf_dsize));
3188 
3189 	switch (fields & XFS_ILOG_DFORK) {
3190 	case XFS_ILOG_DDATA:
3191 	case XFS_ILOG_DEXT:
3192 		memcpy(XFS_DFORK_DPTR(dip), src, len);
3193 		break;
3194 
3195 	case XFS_ILOG_DBROOT:
3196 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3197 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3198 				 XFS_DFORK_DSIZE(dip, mp));
3199 		break;
3200 
3201 	default:
3202 		/*
3203 		 * There are no data fork flags set.
3204 		 */
3205 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
3206 		break;
3207 	}
3208 
3209 	/*
3210 	 * If we logged any attribute data, recover it.  There may or
3211 	 * may not have been any other non-core data logged in this
3212 	 * transaction.
3213 	 */
3214 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3215 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3216 			attr_index = 3;
3217 		} else {
3218 			attr_index = 2;
3219 		}
3220 		len = item->ri_buf[attr_index].i_len;
3221 		src = item->ri_buf[attr_index].i_addr;
3222 		ASSERT(len == in_f->ilf_asize);
3223 
3224 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3225 		case XFS_ILOG_ADATA:
3226 		case XFS_ILOG_AEXT:
3227 			dest = XFS_DFORK_APTR(dip);
3228 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3229 			memcpy(dest, src, len);
3230 			break;
3231 
3232 		case XFS_ILOG_ABROOT:
3233 			dest = XFS_DFORK_APTR(dip);
3234 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3235 					 len, (xfs_bmdr_block_t*)dest,
3236 					 XFS_DFORK_ASIZE(dip, mp));
3237 			break;
3238 
3239 		default:
3240 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3241 			ASSERT(0);
3242 			error = -EIO;
3243 			goto out_release;
3244 		}
3245 	}
3246 
3247 out_owner_change:
3248 	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3249 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
3250 						       buffer_list);
3251 	/* re-generate the checksum. */
3252 	xfs_dinode_calc_crc(log->l_mp, dip);
3253 
3254 	ASSERT(bp->b_target->bt_mount == mp);
3255 	bp->b_iodone = xlog_recover_iodone;
3256 	xfs_buf_delwri_queue(bp, buffer_list);
3257 
3258 out_release:
3259 	xfs_buf_relse(bp);
3260 error:
3261 	if (need_free)
3262 		kmem_free(in_f);
3263 	return error;
3264 }
3265 
3266 /*
3267  * Recover QUOTAOFF records. We simply make a note of it in the xlog
3268  * structure, so that we know not to do any dquot item or dquot buffer recovery,
3269  * of that type.
3270  */
3271 STATIC int
3272 xlog_recover_quotaoff_pass1(
3273 	struct xlog			*log,
3274 	struct xlog_recover_item	*item)
3275 {
3276 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
3277 	ASSERT(qoff_f);
3278 
3279 	/*
3280 	 * The logitem format's flag tells us if this was user quotaoff,
3281 	 * group/project quotaoff or both.
3282 	 */
3283 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3284 		log->l_quotaoffs_flag |= XFS_DQ_USER;
3285 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3286 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3287 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3288 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3289 
3290 	return 0;
3291 }
3292 
3293 /*
3294  * Recover a dquot record
3295  */
3296 STATIC int
3297 xlog_recover_dquot_pass2(
3298 	struct xlog			*log,
3299 	struct list_head		*buffer_list,
3300 	struct xlog_recover_item	*item,
3301 	xfs_lsn_t			current_lsn)
3302 {
3303 	xfs_mount_t		*mp = log->l_mp;
3304 	xfs_buf_t		*bp;
3305 	struct xfs_disk_dquot	*ddq, *recddq;
3306 	int			error;
3307 	xfs_dq_logformat_t	*dq_f;
3308 	uint			type;
3309 
3310 
3311 	/*
3312 	 * Filesystems are required to send in quota flags at mount time.
3313 	 */
3314 	if (mp->m_qflags == 0)
3315 		return 0;
3316 
3317 	recddq = item->ri_buf[1].i_addr;
3318 	if (recddq == NULL) {
3319 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3320 		return -EIO;
3321 	}
3322 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3323 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3324 			item->ri_buf[1].i_len, __func__);
3325 		return -EIO;
3326 	}
3327 
3328 	/*
3329 	 * This type of quotas was turned off, so ignore this record.
3330 	 */
3331 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3332 	ASSERT(type);
3333 	if (log->l_quotaoffs_flag & type)
3334 		return 0;
3335 
3336 	/*
3337 	 * At this point we know that quota was _not_ turned off.
3338 	 * Since the mount flags are not indicating to us otherwise, this
3339 	 * must mean that quota is on, and the dquot needs to be replayed.
3340 	 * Remember that we may not have fully recovered the superblock yet,
3341 	 * so we can't do the usual trick of looking at the SB quota bits.
3342 	 *
3343 	 * The other possibility, of course, is that the quota subsystem was
3344 	 * removed since the last mount - ENOSYS.
3345 	 */
3346 	dq_f = item->ri_buf[0].i_addr;
3347 	ASSERT(dq_f);
3348 	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3349 			   "xlog_recover_dquot_pass2 (log copy)");
3350 	if (error)
3351 		return -EIO;
3352 	ASSERT(dq_f->qlf_len == 1);
3353 
3354 	/*
3355 	 * At this point we are assuming that the dquots have been allocated
3356 	 * and hence the buffer has valid dquots stamped in it. It should,
3357 	 * therefore, pass verifier validation. If the dquot is bad, then the
3358 	 * we'll return an error here, so we don't need to specifically check
3359 	 * the dquot in the buffer after the verifier has run.
3360 	 */
3361 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3362 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3363 				   &xfs_dquot_buf_ops);
3364 	if (error)
3365 		return error;
3366 
3367 	ASSERT(bp);
3368 	ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3369 
3370 	/*
3371 	 * If the dquot has an LSN in it, recover the dquot only if it's less
3372 	 * than the lsn of the transaction we are replaying.
3373 	 */
3374 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3375 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3376 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3377 
3378 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3379 			goto out_release;
3380 		}
3381 	}
3382 
3383 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3384 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3385 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3386 				 XFS_DQUOT_CRC_OFF);
3387 	}
3388 
3389 	ASSERT(dq_f->qlf_size == 2);
3390 	ASSERT(bp->b_target->bt_mount == mp);
3391 	bp->b_iodone = xlog_recover_iodone;
3392 	xfs_buf_delwri_queue(bp, buffer_list);
3393 
3394 out_release:
3395 	xfs_buf_relse(bp);
3396 	return 0;
3397 }
3398 
3399 /*
3400  * This routine is called to create an in-core extent free intent
3401  * item from the efi format structure which was logged on disk.
3402  * It allocates an in-core efi, copies the extents from the format
3403  * structure into it, and adds the efi to the AIL with the given
3404  * LSN.
3405  */
3406 STATIC int
3407 xlog_recover_efi_pass2(
3408 	struct xlog			*log,
3409 	struct xlog_recover_item	*item,
3410 	xfs_lsn_t			lsn)
3411 {
3412 	int				error;
3413 	struct xfs_mount		*mp = log->l_mp;
3414 	struct xfs_efi_log_item		*efip;
3415 	struct xfs_efi_log_format	*efi_formatp;
3416 
3417 	efi_formatp = item->ri_buf[0].i_addr;
3418 
3419 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3420 	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3421 	if (error) {
3422 		xfs_efi_item_free(efip);
3423 		return error;
3424 	}
3425 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3426 
3427 	spin_lock(&log->l_ailp->xa_lock);
3428 	/*
3429 	 * The EFI has two references. One for the EFD and one for EFI to ensure
3430 	 * it makes it into the AIL. Insert the EFI into the AIL directly and
3431 	 * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3432 	 * AIL lock.
3433 	 */
3434 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3435 	xfs_efi_release(efip);
3436 	return 0;
3437 }
3438 
3439 
3440 /*
3441  * This routine is called when an EFD format structure is found in a committed
3442  * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3443  * was still in the log. To do this it searches the AIL for the EFI with an id
3444  * equal to that in the EFD format structure. If we find it we drop the EFD
3445  * reference, which removes the EFI from the AIL and frees it.
3446  */
3447 STATIC int
3448 xlog_recover_efd_pass2(
3449 	struct xlog			*log,
3450 	struct xlog_recover_item	*item)
3451 {
3452 	xfs_efd_log_format_t	*efd_formatp;
3453 	xfs_efi_log_item_t	*efip = NULL;
3454 	xfs_log_item_t		*lip;
3455 	uint64_t		efi_id;
3456 	struct xfs_ail_cursor	cur;
3457 	struct xfs_ail		*ailp = log->l_ailp;
3458 
3459 	efd_formatp = item->ri_buf[0].i_addr;
3460 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3461 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3462 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3463 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3464 	efi_id = efd_formatp->efd_efi_id;
3465 
3466 	/*
3467 	 * Search for the EFI with the id in the EFD format structure in the
3468 	 * AIL.
3469 	 */
3470 	spin_lock(&ailp->xa_lock);
3471 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3472 	while (lip != NULL) {
3473 		if (lip->li_type == XFS_LI_EFI) {
3474 			efip = (xfs_efi_log_item_t *)lip;
3475 			if (efip->efi_format.efi_id == efi_id) {
3476 				/*
3477 				 * Drop the EFD reference to the EFI. This
3478 				 * removes the EFI from the AIL and frees it.
3479 				 */
3480 				spin_unlock(&ailp->xa_lock);
3481 				xfs_efi_release(efip);
3482 				spin_lock(&ailp->xa_lock);
3483 				break;
3484 			}
3485 		}
3486 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3487 	}
3488 
3489 	xfs_trans_ail_cursor_done(&cur);
3490 	spin_unlock(&ailp->xa_lock);
3491 
3492 	return 0;
3493 }
3494 
3495 /*
3496  * This routine is called to create an in-core extent rmap update
3497  * item from the rui format structure which was logged on disk.
3498  * It allocates an in-core rui, copies the extents from the format
3499  * structure into it, and adds the rui to the AIL with the given
3500  * LSN.
3501  */
3502 STATIC int
3503 xlog_recover_rui_pass2(
3504 	struct xlog			*log,
3505 	struct xlog_recover_item	*item,
3506 	xfs_lsn_t			lsn)
3507 {
3508 	int				error;
3509 	struct xfs_mount		*mp = log->l_mp;
3510 	struct xfs_rui_log_item		*ruip;
3511 	struct xfs_rui_log_format	*rui_formatp;
3512 
3513 	rui_formatp = item->ri_buf[0].i_addr;
3514 
3515 	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3516 	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3517 	if (error) {
3518 		xfs_rui_item_free(ruip);
3519 		return error;
3520 	}
3521 	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3522 
3523 	spin_lock(&log->l_ailp->xa_lock);
3524 	/*
3525 	 * The RUI has two references. One for the RUD and one for RUI to ensure
3526 	 * it makes it into the AIL. Insert the RUI into the AIL directly and
3527 	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3528 	 * AIL lock.
3529 	 */
3530 	xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3531 	xfs_rui_release(ruip);
3532 	return 0;
3533 }
3534 
3535 
3536 /*
3537  * This routine is called when an RUD format structure is found in a committed
3538  * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3539  * was still in the log. To do this it searches the AIL for the RUI with an id
3540  * equal to that in the RUD format structure. If we find it we drop the RUD
3541  * reference, which removes the RUI from the AIL and frees it.
3542  */
3543 STATIC int
3544 xlog_recover_rud_pass2(
3545 	struct xlog			*log,
3546 	struct xlog_recover_item	*item)
3547 {
3548 	struct xfs_rud_log_format	*rud_formatp;
3549 	struct xfs_rui_log_item		*ruip = NULL;
3550 	struct xfs_log_item		*lip;
3551 	uint64_t			rui_id;
3552 	struct xfs_ail_cursor		cur;
3553 	struct xfs_ail			*ailp = log->l_ailp;
3554 
3555 	rud_formatp = item->ri_buf[0].i_addr;
3556 	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3557 	rui_id = rud_formatp->rud_rui_id;
3558 
3559 	/*
3560 	 * Search for the RUI with the id in the RUD format structure in the
3561 	 * AIL.
3562 	 */
3563 	spin_lock(&ailp->xa_lock);
3564 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3565 	while (lip != NULL) {
3566 		if (lip->li_type == XFS_LI_RUI) {
3567 			ruip = (struct xfs_rui_log_item *)lip;
3568 			if (ruip->rui_format.rui_id == rui_id) {
3569 				/*
3570 				 * Drop the RUD reference to the RUI. This
3571 				 * removes the RUI from the AIL and frees it.
3572 				 */
3573 				spin_unlock(&ailp->xa_lock);
3574 				xfs_rui_release(ruip);
3575 				spin_lock(&ailp->xa_lock);
3576 				break;
3577 			}
3578 		}
3579 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3580 	}
3581 
3582 	xfs_trans_ail_cursor_done(&cur);
3583 	spin_unlock(&ailp->xa_lock);
3584 
3585 	return 0;
3586 }
3587 
3588 /*
3589  * Copy an CUI format buffer from the given buf, and into the destination
3590  * CUI format structure.  The CUI/CUD items were designed not to need any
3591  * special alignment handling.
3592  */
3593 static int
3594 xfs_cui_copy_format(
3595 	struct xfs_log_iovec		*buf,
3596 	struct xfs_cui_log_format	*dst_cui_fmt)
3597 {
3598 	struct xfs_cui_log_format	*src_cui_fmt;
3599 	uint				len;
3600 
3601 	src_cui_fmt = buf->i_addr;
3602 	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3603 
3604 	if (buf->i_len == len) {
3605 		memcpy(dst_cui_fmt, src_cui_fmt, len);
3606 		return 0;
3607 	}
3608 	return -EFSCORRUPTED;
3609 }
3610 
3611 /*
3612  * This routine is called to create an in-core extent refcount update
3613  * item from the cui format structure which was logged on disk.
3614  * It allocates an in-core cui, copies the extents from the format
3615  * structure into it, and adds the cui to the AIL with the given
3616  * LSN.
3617  */
3618 STATIC int
3619 xlog_recover_cui_pass2(
3620 	struct xlog			*log,
3621 	struct xlog_recover_item	*item,
3622 	xfs_lsn_t			lsn)
3623 {
3624 	int				error;
3625 	struct xfs_mount		*mp = log->l_mp;
3626 	struct xfs_cui_log_item		*cuip;
3627 	struct xfs_cui_log_format	*cui_formatp;
3628 
3629 	cui_formatp = item->ri_buf[0].i_addr;
3630 
3631 	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3632 	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3633 	if (error) {
3634 		xfs_cui_item_free(cuip);
3635 		return error;
3636 	}
3637 	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3638 
3639 	spin_lock(&log->l_ailp->xa_lock);
3640 	/*
3641 	 * The CUI has two references. One for the CUD and one for CUI to ensure
3642 	 * it makes it into the AIL. Insert the CUI into the AIL directly and
3643 	 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3644 	 * AIL lock.
3645 	 */
3646 	xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3647 	xfs_cui_release(cuip);
3648 	return 0;
3649 }
3650 
3651 
3652 /*
3653  * This routine is called when an CUD format structure is found in a committed
3654  * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3655  * was still in the log. To do this it searches the AIL for the CUI with an id
3656  * equal to that in the CUD format structure. If we find it we drop the CUD
3657  * reference, which removes the CUI from the AIL and frees it.
3658  */
3659 STATIC int
3660 xlog_recover_cud_pass2(
3661 	struct xlog			*log,
3662 	struct xlog_recover_item	*item)
3663 {
3664 	struct xfs_cud_log_format	*cud_formatp;
3665 	struct xfs_cui_log_item		*cuip = NULL;
3666 	struct xfs_log_item		*lip;
3667 	uint64_t			cui_id;
3668 	struct xfs_ail_cursor		cur;
3669 	struct xfs_ail			*ailp = log->l_ailp;
3670 
3671 	cud_formatp = item->ri_buf[0].i_addr;
3672 	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3673 		return -EFSCORRUPTED;
3674 	cui_id = cud_formatp->cud_cui_id;
3675 
3676 	/*
3677 	 * Search for the CUI with the id in the CUD format structure in the
3678 	 * AIL.
3679 	 */
3680 	spin_lock(&ailp->xa_lock);
3681 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3682 	while (lip != NULL) {
3683 		if (lip->li_type == XFS_LI_CUI) {
3684 			cuip = (struct xfs_cui_log_item *)lip;
3685 			if (cuip->cui_format.cui_id == cui_id) {
3686 				/*
3687 				 * Drop the CUD reference to the CUI. This
3688 				 * removes the CUI from the AIL and frees it.
3689 				 */
3690 				spin_unlock(&ailp->xa_lock);
3691 				xfs_cui_release(cuip);
3692 				spin_lock(&ailp->xa_lock);
3693 				break;
3694 			}
3695 		}
3696 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3697 	}
3698 
3699 	xfs_trans_ail_cursor_done(&cur);
3700 	spin_unlock(&ailp->xa_lock);
3701 
3702 	return 0;
3703 }
3704 
3705 /*
3706  * Copy an BUI format buffer from the given buf, and into the destination
3707  * BUI format structure.  The BUI/BUD items were designed not to need any
3708  * special alignment handling.
3709  */
3710 static int
3711 xfs_bui_copy_format(
3712 	struct xfs_log_iovec		*buf,
3713 	struct xfs_bui_log_format	*dst_bui_fmt)
3714 {
3715 	struct xfs_bui_log_format	*src_bui_fmt;
3716 	uint				len;
3717 
3718 	src_bui_fmt = buf->i_addr;
3719 	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3720 
3721 	if (buf->i_len == len) {
3722 		memcpy(dst_bui_fmt, src_bui_fmt, len);
3723 		return 0;
3724 	}
3725 	return -EFSCORRUPTED;
3726 }
3727 
3728 /*
3729  * This routine is called to create an in-core extent bmap update
3730  * item from the bui format structure which was logged on disk.
3731  * It allocates an in-core bui, copies the extents from the format
3732  * structure into it, and adds the bui to the AIL with the given
3733  * LSN.
3734  */
3735 STATIC int
3736 xlog_recover_bui_pass2(
3737 	struct xlog			*log,
3738 	struct xlog_recover_item	*item,
3739 	xfs_lsn_t			lsn)
3740 {
3741 	int				error;
3742 	struct xfs_mount		*mp = log->l_mp;
3743 	struct xfs_bui_log_item		*buip;
3744 	struct xfs_bui_log_format	*bui_formatp;
3745 
3746 	bui_formatp = item->ri_buf[0].i_addr;
3747 
3748 	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3749 		return -EFSCORRUPTED;
3750 	buip = xfs_bui_init(mp);
3751 	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3752 	if (error) {
3753 		xfs_bui_item_free(buip);
3754 		return error;
3755 	}
3756 	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3757 
3758 	spin_lock(&log->l_ailp->xa_lock);
3759 	/*
3760 	 * The RUI has two references. One for the RUD and one for RUI to ensure
3761 	 * it makes it into the AIL. Insert the RUI into the AIL directly and
3762 	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3763 	 * AIL lock.
3764 	 */
3765 	xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3766 	xfs_bui_release(buip);
3767 	return 0;
3768 }
3769 
3770 
3771 /*
3772  * This routine is called when an BUD format structure is found in a committed
3773  * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3774  * was still in the log. To do this it searches the AIL for the BUI with an id
3775  * equal to that in the BUD format structure. If we find it we drop the BUD
3776  * reference, which removes the BUI from the AIL and frees it.
3777  */
3778 STATIC int
3779 xlog_recover_bud_pass2(
3780 	struct xlog			*log,
3781 	struct xlog_recover_item	*item)
3782 {
3783 	struct xfs_bud_log_format	*bud_formatp;
3784 	struct xfs_bui_log_item		*buip = NULL;
3785 	struct xfs_log_item		*lip;
3786 	uint64_t			bui_id;
3787 	struct xfs_ail_cursor		cur;
3788 	struct xfs_ail			*ailp = log->l_ailp;
3789 
3790 	bud_formatp = item->ri_buf[0].i_addr;
3791 	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3792 		return -EFSCORRUPTED;
3793 	bui_id = bud_formatp->bud_bui_id;
3794 
3795 	/*
3796 	 * Search for the BUI with the id in the BUD format structure in the
3797 	 * AIL.
3798 	 */
3799 	spin_lock(&ailp->xa_lock);
3800 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3801 	while (lip != NULL) {
3802 		if (lip->li_type == XFS_LI_BUI) {
3803 			buip = (struct xfs_bui_log_item *)lip;
3804 			if (buip->bui_format.bui_id == bui_id) {
3805 				/*
3806 				 * Drop the BUD reference to the BUI. This
3807 				 * removes the BUI from the AIL and frees it.
3808 				 */
3809 				spin_unlock(&ailp->xa_lock);
3810 				xfs_bui_release(buip);
3811 				spin_lock(&ailp->xa_lock);
3812 				break;
3813 			}
3814 		}
3815 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3816 	}
3817 
3818 	xfs_trans_ail_cursor_done(&cur);
3819 	spin_unlock(&ailp->xa_lock);
3820 
3821 	return 0;
3822 }
3823 
3824 /*
3825  * This routine is called when an inode create format structure is found in a
3826  * committed transaction in the log.  It's purpose is to initialise the inodes
3827  * being allocated on disk. This requires us to get inode cluster buffers that
3828  * match the range to be initialised, stamped with inode templates and written
3829  * by delayed write so that subsequent modifications will hit the cached buffer
3830  * and only need writing out at the end of recovery.
3831  */
3832 STATIC int
3833 xlog_recover_do_icreate_pass2(
3834 	struct xlog		*log,
3835 	struct list_head	*buffer_list,
3836 	xlog_recover_item_t	*item)
3837 {
3838 	struct xfs_mount	*mp = log->l_mp;
3839 	struct xfs_icreate_log	*icl;
3840 	xfs_agnumber_t		agno;
3841 	xfs_agblock_t		agbno;
3842 	unsigned int		count;
3843 	unsigned int		isize;
3844 	xfs_agblock_t		length;
3845 	int			blks_per_cluster;
3846 	int			bb_per_cluster;
3847 	int			cancel_count;
3848 	int			nbufs;
3849 	int			i;
3850 
3851 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3852 	if (icl->icl_type != XFS_LI_ICREATE) {
3853 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3854 		return -EINVAL;
3855 	}
3856 
3857 	if (icl->icl_size != 1) {
3858 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3859 		return -EINVAL;
3860 	}
3861 
3862 	agno = be32_to_cpu(icl->icl_ag);
3863 	if (agno >= mp->m_sb.sb_agcount) {
3864 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3865 		return -EINVAL;
3866 	}
3867 	agbno = be32_to_cpu(icl->icl_agbno);
3868 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3869 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3870 		return -EINVAL;
3871 	}
3872 	isize = be32_to_cpu(icl->icl_isize);
3873 	if (isize != mp->m_sb.sb_inodesize) {
3874 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3875 		return -EINVAL;
3876 	}
3877 	count = be32_to_cpu(icl->icl_count);
3878 	if (!count) {
3879 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3880 		return -EINVAL;
3881 	}
3882 	length = be32_to_cpu(icl->icl_length);
3883 	if (!length || length >= mp->m_sb.sb_agblocks) {
3884 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3885 		return -EINVAL;
3886 	}
3887 
3888 	/*
3889 	 * The inode chunk is either full or sparse and we only support
3890 	 * m_ialloc_min_blks sized sparse allocations at this time.
3891 	 */
3892 	if (length != mp->m_ialloc_blks &&
3893 	    length != mp->m_ialloc_min_blks) {
3894 		xfs_warn(log->l_mp,
3895 			 "%s: unsupported chunk length", __FUNCTION__);
3896 		return -EINVAL;
3897 	}
3898 
3899 	/* verify inode count is consistent with extent length */
3900 	if ((count >> mp->m_sb.sb_inopblog) != length) {
3901 		xfs_warn(log->l_mp,
3902 			 "%s: inconsistent inode count and chunk length",
3903 			 __FUNCTION__);
3904 		return -EINVAL;
3905 	}
3906 
3907 	/*
3908 	 * The icreate transaction can cover multiple cluster buffers and these
3909 	 * buffers could have been freed and reused. Check the individual
3910 	 * buffers for cancellation so we don't overwrite anything written after
3911 	 * a cancellation.
3912 	 */
3913 	blks_per_cluster = xfs_icluster_size_fsb(mp);
3914 	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
3915 	nbufs = length / blks_per_cluster;
3916 	for (i = 0, cancel_count = 0; i < nbufs; i++) {
3917 		xfs_daddr_t	daddr;
3918 
3919 		daddr = XFS_AGB_TO_DADDR(mp, agno,
3920 					 agbno + i * blks_per_cluster);
3921 		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3922 			cancel_count++;
3923 	}
3924 
3925 	/*
3926 	 * We currently only use icreate for a single allocation at a time. This
3927 	 * means we should expect either all or none of the buffers to be
3928 	 * cancelled. Be conservative and skip replay if at least one buffer is
3929 	 * cancelled, but warn the user that something is awry if the buffers
3930 	 * are not consistent.
3931 	 *
3932 	 * XXX: This must be refined to only skip cancelled clusters once we use
3933 	 * icreate for multiple chunk allocations.
3934 	 */
3935 	ASSERT(!cancel_count || cancel_count == nbufs);
3936 	if (cancel_count) {
3937 		if (cancel_count != nbufs)
3938 			xfs_warn(mp,
3939 	"WARNING: partial inode chunk cancellation, skipped icreate.");
3940 		trace_xfs_log_recover_icreate_cancel(log, icl);
3941 		return 0;
3942 	}
3943 
3944 	trace_xfs_log_recover_icreate_recover(log, icl);
3945 	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3946 				     length, be32_to_cpu(icl->icl_gen));
3947 }
3948 
3949 STATIC void
3950 xlog_recover_buffer_ra_pass2(
3951 	struct xlog                     *log,
3952 	struct xlog_recover_item        *item)
3953 {
3954 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3955 	struct xfs_mount		*mp = log->l_mp;
3956 
3957 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3958 			buf_f->blf_len, buf_f->blf_flags)) {
3959 		return;
3960 	}
3961 
3962 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3963 				buf_f->blf_len, NULL);
3964 }
3965 
3966 STATIC void
3967 xlog_recover_inode_ra_pass2(
3968 	struct xlog                     *log,
3969 	struct xlog_recover_item        *item)
3970 {
3971 	struct xfs_inode_log_format	ilf_buf;
3972 	struct xfs_inode_log_format	*ilfp;
3973 	struct xfs_mount		*mp = log->l_mp;
3974 	int			error;
3975 
3976 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3977 		ilfp = item->ri_buf[0].i_addr;
3978 	} else {
3979 		ilfp = &ilf_buf;
3980 		memset(ilfp, 0, sizeof(*ilfp));
3981 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3982 		if (error)
3983 			return;
3984 	}
3985 
3986 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3987 		return;
3988 
3989 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3990 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3991 }
3992 
3993 STATIC void
3994 xlog_recover_dquot_ra_pass2(
3995 	struct xlog			*log,
3996 	struct xlog_recover_item	*item)
3997 {
3998 	struct xfs_mount	*mp = log->l_mp;
3999 	struct xfs_disk_dquot	*recddq;
4000 	struct xfs_dq_logformat	*dq_f;
4001 	uint			type;
4002 	int			len;
4003 
4004 
4005 	if (mp->m_qflags == 0)
4006 		return;
4007 
4008 	recddq = item->ri_buf[1].i_addr;
4009 	if (recddq == NULL)
4010 		return;
4011 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
4012 		return;
4013 
4014 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
4015 	ASSERT(type);
4016 	if (log->l_quotaoffs_flag & type)
4017 		return;
4018 
4019 	dq_f = item->ri_buf[0].i_addr;
4020 	ASSERT(dq_f);
4021 	ASSERT(dq_f->qlf_len == 1);
4022 
4023 	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
4024 	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
4025 		return;
4026 
4027 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
4028 			  &xfs_dquot_buf_ra_ops);
4029 }
4030 
4031 STATIC void
4032 xlog_recover_ra_pass2(
4033 	struct xlog			*log,
4034 	struct xlog_recover_item	*item)
4035 {
4036 	switch (ITEM_TYPE(item)) {
4037 	case XFS_LI_BUF:
4038 		xlog_recover_buffer_ra_pass2(log, item);
4039 		break;
4040 	case XFS_LI_INODE:
4041 		xlog_recover_inode_ra_pass2(log, item);
4042 		break;
4043 	case XFS_LI_DQUOT:
4044 		xlog_recover_dquot_ra_pass2(log, item);
4045 		break;
4046 	case XFS_LI_EFI:
4047 	case XFS_LI_EFD:
4048 	case XFS_LI_QUOTAOFF:
4049 	case XFS_LI_RUI:
4050 	case XFS_LI_RUD:
4051 	case XFS_LI_CUI:
4052 	case XFS_LI_CUD:
4053 	case XFS_LI_BUI:
4054 	case XFS_LI_BUD:
4055 	default:
4056 		break;
4057 	}
4058 }
4059 
4060 STATIC int
4061 xlog_recover_commit_pass1(
4062 	struct xlog			*log,
4063 	struct xlog_recover		*trans,
4064 	struct xlog_recover_item	*item)
4065 {
4066 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
4067 
4068 	switch (ITEM_TYPE(item)) {
4069 	case XFS_LI_BUF:
4070 		return xlog_recover_buffer_pass1(log, item);
4071 	case XFS_LI_QUOTAOFF:
4072 		return xlog_recover_quotaoff_pass1(log, item);
4073 	case XFS_LI_INODE:
4074 	case XFS_LI_EFI:
4075 	case XFS_LI_EFD:
4076 	case XFS_LI_DQUOT:
4077 	case XFS_LI_ICREATE:
4078 	case XFS_LI_RUI:
4079 	case XFS_LI_RUD:
4080 	case XFS_LI_CUI:
4081 	case XFS_LI_CUD:
4082 	case XFS_LI_BUI:
4083 	case XFS_LI_BUD:
4084 		/* nothing to do in pass 1 */
4085 		return 0;
4086 	default:
4087 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4088 			__func__, ITEM_TYPE(item));
4089 		ASSERT(0);
4090 		return -EIO;
4091 	}
4092 }
4093 
4094 STATIC int
4095 xlog_recover_commit_pass2(
4096 	struct xlog			*log,
4097 	struct xlog_recover		*trans,
4098 	struct list_head		*buffer_list,
4099 	struct xlog_recover_item	*item)
4100 {
4101 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4102 
4103 	switch (ITEM_TYPE(item)) {
4104 	case XFS_LI_BUF:
4105 		return xlog_recover_buffer_pass2(log, buffer_list, item,
4106 						 trans->r_lsn);
4107 	case XFS_LI_INODE:
4108 		return xlog_recover_inode_pass2(log, buffer_list, item,
4109 						 trans->r_lsn);
4110 	case XFS_LI_EFI:
4111 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4112 	case XFS_LI_EFD:
4113 		return xlog_recover_efd_pass2(log, item);
4114 	case XFS_LI_RUI:
4115 		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4116 	case XFS_LI_RUD:
4117 		return xlog_recover_rud_pass2(log, item);
4118 	case XFS_LI_CUI:
4119 		return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4120 	case XFS_LI_CUD:
4121 		return xlog_recover_cud_pass2(log, item);
4122 	case XFS_LI_BUI:
4123 		return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4124 	case XFS_LI_BUD:
4125 		return xlog_recover_bud_pass2(log, item);
4126 	case XFS_LI_DQUOT:
4127 		return xlog_recover_dquot_pass2(log, buffer_list, item,
4128 						trans->r_lsn);
4129 	case XFS_LI_ICREATE:
4130 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4131 	case XFS_LI_QUOTAOFF:
4132 		/* nothing to do in pass2 */
4133 		return 0;
4134 	default:
4135 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4136 			__func__, ITEM_TYPE(item));
4137 		ASSERT(0);
4138 		return -EIO;
4139 	}
4140 }
4141 
4142 STATIC int
4143 xlog_recover_items_pass2(
4144 	struct xlog                     *log,
4145 	struct xlog_recover             *trans,
4146 	struct list_head                *buffer_list,
4147 	struct list_head                *item_list)
4148 {
4149 	struct xlog_recover_item	*item;
4150 	int				error = 0;
4151 
4152 	list_for_each_entry(item, item_list, ri_list) {
4153 		error = xlog_recover_commit_pass2(log, trans,
4154 					  buffer_list, item);
4155 		if (error)
4156 			return error;
4157 	}
4158 
4159 	return error;
4160 }
4161 
4162 /*
4163  * Perform the transaction.
4164  *
4165  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
4166  * EFIs and EFDs get queued up by adding entries into the AIL for them.
4167  */
4168 STATIC int
4169 xlog_recover_commit_trans(
4170 	struct xlog		*log,
4171 	struct xlog_recover	*trans,
4172 	int			pass,
4173 	struct list_head	*buffer_list)
4174 {
4175 	int				error = 0;
4176 	int				items_queued = 0;
4177 	struct xlog_recover_item	*item;
4178 	struct xlog_recover_item	*next;
4179 	LIST_HEAD			(ra_list);
4180 	LIST_HEAD			(done_list);
4181 
4182 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
4183 
4184 	hlist_del_init(&trans->r_list);
4185 
4186 	error = xlog_recover_reorder_trans(log, trans, pass);
4187 	if (error)
4188 		return error;
4189 
4190 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4191 		switch (pass) {
4192 		case XLOG_RECOVER_PASS1:
4193 			error = xlog_recover_commit_pass1(log, trans, item);
4194 			break;
4195 		case XLOG_RECOVER_PASS2:
4196 			xlog_recover_ra_pass2(log, item);
4197 			list_move_tail(&item->ri_list, &ra_list);
4198 			items_queued++;
4199 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4200 				error = xlog_recover_items_pass2(log, trans,
4201 						buffer_list, &ra_list);
4202 				list_splice_tail_init(&ra_list, &done_list);
4203 				items_queued = 0;
4204 			}
4205 
4206 			break;
4207 		default:
4208 			ASSERT(0);
4209 		}
4210 
4211 		if (error)
4212 			goto out;
4213 	}
4214 
4215 out:
4216 	if (!list_empty(&ra_list)) {
4217 		if (!error)
4218 			error = xlog_recover_items_pass2(log, trans,
4219 					buffer_list, &ra_list);
4220 		list_splice_tail_init(&ra_list, &done_list);
4221 	}
4222 
4223 	if (!list_empty(&done_list))
4224 		list_splice_init(&done_list, &trans->r_itemq);
4225 
4226 	return error;
4227 }
4228 
4229 STATIC void
4230 xlog_recover_add_item(
4231 	struct list_head	*head)
4232 {
4233 	xlog_recover_item_t	*item;
4234 
4235 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
4236 	INIT_LIST_HEAD(&item->ri_list);
4237 	list_add_tail(&item->ri_list, head);
4238 }
4239 
4240 STATIC int
4241 xlog_recover_add_to_cont_trans(
4242 	struct xlog		*log,
4243 	struct xlog_recover	*trans,
4244 	char			*dp,
4245 	int			len)
4246 {
4247 	xlog_recover_item_t	*item;
4248 	char			*ptr, *old_ptr;
4249 	int			old_len;
4250 
4251 	/*
4252 	 * If the transaction is empty, the header was split across this and the
4253 	 * previous record. Copy the rest of the header.
4254 	 */
4255 	if (list_empty(&trans->r_itemq)) {
4256 		ASSERT(len <= sizeof(struct xfs_trans_header));
4257 		if (len > sizeof(struct xfs_trans_header)) {
4258 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4259 			return -EIO;
4260 		}
4261 
4262 		xlog_recover_add_item(&trans->r_itemq);
4263 		ptr = (char *)&trans->r_theader +
4264 				sizeof(struct xfs_trans_header) - len;
4265 		memcpy(ptr, dp, len);
4266 		return 0;
4267 	}
4268 
4269 	/* take the tail entry */
4270 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4271 
4272 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4273 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
4274 
4275 	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
4276 	memcpy(&ptr[old_len], dp, len);
4277 	item->ri_buf[item->ri_cnt-1].i_len += len;
4278 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4279 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4280 	return 0;
4281 }
4282 
4283 /*
4284  * The next region to add is the start of a new region.  It could be
4285  * a whole region or it could be the first part of a new region.  Because
4286  * of this, the assumption here is that the type and size fields of all
4287  * format structures fit into the first 32 bits of the structure.
4288  *
4289  * This works because all regions must be 32 bit aligned.  Therefore, we
4290  * either have both fields or we have neither field.  In the case we have
4291  * neither field, the data part of the region is zero length.  We only have
4292  * a log_op_header and can throw away the header since a new one will appear
4293  * later.  If we have at least 4 bytes, then we can determine how many regions
4294  * will appear in the current log item.
4295  */
4296 STATIC int
4297 xlog_recover_add_to_trans(
4298 	struct xlog		*log,
4299 	struct xlog_recover	*trans,
4300 	char			*dp,
4301 	int			len)
4302 {
4303 	struct xfs_inode_log_format	*in_f;			/* any will do */
4304 	xlog_recover_item_t	*item;
4305 	char			*ptr;
4306 
4307 	if (!len)
4308 		return 0;
4309 	if (list_empty(&trans->r_itemq)) {
4310 		/* we need to catch log corruptions here */
4311 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4312 			xfs_warn(log->l_mp, "%s: bad header magic number",
4313 				__func__);
4314 			ASSERT(0);
4315 			return -EIO;
4316 		}
4317 
4318 		if (len > sizeof(struct xfs_trans_header)) {
4319 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
4320 			ASSERT(0);
4321 			return -EIO;
4322 		}
4323 
4324 		/*
4325 		 * The transaction header can be arbitrarily split across op
4326 		 * records. If we don't have the whole thing here, copy what we
4327 		 * do have and handle the rest in the next record.
4328 		 */
4329 		if (len == sizeof(struct xfs_trans_header))
4330 			xlog_recover_add_item(&trans->r_itemq);
4331 		memcpy(&trans->r_theader, dp, len);
4332 		return 0;
4333 	}
4334 
4335 	ptr = kmem_alloc(len, KM_SLEEP);
4336 	memcpy(ptr, dp, len);
4337 	in_f = (struct xfs_inode_log_format *)ptr;
4338 
4339 	/* take the tail entry */
4340 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4341 	if (item->ri_total != 0 &&
4342 	     item->ri_total == item->ri_cnt) {
4343 		/* tail item is in use, get a new one */
4344 		xlog_recover_add_item(&trans->r_itemq);
4345 		item = list_entry(trans->r_itemq.prev,
4346 					xlog_recover_item_t, ri_list);
4347 	}
4348 
4349 	if (item->ri_total == 0) {		/* first region to be added */
4350 		if (in_f->ilf_size == 0 ||
4351 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4352 			xfs_warn(log->l_mp,
4353 		"bad number of regions (%d) in inode log format",
4354 				  in_f->ilf_size);
4355 			ASSERT(0);
4356 			kmem_free(ptr);
4357 			return -EIO;
4358 		}
4359 
4360 		item->ri_total = in_f->ilf_size;
4361 		item->ri_buf =
4362 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4363 				    KM_SLEEP);
4364 	}
4365 	ASSERT(item->ri_total > item->ri_cnt);
4366 	/* Description region is ri_buf[0] */
4367 	item->ri_buf[item->ri_cnt].i_addr = ptr;
4368 	item->ri_buf[item->ri_cnt].i_len  = len;
4369 	item->ri_cnt++;
4370 	trace_xfs_log_recover_item_add(log, trans, item, 0);
4371 	return 0;
4372 }
4373 
4374 /*
4375  * Free up any resources allocated by the transaction
4376  *
4377  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4378  */
4379 STATIC void
4380 xlog_recover_free_trans(
4381 	struct xlog_recover	*trans)
4382 {
4383 	xlog_recover_item_t	*item, *n;
4384 	int			i;
4385 
4386 	hlist_del_init(&trans->r_list);
4387 
4388 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4389 		/* Free the regions in the item. */
4390 		list_del(&item->ri_list);
4391 		for (i = 0; i < item->ri_cnt; i++)
4392 			kmem_free(item->ri_buf[i].i_addr);
4393 		/* Free the item itself */
4394 		kmem_free(item->ri_buf);
4395 		kmem_free(item);
4396 	}
4397 	/* Free the transaction recover structure */
4398 	kmem_free(trans);
4399 }
4400 
4401 /*
4402  * On error or completion, trans is freed.
4403  */
4404 STATIC int
4405 xlog_recovery_process_trans(
4406 	struct xlog		*log,
4407 	struct xlog_recover	*trans,
4408 	char			*dp,
4409 	unsigned int		len,
4410 	unsigned int		flags,
4411 	int			pass,
4412 	struct list_head	*buffer_list)
4413 {
4414 	int			error = 0;
4415 	bool			freeit = false;
4416 
4417 	/* mask off ophdr transaction container flags */
4418 	flags &= ~XLOG_END_TRANS;
4419 	if (flags & XLOG_WAS_CONT_TRANS)
4420 		flags &= ~XLOG_CONTINUE_TRANS;
4421 
4422 	/*
4423 	 * Callees must not free the trans structure. We'll decide if we need to
4424 	 * free it or not based on the operation being done and it's result.
4425 	 */
4426 	switch (flags) {
4427 	/* expected flag values */
4428 	case 0:
4429 	case XLOG_CONTINUE_TRANS:
4430 		error = xlog_recover_add_to_trans(log, trans, dp, len);
4431 		break;
4432 	case XLOG_WAS_CONT_TRANS:
4433 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4434 		break;
4435 	case XLOG_COMMIT_TRANS:
4436 		error = xlog_recover_commit_trans(log, trans, pass,
4437 						  buffer_list);
4438 		/* success or fail, we are now done with this transaction. */
4439 		freeit = true;
4440 		break;
4441 
4442 	/* unexpected flag values */
4443 	case XLOG_UNMOUNT_TRANS:
4444 		/* just skip trans */
4445 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4446 		freeit = true;
4447 		break;
4448 	case XLOG_START_TRANS:
4449 	default:
4450 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4451 		ASSERT(0);
4452 		error = -EIO;
4453 		break;
4454 	}
4455 	if (error || freeit)
4456 		xlog_recover_free_trans(trans);
4457 	return error;
4458 }
4459 
4460 /*
4461  * Lookup the transaction recovery structure associated with the ID in the
4462  * current ophdr. If the transaction doesn't exist and the start flag is set in
4463  * the ophdr, then allocate a new transaction for future ID matches to find.
4464  * Either way, return what we found during the lookup - an existing transaction
4465  * or nothing.
4466  */
4467 STATIC struct xlog_recover *
4468 xlog_recover_ophdr_to_trans(
4469 	struct hlist_head	rhash[],
4470 	struct xlog_rec_header	*rhead,
4471 	struct xlog_op_header	*ohead)
4472 {
4473 	struct xlog_recover	*trans;
4474 	xlog_tid_t		tid;
4475 	struct hlist_head	*rhp;
4476 
4477 	tid = be32_to_cpu(ohead->oh_tid);
4478 	rhp = &rhash[XLOG_RHASH(tid)];
4479 	hlist_for_each_entry(trans, rhp, r_list) {
4480 		if (trans->r_log_tid == tid)
4481 			return trans;
4482 	}
4483 
4484 	/*
4485 	 * skip over non-start transaction headers - we could be
4486 	 * processing slack space before the next transaction starts
4487 	 */
4488 	if (!(ohead->oh_flags & XLOG_START_TRANS))
4489 		return NULL;
4490 
4491 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4492 
4493 	/*
4494 	 * This is a new transaction so allocate a new recovery container to
4495 	 * hold the recovery ops that will follow.
4496 	 */
4497 	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
4498 	trans->r_log_tid = tid;
4499 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4500 	INIT_LIST_HEAD(&trans->r_itemq);
4501 	INIT_HLIST_NODE(&trans->r_list);
4502 	hlist_add_head(&trans->r_list, rhp);
4503 
4504 	/*
4505 	 * Nothing more to do for this ophdr. Items to be added to this new
4506 	 * transaction will be in subsequent ophdr containers.
4507 	 */
4508 	return NULL;
4509 }
4510 
4511 STATIC int
4512 xlog_recover_process_ophdr(
4513 	struct xlog		*log,
4514 	struct hlist_head	rhash[],
4515 	struct xlog_rec_header	*rhead,
4516 	struct xlog_op_header	*ohead,
4517 	char			*dp,
4518 	char			*end,
4519 	int			pass,
4520 	struct list_head	*buffer_list)
4521 {
4522 	struct xlog_recover	*trans;
4523 	unsigned int		len;
4524 	int			error;
4525 
4526 	/* Do we understand who wrote this op? */
4527 	if (ohead->oh_clientid != XFS_TRANSACTION &&
4528 	    ohead->oh_clientid != XFS_LOG) {
4529 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4530 			__func__, ohead->oh_clientid);
4531 		ASSERT(0);
4532 		return -EIO;
4533 	}
4534 
4535 	/*
4536 	 * Check the ophdr contains all the data it is supposed to contain.
4537 	 */
4538 	len = be32_to_cpu(ohead->oh_len);
4539 	if (dp + len > end) {
4540 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4541 		WARN_ON(1);
4542 		return -EIO;
4543 	}
4544 
4545 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4546 	if (!trans) {
4547 		/* nothing to do, so skip over this ophdr */
4548 		return 0;
4549 	}
4550 
4551 	/*
4552 	 * The recovered buffer queue is drained only once we know that all
4553 	 * recovery items for the current LSN have been processed. This is
4554 	 * required because:
4555 	 *
4556 	 * - Buffer write submission updates the metadata LSN of the buffer.
4557 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
4558 	 *   the recovery item.
4559 	 * - Separate recovery items against the same metadata buffer can share
4560 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
4561 	 *   defined as the starting LSN of the first record in which its
4562 	 *   transaction appears, that a record can hold multiple transactions,
4563 	 *   and/or that a transaction can span multiple records.
4564 	 *
4565 	 * In other words, we are allowed to submit a buffer from log recovery
4566 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
4567 	 * items and cause corruption.
4568 	 *
4569 	 * We don't know up front whether buffers are updated multiple times per
4570 	 * LSN. Therefore, track the current LSN of each commit log record as it
4571 	 * is processed and drain the queue when it changes. Use commit records
4572 	 * because they are ordered correctly by the logging code.
4573 	 */
4574 	if (log->l_recovery_lsn != trans->r_lsn &&
4575 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
4576 		error = xfs_buf_delwri_submit(buffer_list);
4577 		if (error)
4578 			return error;
4579 		log->l_recovery_lsn = trans->r_lsn;
4580 	}
4581 
4582 	return xlog_recovery_process_trans(log, trans, dp, len,
4583 					   ohead->oh_flags, pass, buffer_list);
4584 }
4585 
4586 /*
4587  * There are two valid states of the r_state field.  0 indicates that the
4588  * transaction structure is in a normal state.  We have either seen the
4589  * start of the transaction or the last operation we added was not a partial
4590  * operation.  If the last operation we added to the transaction was a
4591  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4592  *
4593  * NOTE: skip LRs with 0 data length.
4594  */
4595 STATIC int
4596 xlog_recover_process_data(
4597 	struct xlog		*log,
4598 	struct hlist_head	rhash[],
4599 	struct xlog_rec_header	*rhead,
4600 	char			*dp,
4601 	int			pass,
4602 	struct list_head	*buffer_list)
4603 {
4604 	struct xlog_op_header	*ohead;
4605 	char			*end;
4606 	int			num_logops;
4607 	int			error;
4608 
4609 	end = dp + be32_to_cpu(rhead->h_len);
4610 	num_logops = be32_to_cpu(rhead->h_num_logops);
4611 
4612 	/* check the log format matches our own - else we can't recover */
4613 	if (xlog_header_check_recover(log->l_mp, rhead))
4614 		return -EIO;
4615 
4616 	trace_xfs_log_recover_record(log, rhead, pass);
4617 	while ((dp < end) && num_logops) {
4618 
4619 		ohead = (struct xlog_op_header *)dp;
4620 		dp += sizeof(*ohead);
4621 		ASSERT(dp <= end);
4622 
4623 		/* errors will abort recovery */
4624 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4625 						   dp, end, pass, buffer_list);
4626 		if (error)
4627 			return error;
4628 
4629 		dp += be32_to_cpu(ohead->oh_len);
4630 		num_logops--;
4631 	}
4632 	return 0;
4633 }
4634 
4635 /* Recover the EFI if necessary. */
4636 STATIC int
4637 xlog_recover_process_efi(
4638 	struct xfs_mount		*mp,
4639 	struct xfs_ail			*ailp,
4640 	struct xfs_log_item		*lip)
4641 {
4642 	struct xfs_efi_log_item		*efip;
4643 	int				error;
4644 
4645 	/*
4646 	 * Skip EFIs that we've already processed.
4647 	 */
4648 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4649 	if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4650 		return 0;
4651 
4652 	spin_unlock(&ailp->xa_lock);
4653 	error = xfs_efi_recover(mp, efip);
4654 	spin_lock(&ailp->xa_lock);
4655 
4656 	return error;
4657 }
4658 
4659 /* Release the EFI since we're cancelling everything. */
4660 STATIC void
4661 xlog_recover_cancel_efi(
4662 	struct xfs_mount		*mp,
4663 	struct xfs_ail			*ailp,
4664 	struct xfs_log_item		*lip)
4665 {
4666 	struct xfs_efi_log_item		*efip;
4667 
4668 	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4669 
4670 	spin_unlock(&ailp->xa_lock);
4671 	xfs_efi_release(efip);
4672 	spin_lock(&ailp->xa_lock);
4673 }
4674 
4675 /* Recover the RUI if necessary. */
4676 STATIC int
4677 xlog_recover_process_rui(
4678 	struct xfs_mount		*mp,
4679 	struct xfs_ail			*ailp,
4680 	struct xfs_log_item		*lip)
4681 {
4682 	struct xfs_rui_log_item		*ruip;
4683 	int				error;
4684 
4685 	/*
4686 	 * Skip RUIs that we've already processed.
4687 	 */
4688 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4689 	if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4690 		return 0;
4691 
4692 	spin_unlock(&ailp->xa_lock);
4693 	error = xfs_rui_recover(mp, ruip);
4694 	spin_lock(&ailp->xa_lock);
4695 
4696 	return error;
4697 }
4698 
4699 /* Release the RUI since we're cancelling everything. */
4700 STATIC void
4701 xlog_recover_cancel_rui(
4702 	struct xfs_mount		*mp,
4703 	struct xfs_ail			*ailp,
4704 	struct xfs_log_item		*lip)
4705 {
4706 	struct xfs_rui_log_item		*ruip;
4707 
4708 	ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4709 
4710 	spin_unlock(&ailp->xa_lock);
4711 	xfs_rui_release(ruip);
4712 	spin_lock(&ailp->xa_lock);
4713 }
4714 
4715 /* Recover the CUI if necessary. */
4716 STATIC int
4717 xlog_recover_process_cui(
4718 	struct xfs_mount		*mp,
4719 	struct xfs_ail			*ailp,
4720 	struct xfs_log_item		*lip,
4721 	struct xfs_defer_ops		*dfops)
4722 {
4723 	struct xfs_cui_log_item		*cuip;
4724 	int				error;
4725 
4726 	/*
4727 	 * Skip CUIs that we've already processed.
4728 	 */
4729 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4730 	if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4731 		return 0;
4732 
4733 	spin_unlock(&ailp->xa_lock);
4734 	error = xfs_cui_recover(mp, cuip, dfops);
4735 	spin_lock(&ailp->xa_lock);
4736 
4737 	return error;
4738 }
4739 
4740 /* Release the CUI since we're cancelling everything. */
4741 STATIC void
4742 xlog_recover_cancel_cui(
4743 	struct xfs_mount		*mp,
4744 	struct xfs_ail			*ailp,
4745 	struct xfs_log_item		*lip)
4746 {
4747 	struct xfs_cui_log_item		*cuip;
4748 
4749 	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4750 
4751 	spin_unlock(&ailp->xa_lock);
4752 	xfs_cui_release(cuip);
4753 	spin_lock(&ailp->xa_lock);
4754 }
4755 
4756 /* Recover the BUI if necessary. */
4757 STATIC int
4758 xlog_recover_process_bui(
4759 	struct xfs_mount		*mp,
4760 	struct xfs_ail			*ailp,
4761 	struct xfs_log_item		*lip,
4762 	struct xfs_defer_ops		*dfops)
4763 {
4764 	struct xfs_bui_log_item		*buip;
4765 	int				error;
4766 
4767 	/*
4768 	 * Skip BUIs that we've already processed.
4769 	 */
4770 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4771 	if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4772 		return 0;
4773 
4774 	spin_unlock(&ailp->xa_lock);
4775 	error = xfs_bui_recover(mp, buip, dfops);
4776 	spin_lock(&ailp->xa_lock);
4777 
4778 	return error;
4779 }
4780 
4781 /* Release the BUI since we're cancelling everything. */
4782 STATIC void
4783 xlog_recover_cancel_bui(
4784 	struct xfs_mount		*mp,
4785 	struct xfs_ail			*ailp,
4786 	struct xfs_log_item		*lip)
4787 {
4788 	struct xfs_bui_log_item		*buip;
4789 
4790 	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4791 
4792 	spin_unlock(&ailp->xa_lock);
4793 	xfs_bui_release(buip);
4794 	spin_lock(&ailp->xa_lock);
4795 }
4796 
4797 /* Is this log item a deferred action intent? */
4798 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4799 {
4800 	switch (lip->li_type) {
4801 	case XFS_LI_EFI:
4802 	case XFS_LI_RUI:
4803 	case XFS_LI_CUI:
4804 	case XFS_LI_BUI:
4805 		return true;
4806 	default:
4807 		return false;
4808 	}
4809 }
4810 
4811 /* Take all the collected deferred ops and finish them in order. */
4812 static int
4813 xlog_finish_defer_ops(
4814 	struct xfs_mount	*mp,
4815 	struct xfs_defer_ops	*dfops)
4816 {
4817 	struct xfs_trans	*tp;
4818 	int64_t			freeblks;
4819 	uint			resblks;
4820 	int			error;
4821 
4822 	/*
4823 	 * We're finishing the defer_ops that accumulated as a result of
4824 	 * recovering unfinished intent items during log recovery.  We
4825 	 * reserve an itruncate transaction because it is the largest
4826 	 * permanent transaction type.  Since we're the only user of the fs
4827 	 * right now, take 93% (15/16) of the available free blocks.  Use
4828 	 * weird math to avoid a 64-bit division.
4829 	 */
4830 	freeblks = percpu_counter_sum(&mp->m_fdblocks);
4831 	if (freeblks <= 0)
4832 		return -ENOSPC;
4833 	resblks = min_t(int64_t, UINT_MAX, freeblks);
4834 	resblks = (resblks * 15) >> 4;
4835 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4836 			0, XFS_TRANS_RESERVE, &tp);
4837 	if (error)
4838 		return error;
4839 
4840 	error = xfs_defer_finish(&tp, dfops);
4841 	if (error)
4842 		goto out_cancel;
4843 
4844 	return xfs_trans_commit(tp);
4845 
4846 out_cancel:
4847 	xfs_trans_cancel(tp);
4848 	return error;
4849 }
4850 
4851 /*
4852  * When this is called, all of the log intent items which did not have
4853  * corresponding log done items should be in the AIL.  What we do now
4854  * is update the data structures associated with each one.
4855  *
4856  * Since we process the log intent items in normal transactions, they
4857  * will be removed at some point after the commit.  This prevents us
4858  * from just walking down the list processing each one.  We'll use a
4859  * flag in the intent item to skip those that we've already processed
4860  * and use the AIL iteration mechanism's generation count to try to
4861  * speed this up at least a bit.
4862  *
4863  * When we start, we know that the intents are the only things in the
4864  * AIL.  As we process them, however, other items are added to the
4865  * AIL.
4866  */
4867 STATIC int
4868 xlog_recover_process_intents(
4869 	struct xlog		*log)
4870 {
4871 	struct xfs_defer_ops	dfops;
4872 	struct xfs_ail_cursor	cur;
4873 	struct xfs_log_item	*lip;
4874 	struct xfs_ail		*ailp;
4875 	xfs_fsblock_t		firstfsb;
4876 	int			error = 0;
4877 #if defined(DEBUG) || defined(XFS_WARN)
4878 	xfs_lsn_t		last_lsn;
4879 #endif
4880 
4881 	ailp = log->l_ailp;
4882 	spin_lock(&ailp->xa_lock);
4883 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4884 #if defined(DEBUG) || defined(XFS_WARN)
4885 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4886 #endif
4887 	xfs_defer_init(&dfops, &firstfsb);
4888 	while (lip != NULL) {
4889 		/*
4890 		 * We're done when we see something other than an intent.
4891 		 * There should be no intents left in the AIL now.
4892 		 */
4893 		if (!xlog_item_is_intent(lip)) {
4894 #ifdef DEBUG
4895 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4896 				ASSERT(!xlog_item_is_intent(lip));
4897 #endif
4898 			break;
4899 		}
4900 
4901 		/*
4902 		 * We should never see a redo item with a LSN higher than
4903 		 * the last transaction we found in the log at the start
4904 		 * of recovery.
4905 		 */
4906 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4907 
4908 		/*
4909 		 * NOTE: If your intent processing routine can create more
4910 		 * deferred ops, you /must/ attach them to the dfops in this
4911 		 * routine or else those subsequent intents will get
4912 		 * replayed in the wrong order!
4913 		 */
4914 		switch (lip->li_type) {
4915 		case XFS_LI_EFI:
4916 			error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4917 			break;
4918 		case XFS_LI_RUI:
4919 			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4920 			break;
4921 		case XFS_LI_CUI:
4922 			error = xlog_recover_process_cui(log->l_mp, ailp, lip,
4923 					&dfops);
4924 			break;
4925 		case XFS_LI_BUI:
4926 			error = xlog_recover_process_bui(log->l_mp, ailp, lip,
4927 					&dfops);
4928 			break;
4929 		}
4930 		if (error)
4931 			goto out;
4932 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4933 	}
4934 out:
4935 	xfs_trans_ail_cursor_done(&cur);
4936 	spin_unlock(&ailp->xa_lock);
4937 	if (error)
4938 		xfs_defer_cancel(&dfops);
4939 	else
4940 		error = xlog_finish_defer_ops(log->l_mp, &dfops);
4941 
4942 	return error;
4943 }
4944 
4945 /*
4946  * A cancel occurs when the mount has failed and we're bailing out.
4947  * Release all pending log intent items so they don't pin the AIL.
4948  */
4949 STATIC int
4950 xlog_recover_cancel_intents(
4951 	struct xlog		*log)
4952 {
4953 	struct xfs_log_item	*lip;
4954 	int			error = 0;
4955 	struct xfs_ail_cursor	cur;
4956 	struct xfs_ail		*ailp;
4957 
4958 	ailp = log->l_ailp;
4959 	spin_lock(&ailp->xa_lock);
4960 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4961 	while (lip != NULL) {
4962 		/*
4963 		 * We're done when we see something other than an intent.
4964 		 * There should be no intents left in the AIL now.
4965 		 */
4966 		if (!xlog_item_is_intent(lip)) {
4967 #ifdef DEBUG
4968 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4969 				ASSERT(!xlog_item_is_intent(lip));
4970 #endif
4971 			break;
4972 		}
4973 
4974 		switch (lip->li_type) {
4975 		case XFS_LI_EFI:
4976 			xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4977 			break;
4978 		case XFS_LI_RUI:
4979 			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4980 			break;
4981 		case XFS_LI_CUI:
4982 			xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4983 			break;
4984 		case XFS_LI_BUI:
4985 			xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4986 			break;
4987 		}
4988 
4989 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
4990 	}
4991 
4992 	xfs_trans_ail_cursor_done(&cur);
4993 	spin_unlock(&ailp->xa_lock);
4994 	return error;
4995 }
4996 
4997 /*
4998  * This routine performs a transaction to null out a bad inode pointer
4999  * in an agi unlinked inode hash bucket.
5000  */
5001 STATIC void
5002 xlog_recover_clear_agi_bucket(
5003 	xfs_mount_t	*mp,
5004 	xfs_agnumber_t	agno,
5005 	int		bucket)
5006 {
5007 	xfs_trans_t	*tp;
5008 	xfs_agi_t	*agi;
5009 	xfs_buf_t	*agibp;
5010 	int		offset;
5011 	int		error;
5012 
5013 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
5014 	if (error)
5015 		goto out_error;
5016 
5017 	error = xfs_read_agi(mp, tp, agno, &agibp);
5018 	if (error)
5019 		goto out_abort;
5020 
5021 	agi = XFS_BUF_TO_AGI(agibp);
5022 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
5023 	offset = offsetof(xfs_agi_t, agi_unlinked) +
5024 		 (sizeof(xfs_agino_t) * bucket);
5025 	xfs_trans_log_buf(tp, agibp, offset,
5026 			  (offset + sizeof(xfs_agino_t) - 1));
5027 
5028 	error = xfs_trans_commit(tp);
5029 	if (error)
5030 		goto out_error;
5031 	return;
5032 
5033 out_abort:
5034 	xfs_trans_cancel(tp);
5035 out_error:
5036 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
5037 	return;
5038 }
5039 
5040 STATIC xfs_agino_t
5041 xlog_recover_process_one_iunlink(
5042 	struct xfs_mount		*mp,
5043 	xfs_agnumber_t			agno,
5044 	xfs_agino_t			agino,
5045 	int				bucket)
5046 {
5047 	struct xfs_buf			*ibp;
5048 	struct xfs_dinode		*dip;
5049 	struct xfs_inode		*ip;
5050 	xfs_ino_t			ino;
5051 	int				error;
5052 
5053 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
5054 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
5055 	if (error)
5056 		goto fail;
5057 
5058 	/*
5059 	 * Get the on disk inode to find the next inode in the bucket.
5060 	 */
5061 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
5062 	if (error)
5063 		goto fail_iput;
5064 
5065 	xfs_iflags_clear(ip, XFS_IRECOVERY);
5066 	ASSERT(VFS_I(ip)->i_nlink == 0);
5067 	ASSERT(VFS_I(ip)->i_mode != 0);
5068 
5069 	/* setup for the next pass */
5070 	agino = be32_to_cpu(dip->di_next_unlinked);
5071 	xfs_buf_relse(ibp);
5072 
5073 	/*
5074 	 * Prevent any DMAPI event from being sent when the reference on
5075 	 * the inode is dropped.
5076 	 */
5077 	ip->i_d.di_dmevmask = 0;
5078 
5079 	IRELE(ip);
5080 	return agino;
5081 
5082  fail_iput:
5083 	IRELE(ip);
5084  fail:
5085 	/*
5086 	 * We can't read in the inode this bucket points to, or this inode
5087 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
5088 	 * some inodes and space, but at least we won't hang.
5089 	 *
5090 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5091 	 * clear the inode pointer in the bucket.
5092 	 */
5093 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
5094 	return NULLAGINO;
5095 }
5096 
5097 /*
5098  * xlog_iunlink_recover
5099  *
5100  * This is called during recovery to process any inodes which
5101  * we unlinked but not freed when the system crashed.  These
5102  * inodes will be on the lists in the AGI blocks.  What we do
5103  * here is scan all the AGIs and fully truncate and free any
5104  * inodes found on the lists.  Each inode is removed from the
5105  * lists when it has been fully truncated and is freed.  The
5106  * freeing of the inode and its removal from the list must be
5107  * atomic.
5108  */
5109 STATIC void
5110 xlog_recover_process_iunlinks(
5111 	struct xlog	*log)
5112 {
5113 	xfs_mount_t	*mp;
5114 	xfs_agnumber_t	agno;
5115 	xfs_agi_t	*agi;
5116 	xfs_buf_t	*agibp;
5117 	xfs_agino_t	agino;
5118 	int		bucket;
5119 	int		error;
5120 	uint		mp_dmevmask;
5121 
5122 	mp = log->l_mp;
5123 
5124 	/*
5125 	 * Prevent any DMAPI event from being sent while in this function.
5126 	 */
5127 	mp_dmevmask = mp->m_dmevmask;
5128 	mp->m_dmevmask = 0;
5129 
5130 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5131 		/*
5132 		 * Find the agi for this ag.
5133 		 */
5134 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5135 		if (error) {
5136 			/*
5137 			 * AGI is b0rked. Don't process it.
5138 			 *
5139 			 * We should probably mark the filesystem as corrupt
5140 			 * after we've recovered all the ag's we can....
5141 			 */
5142 			continue;
5143 		}
5144 		/*
5145 		 * Unlock the buffer so that it can be acquired in the normal
5146 		 * course of the transaction to truncate and free each inode.
5147 		 * Because we are not racing with anyone else here for the AGI
5148 		 * buffer, we don't even need to hold it locked to read the
5149 		 * initial unlinked bucket entries out of the buffer. We keep
5150 		 * buffer reference though, so that it stays pinned in memory
5151 		 * while we need the buffer.
5152 		 */
5153 		agi = XFS_BUF_TO_AGI(agibp);
5154 		xfs_buf_unlock(agibp);
5155 
5156 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5157 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5158 			while (agino != NULLAGINO) {
5159 				agino = xlog_recover_process_one_iunlink(mp,
5160 							agno, agino, bucket);
5161 			}
5162 		}
5163 		xfs_buf_rele(agibp);
5164 	}
5165 
5166 	mp->m_dmevmask = mp_dmevmask;
5167 }
5168 
5169 STATIC int
5170 xlog_unpack_data(
5171 	struct xlog_rec_header	*rhead,
5172 	char			*dp,
5173 	struct xlog		*log)
5174 {
5175 	int			i, j, k;
5176 
5177 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5178 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5179 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5180 		dp += BBSIZE;
5181 	}
5182 
5183 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5184 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5185 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5186 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5187 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5188 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5189 			dp += BBSIZE;
5190 		}
5191 	}
5192 
5193 	return 0;
5194 }
5195 
5196 /*
5197  * CRC check, unpack and process a log record.
5198  */
5199 STATIC int
5200 xlog_recover_process(
5201 	struct xlog		*log,
5202 	struct hlist_head	rhash[],
5203 	struct xlog_rec_header	*rhead,
5204 	char			*dp,
5205 	int			pass,
5206 	struct list_head	*buffer_list)
5207 {
5208 	int			error;
5209 	__le32			old_crc = rhead->h_crc;
5210 	__le32			crc;
5211 
5212 
5213 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5214 
5215 	/*
5216 	 * Nothing else to do if this is a CRC verification pass. Just return
5217 	 * if this a record with a non-zero crc. Unfortunately, mkfs always
5218 	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
5219 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
5220 	 * know precisely what failed.
5221 	 */
5222 	if (pass == XLOG_RECOVER_CRCPASS) {
5223 		if (old_crc && crc != old_crc)
5224 			return -EFSBADCRC;
5225 		return 0;
5226 	}
5227 
5228 	/*
5229 	 * We're in the normal recovery path. Issue a warning if and only if the
5230 	 * CRC in the header is non-zero. This is an advisory warning and the
5231 	 * zero CRC check prevents warnings from being emitted when upgrading
5232 	 * the kernel from one that does not add CRCs by default.
5233 	 */
5234 	if (crc != old_crc) {
5235 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5236 			xfs_alert(log->l_mp,
5237 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
5238 					le32_to_cpu(old_crc),
5239 					le32_to_cpu(crc));
5240 			xfs_hex_dump(dp, 32);
5241 		}
5242 
5243 		/*
5244 		 * If the filesystem is CRC enabled, this mismatch becomes a
5245 		 * fatal log corruption failure.
5246 		 */
5247 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5248 			return -EFSCORRUPTED;
5249 	}
5250 
5251 	error = xlog_unpack_data(rhead, dp, log);
5252 	if (error)
5253 		return error;
5254 
5255 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5256 					 buffer_list);
5257 }
5258 
5259 STATIC int
5260 xlog_valid_rec_header(
5261 	struct xlog		*log,
5262 	struct xlog_rec_header	*rhead,
5263 	xfs_daddr_t		blkno)
5264 {
5265 	int			hlen;
5266 
5267 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5268 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5269 				XFS_ERRLEVEL_LOW, log->l_mp);
5270 		return -EFSCORRUPTED;
5271 	}
5272 	if (unlikely(
5273 	    (!rhead->h_version ||
5274 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5275 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5276 			__func__, be32_to_cpu(rhead->h_version));
5277 		return -EIO;
5278 	}
5279 
5280 	/* LR body must have data or it wouldn't have been written */
5281 	hlen = be32_to_cpu(rhead->h_len);
5282 	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
5283 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5284 				XFS_ERRLEVEL_LOW, log->l_mp);
5285 		return -EFSCORRUPTED;
5286 	}
5287 	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
5288 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5289 				XFS_ERRLEVEL_LOW, log->l_mp);
5290 		return -EFSCORRUPTED;
5291 	}
5292 	return 0;
5293 }
5294 
5295 /*
5296  * Read the log from tail to head and process the log records found.
5297  * Handle the two cases where the tail and head are in the same cycle
5298  * and where the active portion of the log wraps around the end of
5299  * the physical log separately.  The pass parameter is passed through
5300  * to the routines called to process the data and is not looked at
5301  * here.
5302  */
5303 STATIC int
5304 xlog_do_recovery_pass(
5305 	struct xlog		*log,
5306 	xfs_daddr_t		head_blk,
5307 	xfs_daddr_t		tail_blk,
5308 	int			pass,
5309 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
5310 {
5311 	xlog_rec_header_t	*rhead;
5312 	xfs_daddr_t		blk_no, rblk_no;
5313 	xfs_daddr_t		rhead_blk;
5314 	char			*offset;
5315 	xfs_buf_t		*hbp, *dbp;
5316 	int			error = 0, h_size, h_len;
5317 	int			error2 = 0;
5318 	int			bblks, split_bblks;
5319 	int			hblks, split_hblks, wrapped_hblks;
5320 	int			i;
5321 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
5322 	LIST_HEAD		(buffer_list);
5323 
5324 	ASSERT(head_blk != tail_blk);
5325 	blk_no = rhead_blk = tail_blk;
5326 
5327 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
5328 		INIT_HLIST_HEAD(&rhash[i]);
5329 
5330 	/*
5331 	 * Read the header of the tail block and get the iclog buffer size from
5332 	 * h_size.  Use this to tell how many sectors make up the log header.
5333 	 */
5334 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5335 		/*
5336 		 * When using variable length iclogs, read first sector of
5337 		 * iclog header and extract the header size from it.  Get a
5338 		 * new hbp that is the correct size.
5339 		 */
5340 		hbp = xlog_get_bp(log, 1);
5341 		if (!hbp)
5342 			return -ENOMEM;
5343 
5344 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5345 		if (error)
5346 			goto bread_err1;
5347 
5348 		rhead = (xlog_rec_header_t *)offset;
5349 		error = xlog_valid_rec_header(log, rhead, tail_blk);
5350 		if (error)
5351 			goto bread_err1;
5352 
5353 		/*
5354 		 * xfsprogs has a bug where record length is based on lsunit but
5355 		 * h_size (iclog size) is hardcoded to 32k. Now that we
5356 		 * unconditionally CRC verify the unmount record, this means the
5357 		 * log buffer can be too small for the record and cause an
5358 		 * overrun.
5359 		 *
5360 		 * Detect this condition here. Use lsunit for the buffer size as
5361 		 * long as this looks like the mkfs case. Otherwise, return an
5362 		 * error to avoid a buffer overrun.
5363 		 */
5364 		h_size = be32_to_cpu(rhead->h_size);
5365 		h_len = be32_to_cpu(rhead->h_len);
5366 		if (h_len > h_size) {
5367 			if (h_len <= log->l_mp->m_logbsize &&
5368 			    be32_to_cpu(rhead->h_num_logops) == 1) {
5369 				xfs_warn(log->l_mp,
5370 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
5371 					 h_size, log->l_mp->m_logbsize);
5372 				h_size = log->l_mp->m_logbsize;
5373 			} else
5374 				return -EFSCORRUPTED;
5375 		}
5376 
5377 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5378 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5379 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5380 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
5381 				hblks++;
5382 			xlog_put_bp(hbp);
5383 			hbp = xlog_get_bp(log, hblks);
5384 		} else {
5385 			hblks = 1;
5386 		}
5387 	} else {
5388 		ASSERT(log->l_sectBBsize == 1);
5389 		hblks = 1;
5390 		hbp = xlog_get_bp(log, 1);
5391 		h_size = XLOG_BIG_RECORD_BSIZE;
5392 	}
5393 
5394 	if (!hbp)
5395 		return -ENOMEM;
5396 	dbp = xlog_get_bp(log, BTOBB(h_size));
5397 	if (!dbp) {
5398 		xlog_put_bp(hbp);
5399 		return -ENOMEM;
5400 	}
5401 
5402 	memset(rhash, 0, sizeof(rhash));
5403 	if (tail_blk > head_blk) {
5404 		/*
5405 		 * Perform recovery around the end of the physical log.
5406 		 * When the head is not on the same cycle number as the tail,
5407 		 * we can't do a sequential recovery.
5408 		 */
5409 		while (blk_no < log->l_logBBsize) {
5410 			/*
5411 			 * Check for header wrapping around physical end-of-log
5412 			 */
5413 			offset = hbp->b_addr;
5414 			split_hblks = 0;
5415 			wrapped_hblks = 0;
5416 			if (blk_no + hblks <= log->l_logBBsize) {
5417 				/* Read header in one read */
5418 				error = xlog_bread(log, blk_no, hblks, hbp,
5419 						   &offset);
5420 				if (error)
5421 					goto bread_err2;
5422 			} else {
5423 				/* This LR is split across physical log end */
5424 				if (blk_no != log->l_logBBsize) {
5425 					/* some data before physical log end */
5426 					ASSERT(blk_no <= INT_MAX);
5427 					split_hblks = log->l_logBBsize - (int)blk_no;
5428 					ASSERT(split_hblks > 0);
5429 					error = xlog_bread(log, blk_no,
5430 							   split_hblks, hbp,
5431 							   &offset);
5432 					if (error)
5433 						goto bread_err2;
5434 				}
5435 
5436 				/*
5437 				 * Note: this black magic still works with
5438 				 * large sector sizes (non-512) only because:
5439 				 * - we increased the buffer size originally
5440 				 *   by 1 sector giving us enough extra space
5441 				 *   for the second read;
5442 				 * - the log start is guaranteed to be sector
5443 				 *   aligned;
5444 				 * - we read the log end (LR header start)
5445 				 *   _first_, then the log start (LR header end)
5446 				 *   - order is important.
5447 				 */
5448 				wrapped_hblks = hblks - split_hblks;
5449 				error = xlog_bread_offset(log, 0,
5450 						wrapped_hblks, hbp,
5451 						offset + BBTOB(split_hblks));
5452 				if (error)
5453 					goto bread_err2;
5454 			}
5455 			rhead = (xlog_rec_header_t *)offset;
5456 			error = xlog_valid_rec_header(log, rhead,
5457 						split_hblks ? blk_no : 0);
5458 			if (error)
5459 				goto bread_err2;
5460 
5461 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5462 			blk_no += hblks;
5463 
5464 			/*
5465 			 * Read the log record data in multiple reads if it
5466 			 * wraps around the end of the log. Note that if the
5467 			 * header already wrapped, blk_no could point past the
5468 			 * end of the log. The record data is contiguous in
5469 			 * that case.
5470 			 */
5471 			if (blk_no + bblks <= log->l_logBBsize ||
5472 			    blk_no >= log->l_logBBsize) {
5473 				/* mod blk_no in case the header wrapped and
5474 				 * pushed it beyond the end of the log */
5475 				rblk_no = do_mod(blk_no, log->l_logBBsize);
5476 				error = xlog_bread(log, rblk_no, bblks, dbp,
5477 						   &offset);
5478 				if (error)
5479 					goto bread_err2;
5480 			} else {
5481 				/* This log record is split across the
5482 				 * physical end of log */
5483 				offset = dbp->b_addr;
5484 				split_bblks = 0;
5485 				if (blk_no != log->l_logBBsize) {
5486 					/* some data is before the physical
5487 					 * end of log */
5488 					ASSERT(!wrapped_hblks);
5489 					ASSERT(blk_no <= INT_MAX);
5490 					split_bblks =
5491 						log->l_logBBsize - (int)blk_no;
5492 					ASSERT(split_bblks > 0);
5493 					error = xlog_bread(log, blk_no,
5494 							split_bblks, dbp,
5495 							&offset);
5496 					if (error)
5497 						goto bread_err2;
5498 				}
5499 
5500 				/*
5501 				 * Note: this black magic still works with
5502 				 * large sector sizes (non-512) only because:
5503 				 * - we increased the buffer size originally
5504 				 *   by 1 sector giving us enough extra space
5505 				 *   for the second read;
5506 				 * - the log start is guaranteed to be sector
5507 				 *   aligned;
5508 				 * - we read the log end (LR header start)
5509 				 *   _first_, then the log start (LR header end)
5510 				 *   - order is important.
5511 				 */
5512 				error = xlog_bread_offset(log, 0,
5513 						bblks - split_bblks, dbp,
5514 						offset + BBTOB(split_bblks));
5515 				if (error)
5516 					goto bread_err2;
5517 			}
5518 
5519 			error = xlog_recover_process(log, rhash, rhead, offset,
5520 						     pass, &buffer_list);
5521 			if (error)
5522 				goto bread_err2;
5523 
5524 			blk_no += bblks;
5525 			rhead_blk = blk_no;
5526 		}
5527 
5528 		ASSERT(blk_no >= log->l_logBBsize);
5529 		blk_no -= log->l_logBBsize;
5530 		rhead_blk = blk_no;
5531 	}
5532 
5533 	/* read first part of physical log */
5534 	while (blk_no < head_blk) {
5535 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5536 		if (error)
5537 			goto bread_err2;
5538 
5539 		rhead = (xlog_rec_header_t *)offset;
5540 		error = xlog_valid_rec_header(log, rhead, blk_no);
5541 		if (error)
5542 			goto bread_err2;
5543 
5544 		/* blocks in data section */
5545 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5546 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5547 				   &offset);
5548 		if (error)
5549 			goto bread_err2;
5550 
5551 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
5552 					     &buffer_list);
5553 		if (error)
5554 			goto bread_err2;
5555 
5556 		blk_no += bblks + hblks;
5557 		rhead_blk = blk_no;
5558 	}
5559 
5560  bread_err2:
5561 	xlog_put_bp(dbp);
5562  bread_err1:
5563 	xlog_put_bp(hbp);
5564 
5565 	/*
5566 	 * Submit buffers that have been added from the last record processed,
5567 	 * regardless of error status.
5568 	 */
5569 	if (!list_empty(&buffer_list))
5570 		error2 = xfs_buf_delwri_submit(&buffer_list);
5571 
5572 	if (error && first_bad)
5573 		*first_bad = rhead_blk;
5574 
5575 	/*
5576 	 * Transactions are freed at commit time but transactions without commit
5577 	 * records on disk are never committed. Free any that may be left in the
5578 	 * hash table.
5579 	 */
5580 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5581 		struct hlist_node	*tmp;
5582 		struct xlog_recover	*trans;
5583 
5584 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5585 			xlog_recover_free_trans(trans);
5586 	}
5587 
5588 	return error ? error : error2;
5589 }
5590 
5591 /*
5592  * Do the recovery of the log.  We actually do this in two phases.
5593  * The two passes are necessary in order to implement the function
5594  * of cancelling a record written into the log.  The first pass
5595  * determines those things which have been cancelled, and the
5596  * second pass replays log items normally except for those which
5597  * have been cancelled.  The handling of the replay and cancellations
5598  * takes place in the log item type specific routines.
5599  *
5600  * The table of items which have cancel records in the log is allocated
5601  * and freed at this level, since only here do we know when all of
5602  * the log recovery has been completed.
5603  */
5604 STATIC int
5605 xlog_do_log_recovery(
5606 	struct xlog	*log,
5607 	xfs_daddr_t	head_blk,
5608 	xfs_daddr_t	tail_blk)
5609 {
5610 	int		error, i;
5611 
5612 	ASSERT(head_blk != tail_blk);
5613 
5614 	/*
5615 	 * First do a pass to find all of the cancelled buf log items.
5616 	 * Store them in the buf_cancel_table for use in the second pass.
5617 	 */
5618 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5619 						 sizeof(struct list_head),
5620 						 KM_SLEEP);
5621 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5622 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5623 
5624 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5625 				      XLOG_RECOVER_PASS1, NULL);
5626 	if (error != 0) {
5627 		kmem_free(log->l_buf_cancel_table);
5628 		log->l_buf_cancel_table = NULL;
5629 		return error;
5630 	}
5631 	/*
5632 	 * Then do a second pass to actually recover the items in the log.
5633 	 * When it is complete free the table of buf cancel items.
5634 	 */
5635 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5636 				      XLOG_RECOVER_PASS2, NULL);
5637 #ifdef DEBUG
5638 	if (!error) {
5639 		int	i;
5640 
5641 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5642 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5643 	}
5644 #endif	/* DEBUG */
5645 
5646 	kmem_free(log->l_buf_cancel_table);
5647 	log->l_buf_cancel_table = NULL;
5648 
5649 	return error;
5650 }
5651 
5652 /*
5653  * Do the actual recovery
5654  */
5655 STATIC int
5656 xlog_do_recover(
5657 	struct xlog	*log,
5658 	xfs_daddr_t	head_blk,
5659 	xfs_daddr_t	tail_blk)
5660 {
5661 	struct xfs_mount *mp = log->l_mp;
5662 	int		error;
5663 	xfs_buf_t	*bp;
5664 	xfs_sb_t	*sbp;
5665 
5666 	trace_xfs_log_recover(log, head_blk, tail_blk);
5667 
5668 	/*
5669 	 * First replay the images in the log.
5670 	 */
5671 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
5672 	if (error)
5673 		return error;
5674 
5675 	/*
5676 	 * If IO errors happened during recovery, bail out.
5677 	 */
5678 	if (XFS_FORCED_SHUTDOWN(mp)) {
5679 		return -EIO;
5680 	}
5681 
5682 	/*
5683 	 * We now update the tail_lsn since much of the recovery has completed
5684 	 * and there may be space available to use.  If there were no extent
5685 	 * or iunlinks, we can free up the entire log and set the tail_lsn to
5686 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
5687 	 * lsn of the last known good LR on disk.  If there are extent frees
5688 	 * or iunlinks they will have some entries in the AIL; so we look at
5689 	 * the AIL to determine how to set the tail_lsn.
5690 	 */
5691 	xlog_assign_tail_lsn(mp);
5692 
5693 	/*
5694 	 * Now that we've finished replaying all buffer and inode
5695 	 * updates, re-read in the superblock and reverify it.
5696 	 */
5697 	bp = xfs_getsb(mp, 0);
5698 	bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5699 	ASSERT(!(bp->b_flags & XBF_WRITE));
5700 	bp->b_flags |= XBF_READ;
5701 	bp->b_ops = &xfs_sb_buf_ops;
5702 
5703 	error = xfs_buf_submit_wait(bp);
5704 	if (error) {
5705 		if (!XFS_FORCED_SHUTDOWN(mp)) {
5706 			xfs_buf_ioerror_alert(bp, __func__);
5707 			ASSERT(0);
5708 		}
5709 		xfs_buf_relse(bp);
5710 		return error;
5711 	}
5712 
5713 	/* Convert superblock from on-disk format */
5714 	sbp = &mp->m_sb;
5715 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5716 	xfs_buf_relse(bp);
5717 
5718 	/* re-initialise in-core superblock and geometry structures */
5719 	xfs_reinit_percpu_counters(mp);
5720 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5721 	if (error) {
5722 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5723 		return error;
5724 	}
5725 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5726 
5727 	xlog_recover_check_summary(log);
5728 
5729 	/* Normal transactions can now occur */
5730 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5731 	return 0;
5732 }
5733 
5734 /*
5735  * Perform recovery and re-initialize some log variables in xlog_find_tail.
5736  *
5737  * Return error or zero.
5738  */
5739 int
5740 xlog_recover(
5741 	struct xlog	*log)
5742 {
5743 	xfs_daddr_t	head_blk, tail_blk;
5744 	int		error;
5745 
5746 	/* find the tail of the log */
5747 	error = xlog_find_tail(log, &head_blk, &tail_blk);
5748 	if (error)
5749 		return error;
5750 
5751 	/*
5752 	 * The superblock was read before the log was available and thus the LSN
5753 	 * could not be verified. Check the superblock LSN against the current
5754 	 * LSN now that it's known.
5755 	 */
5756 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5757 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5758 		return -EINVAL;
5759 
5760 	if (tail_blk != head_blk) {
5761 		/* There used to be a comment here:
5762 		 *
5763 		 * disallow recovery on read-only mounts.  note -- mount
5764 		 * checks for ENOSPC and turns it into an intelligent
5765 		 * error message.
5766 		 * ...but this is no longer true.  Now, unless you specify
5767 		 * NORECOVERY (in which case this function would never be
5768 		 * called), we just go ahead and recover.  We do this all
5769 		 * under the vfs layer, so we can get away with it unless
5770 		 * the device itself is read-only, in which case we fail.
5771 		 */
5772 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
5773 			return error;
5774 		}
5775 
5776 		/*
5777 		 * Version 5 superblock log feature mask validation. We know the
5778 		 * log is dirty so check if there are any unknown log features
5779 		 * in what we need to recover. If there are unknown features
5780 		 * (e.g. unsupported transactions, then simply reject the
5781 		 * attempt at recovery before touching anything.
5782 		 */
5783 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5784 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5785 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5786 			xfs_warn(log->l_mp,
5787 "Superblock has unknown incompatible log features (0x%x) enabled.",
5788 				(log->l_mp->m_sb.sb_features_log_incompat &
5789 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5790 			xfs_warn(log->l_mp,
5791 "The log can not be fully and/or safely recovered by this kernel.");
5792 			xfs_warn(log->l_mp,
5793 "Please recover the log on a kernel that supports the unknown features.");
5794 			return -EINVAL;
5795 		}
5796 
5797 		/*
5798 		 * Delay log recovery if the debug hook is set. This is debug
5799 		 * instrumention to coordinate simulation of I/O failures with
5800 		 * log recovery.
5801 		 */
5802 		if (xfs_globals.log_recovery_delay) {
5803 			xfs_notice(log->l_mp,
5804 				"Delaying log recovery for %d seconds.",
5805 				xfs_globals.log_recovery_delay);
5806 			msleep(xfs_globals.log_recovery_delay * 1000);
5807 		}
5808 
5809 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5810 				log->l_mp->m_logname ? log->l_mp->m_logname
5811 						     : "internal");
5812 
5813 		error = xlog_do_recover(log, head_blk, tail_blk);
5814 		log->l_flags |= XLOG_RECOVERY_NEEDED;
5815 	}
5816 	return error;
5817 }
5818 
5819 /*
5820  * In the first part of recovery we replay inodes and buffers and build
5821  * up the list of extent free items which need to be processed.  Here
5822  * we process the extent free items and clean up the on disk unlinked
5823  * inode lists.  This is separated from the first part of recovery so
5824  * that the root and real-time bitmap inodes can be read in from disk in
5825  * between the two stages.  This is necessary so that we can free space
5826  * in the real-time portion of the file system.
5827  */
5828 int
5829 xlog_recover_finish(
5830 	struct xlog	*log)
5831 {
5832 	/*
5833 	 * Now we're ready to do the transactions needed for the
5834 	 * rest of recovery.  Start with completing all the extent
5835 	 * free intent records and then process the unlinked inode
5836 	 * lists.  At this point, we essentially run in normal mode
5837 	 * except that we're still performing recovery actions
5838 	 * rather than accepting new requests.
5839 	 */
5840 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5841 		int	error;
5842 		error = xlog_recover_process_intents(log);
5843 		if (error) {
5844 			xfs_alert(log->l_mp, "Failed to recover intents");
5845 			return error;
5846 		}
5847 
5848 		/*
5849 		 * Sync the log to get all the intents out of the AIL.
5850 		 * This isn't absolutely necessary, but it helps in
5851 		 * case the unlink transactions would have problems
5852 		 * pushing the intents out of the way.
5853 		 */
5854 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5855 
5856 		xlog_recover_process_iunlinks(log);
5857 
5858 		xlog_recover_check_summary(log);
5859 
5860 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5861 				log->l_mp->m_logname ? log->l_mp->m_logname
5862 						     : "internal");
5863 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5864 	} else {
5865 		xfs_info(log->l_mp, "Ending clean mount");
5866 	}
5867 	return 0;
5868 }
5869 
5870 int
5871 xlog_recover_cancel(
5872 	struct xlog	*log)
5873 {
5874 	int		error = 0;
5875 
5876 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
5877 		error = xlog_recover_cancel_intents(log);
5878 
5879 	return error;
5880 }
5881 
5882 #if defined(DEBUG)
5883 /*
5884  * Read all of the agf and agi counters and check that they
5885  * are consistent with the superblock counters.
5886  */
5887 STATIC void
5888 xlog_recover_check_summary(
5889 	struct xlog	*log)
5890 {
5891 	xfs_mount_t	*mp;
5892 	xfs_agf_t	*agfp;
5893 	xfs_buf_t	*agfbp;
5894 	xfs_buf_t	*agibp;
5895 	xfs_agnumber_t	agno;
5896 	uint64_t	freeblks;
5897 	uint64_t	itotal;
5898 	uint64_t	ifree;
5899 	int		error;
5900 
5901 	mp = log->l_mp;
5902 
5903 	freeblks = 0LL;
5904 	itotal = 0LL;
5905 	ifree = 0LL;
5906 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5907 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5908 		if (error) {
5909 			xfs_alert(mp, "%s agf read failed agno %d error %d",
5910 						__func__, agno, error);
5911 		} else {
5912 			agfp = XFS_BUF_TO_AGF(agfbp);
5913 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
5914 				    be32_to_cpu(agfp->agf_flcount);
5915 			xfs_buf_relse(agfbp);
5916 		}
5917 
5918 		error = xfs_read_agi(mp, NULL, agno, &agibp);
5919 		if (error) {
5920 			xfs_alert(mp, "%s agi read failed agno %d error %d",
5921 						__func__, agno, error);
5922 		} else {
5923 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
5924 
5925 			itotal += be32_to_cpu(agi->agi_count);
5926 			ifree += be32_to_cpu(agi->agi_freecount);
5927 			xfs_buf_relse(agibp);
5928 		}
5929 	}
5930 }
5931 #endif /* DEBUG */
5932