/* xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision 80ecbd24) */
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_icreate_item.h"

/* Need all the magic numbers and buffer ops structures from these headers */
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
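
/*
 * Illustrative sketch (not part of the original file): the buffer sizing
 * rule above, reproduced as a standalone userspace demo so the rounding
 * math is easy to check.  The helper name demo_log_buf_size() and the
 * sample sector sizes are hypothetical.
 */
#if 0	/* example only -- build as a normal userspace program */
#include <stdio.h>

/* same semantics as the kernel's round_up() for a power-of-2 'align' */
#define demo_round_up(x, align)	(((x) + (align) - 1) & ~((align) - 1))

static int demo_log_buf_size(int nbblks, int sectBBsize)
{
	/* possible non-sector-aligned start: pad by one full log sector */
	if (nbblks > 1 && sectBBsize > 1)
		nbblks += sectBBsize;
	return demo_round_up(nbblks, sectBBsize);
}

int main(void)
{
	/* 3 blocks at 8-block sectors -> 3 + 8 = 11, rounded up to 16 */
	printf("%d\n", demo_log_buf_size(3, 8));	/* prints 16 */
	/* 1 block never straddles a sector, so no padding: prints 8 */
	printf("%d\n", demo_log_buf_size(1, 8));
	return 0;
}
#endif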

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
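
/*
 * Illustrative sketch (not part of the original file): the same bisection
 * over an in-memory array of cycle numbers instead of disk blocks.  The
 * log starts as "cycle 2" blocks and has been partially overwritten with
 * "cycle 3" from block 0; we look for the first block still holding the
 * old cycle.  All names here are hypothetical demo code.
 */
#if 0	/* example only -- build as a normal userspace program */
#include <stdio.h>

#define BLK_AVG_DEMO(a, b)	(((a) + (b)) >> 1)

static int demo_find_cycle_start(const unsigned *cycles, int first_blk,
				 int last_blk, unsigned cycle)
{
	int mid_blk = BLK_AVG_DEMO(first_blk, last_blk);

	while (mid_blk != first_blk && mid_blk != last_blk) {
		if (cycles[mid_blk] == cycle)
			last_blk = mid_blk;	/* target is at or before mid */
		else
			first_blk = mid_blk;	/* target is after mid */
		mid_blk = BLK_AVG_DEMO(first_blk, last_blk);
	}
	return last_blk;
}

int main(void)
{
	/* blocks 0-4 rewritten with cycle 3, blocks 5-9 still cycle 2 */
	unsigned cycles[10] = { 3, 3, 3, 3, 3, 2, 2, 2, 2, 2 };

	printf("%d\n", demo_find_cycle_start(cycles, 0, 9, 2)); /* prints 5 */
	return 0;
}
#endif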

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
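
/*
 * Illustrative sketch (not part of the original file): the length check
 * above with concrete numbers.  Suppose the header found at block i says
 * the record body is h_len = 7680 bytes (15 basic blocks) and needs one
 * header block (xhdrs = 1).  A complete record then spans 16 blocks, so
 * last_blk is left alone only when it sits exactly 16 blocks past i.
 * Hypothetical userspace demo code.
 */
#if 0	/* example only */
#include <stdio.h>

#define BBSHIFT_DEMO	9			/* 512-byte basic blocks */
#define BTOBB_DEMO(b)	(((b) + (1 << BBSHIFT_DEMO) - 1) >> BBSHIFT_DEMO)

int main(void)
{
	long i = 100, last_blk = 116, extra_bblks = 0;
	unsigned h_len = 7680;			/* record payload bytes */
	int xhdrs = 1;				/* one header block */

	if (last_blk - i + extra_bblks != BTOBB_DEMO(h_len) + xhdrs)
		last_blk = i;			/* mid-record: back up */

	/* 116 - 100 == 15 + 1, so last_blk stays 116 */
	printf("%ld\n", last_blk);
	return 0;
}
#endif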

/*
 * Head is defined to be the point of the log where the next log
 * write could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		ASSERT(0);
		/* free the buffer we allocated above before bailing out */
		xlog_put_bp(bp);
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
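
/*
 * Illustrative sketch (not part of the original file): how an LSN packs a
 * cycle number and a block number into one 64-bit value, which is why the
 * BLOCK_LSN()/CYCLE_LSN() extraction used above is cheap.  The demo macros
 * are hypothetical stand-ins for the kernel's xlog_assign_lsn(),
 * CYCLE_LSN() and BLOCK_LSN().
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ASSIGN_LSN(cycle, block)	(((uint64_t)(cycle) << 32) | (block))
#define DEMO_CYCLE_LSN(lsn)		((uint32_t)((lsn) >> 32))
#define DEMO_BLOCK_LSN(lsn)		((uint32_t)((lsn) & 0xffffffff))

int main(void)
{
	uint64_t lsn = DEMO_ASSIGN_LSN(7, 1234);

	/* prints "cycle 7 block 1234" */
	printf("cycle %u block %u\n",
	       DEMO_CYCLE_LSN(lsn), DEMO_BLOCK_LSN(lsn));
	return 0;
}
#endif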

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		/* take the error exit so the buffer is freed, too */
		error = XFS_ERROR(EINVAL);
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
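
/*
 * Illustrative sketch (not part of the original file): the alignment
 * bookkeeping above with concrete numbers.  With 8-block sectors, a
 * request to stamp blocks 10..25 must be widened to the sector-aligned
 * span 8..32; 'j' skips the leading blocks that were only read back to
 * preserve them.  Hypothetical userspace demo.
 */
#if 0	/* example only */
#include <stdio.h>

#define demo_round_down(x, align)	((x) & ~((align) - 1))

int main(void)
{
	int sectbb = 8, start_block = 10, blocks = 16;
	int end_block = start_block + blocks;		/* 26 */
	int balign = demo_round_down(start_block, sectbb);
	int ealign = demo_round_down(end_block, sectbb);
	int j = start_block - balign;			/* blocks to skip */

	/* prints "balign=8 ealign=24 j=2": read the sector at 8 to keep
	 * blocks 8-9, read the sector at 24 to keep blocks 26-31 */
	printf("balign=%d ealign=%d j=%d\n", balign, ealign, j);
	return 0;
}
#endif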

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
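
/*
 * Illustrative sketch (not part of the original file): the two distance
 * cases above on a 100-block circular log.  When head and tail share a
 * cycle the tail has wrapped behind the head, so the gap runs through
 * the end of the log; otherwise the gap is a simple subtraction.
 * Hypothetical userspace demo.
 */
#if 0	/* example only */
#include <stdio.h>

static int demo_tail_distance(int logbbsize, int head_cycle, int head_block,
			      int tail_cycle, int tail_block)
{
	if (head_cycle == tail_cycle)
		/* tail physically behind head: wrap through the log end */
		return tail_block + (logbbsize - head_block);
	/* head physically behind tail, on the next cycle */
	return tail_block - head_block;
}

int main(void)
{
	/* same cycle: head 80, tail 30 -> 30 + (100 - 80) = 50 */
	printf("%d\n", demo_tail_distance(100, 5, 80, 5, 30));
	/* head on cycle n+1 at block 10, tail at 60 -> 60 - 10 = 50 */
	printf("%d\n", demo_tail_distance(100, 6, 10, 5, 60));
	return 0;
}
#endif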

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid   = tid;
	trans->r_lsn	   = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			/* free the region copy made above before bailing */
			kmem_free(ptr);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
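
/*
 * Illustrative sketch (not part of the original file): the layout
 * assumption the comment above relies on -- a log item format structure
 * starts with a 16-bit type and a 16-bit size, so the first 32 bits are
 * enough to identify an item and its region count.  The demo struct is a
 * hypothetical stand-in for xfs_inode_log_format_t and friends.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_log_format {
	uint16_t	ilf_type;	/* log item type */
	uint16_t	ilf_size;	/* number of regions to follow */
	/* ... remainder of the format structure ... */
	uint32_t	ilf_fields;
};

int main(void)
{
	/* both fields must live inside the first 32 bits of the struct */
	printf("type at %zu, size at %zu\n",
	       offsetof(struct demo_log_format, ilf_type),
	       offsetof(struct demo_log_format, ilf_size));	/* 0 and 2 */
	return 0;
}
#endif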
1601 
1602 /*
1603  * Sort the log items in the transaction.
1604  *
1605  * The ordering constraints are defined by the inode allocation and unlink
1606  * behaviour. The rules are:
1607  *
1608  *	1. Every item is only logged once in a given transaction. Hence it
1609  *	   represents the last logged state of the item. Hence ordering is
1610  *	   dependent on the order in which operations need to be performed so
1611  *	   required initial conditions are always met.
1612  *
1613  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1614  *	   there's nothing to replay from them so we can simply cull them
1615  *	   from the transaction. However, we can't do that until after we've
1616  *	   replayed all the other items because they may be dependent on the
1617  *	   cancelled buffer and replaying the cancelled buffer can remove it
1618  *	   form the cancelled buffer table. Hence they have tobe done last.
1619  *
1620  *	3. Inode allocation buffers must be replayed before inode items that
1621  *	   read the buffer and replay changes into it. For filesystems using the
1622  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1623  *	   treated the same as inode allocation buffers as they create and
1624  *	   initialise the buffers directly.
1625  *
1626  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1627  *	   This ensures that inodes are completely flushed to the inode buffer
1628  *	   in a "free" state before we remove the unlinked inode list pointer.
1629  *
1630  * Hence the ordering needs to be inode allocation buffers first, inode items
1631  * second, inode unlink buffers third and cancelled buffers last.
1632  *
1633  * But there's a problem with that - we can't tell an inode allocation buffer
1634  * apart from a regular buffer, so we can't separate them. We can, however,
1635  * tell an inode unlink buffer from the others, and so we can separate them out
1636  * from all the other buffers and move them to last.
1637  *
1638  * Hence, 4 lists, in order from head to tail:
1639  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1640  *	- item_list for all non-buffer items
1641  *	- inode_buffer_list for inode unlink buffers
1642  *	- cancel_list for the cancelled buffers
1643  *
1644  * Note that we add objects to the tail of the lists so that first-to-last
1645  * ordering is preserved within the lists. Adding objects to the head of the
1646  * list means when we traverse from the head we walk them in last-to-first
1647  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1648  * but for all other items there may be specific ordering that we need to
1649  * preserve.
1650  */
1651 STATIC int
1652 xlog_recover_reorder_trans(
1653 	struct xlog		*log,
1654 	struct xlog_recover	*trans,
1655 	int			pass)
1656 {
1657 	xlog_recover_item_t	*item, *n;
1658 	LIST_HEAD(sort_list);
1659 	LIST_HEAD(cancel_list);
1660 	LIST_HEAD(buffer_list);
1661 	LIST_HEAD(inode_buffer_list);
1662 	LIST_HEAD(inode_list);
1663 
1664 	list_splice_init(&trans->r_itemq, &sort_list);
1665 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1666 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1667 
1668 		switch (ITEM_TYPE(item)) {
1669 		case XFS_LI_ICREATE:
1670 			list_move_tail(&item->ri_list, &buffer_list);
1671 			break;
1672 		case XFS_LI_BUF:
1673 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1674 				trace_xfs_log_recover_item_reorder_head(log,
1675 							trans, item, pass);
1676 				list_move(&item->ri_list, &cancel_list);
1677 				break;
1678 			}
1679 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1680 				list_move(&item->ri_list, &inode_buffer_list);
1681 				break;
1682 			}
1683 			list_move_tail(&item->ri_list, &buffer_list);
1684 			break;
1685 		case XFS_LI_INODE:
1686 		case XFS_LI_DQUOT:
1687 		case XFS_LI_QUOTAOFF:
1688 		case XFS_LI_EFD:
1689 		case XFS_LI_EFI:
1690 			trace_xfs_log_recover_item_reorder_tail(log,
1691 							trans, item, pass);
1692 			list_move_tail(&item->ri_list, &inode_list);
1693 			break;
1694 		default:
1695 			xfs_warn(log->l_mp,
1696 				"%s: unrecognized type of log operation",
1697 				__func__);
1698 			ASSERT(0);
1699 			return XFS_ERROR(EIO);
1700 		}
1701 	}
1702 	ASSERT(list_empty(&sort_list));
1703 	if (!list_empty(&buffer_list))
1704 		list_splice(&buffer_list, &trans->r_itemq);
1705 	if (!list_empty(&inode_list))
1706 		list_splice_tail(&inode_list, &trans->r_itemq);
1707 	if (!list_empty(&inode_buffer_list))
1708 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1709 	if (!list_empty(&cancel_list))
1710 		list_splice_tail(&cancel_list, &trans->r_itemq);
1711 	return 0;
1712 }
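
/*
 * Editor's illustration (not from the original source): given a
 * transaction whose r_itemq holds, in log order,
 *
 *	buf A (cancelled), inode I1, buf B, buf C (inode unlink), inode I2
 *
 * the function above leaves r_itemq reading
 *
 *	buf B | inode I1, inode I2 | buf C | buf A
 *
 * i.e. plain buffers first (buffer_list spliced at the head of the empty
 * queue), then non-buffer items in their original relative order, then
 * inode unlink buffers, then cancelled buffers at the tail.
 */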
1713 
1714 /*
1715  * Build up the table of buf cancel records so that we don't replay
1716  * cancelled data in the second pass.  For buffer records that are
1717  * not cancel records, there is nothing to do here so we just return.
1718  *
1719  * If we get a cancel record which is already in the table, this indicates
1720  * that the buffer was cancelled multiple times.  In order to ensure
1721  * that during pass 2 we keep the record in the table until we reach its
1722  * last occurrence in the log, we keep a reference count in the cancel
1723  * record in the table to tell us how many times we expect to see this
1724  * record during the second pass.
1725  */
1726 STATIC int
1727 xlog_recover_buffer_pass1(
1728 	struct xlog			*log,
1729 	struct xlog_recover_item	*item)
1730 {
1731 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1732 	struct list_head	*bucket;
1733 	struct xfs_buf_cancel	*bcp;
1734 
1735 	/*
1736 	 * If this isn't a cancel buffer item, then just return.
1737 	 */
1738 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1739 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1740 		return 0;
1741 	}
1742 
1743 	/*
1744 	 * Insert an xfs_buf_cancel record into the hash table. If an
1745 	 * identical record already exists, bump its reference count.
1746 	 */
1747 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1748 	list_for_each_entry(bcp, bucket, bc_list) {
1749 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1750 		    bcp->bc_len == buf_f->blf_len) {
1751 			bcp->bc_refcount++;
1752 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1753 			return 0;
1754 		}
1755 	}
1756 
1757 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1758 	bcp->bc_blkno = buf_f->blf_blkno;
1759 	bcp->bc_len = buf_f->blf_len;
1760 	bcp->bc_refcount = 1;
1761 	list_add_tail(&bcp->bc_list, bucket);
1762 
1763 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1764 	return 0;
1765 }
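
/*
 * Editor's sketch of the structure assumed above: l_buf_cancel_table is
 * an array of list_heads indexed by a hash of the block number. In
 * xfs_log_priv.h the bucket macro is roughly
 *
 *	#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
 *		((log)->l_buf_cancel_table + \
 *		 ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
 *
 * so all records for a given disk block hash to the same bucket and the
 * list walk above only needs to compare bc_blkno/bc_len.
 */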
1766 
1767 /*
1768  * Check to see whether the buffer being recovered has a corresponding
1769  * entry in the buffer cancel record table.  If it does then return 1
1770  * so that it will be cancelled, otherwise return 0.  If the buffer is
1771  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1772  * the refcount on the entry in the table and remove it from the table
1773  * if this is the last reference.
1774  *
1775  * We remove the cancel record from the table when we encounter its
1776  * last occurrence in the log so that if the same buffer is re-used
1777  * again after its last cancellation we actually replay the changes
1778  * made at that point.
1779  */
1780 STATIC int
1781 xlog_check_buffer_cancelled(
1782 	struct xlog		*log,
1783 	xfs_daddr_t		blkno,
1784 	uint			len,
1785 	ushort			flags)
1786 {
1787 	struct list_head	*bucket;
1788 	struct xfs_buf_cancel	*bcp;
1789 
1790 	if (log->l_buf_cancel_table == NULL) {
1791 		/*
1792 		 * There is nothing in the table built in pass one,
1793 		 * so this buffer must not be cancelled.
1794 		 */
1795 		ASSERT(!(flags & XFS_BLF_CANCEL));
1796 		return 0;
1797 	}
1798 
1799 	/*
1800 	 * Search for an entry in the cancel table that matches our buffer.
1801 	 */
1802 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1803 	list_for_each_entry(bcp, bucket, bc_list) {
1804 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1805 			goto found;
1806 	}
1807 
1808 	/*
1809 	 * We didn't find a corresponding entry in the table, so return 0 so
1810 	 * that the buffer is NOT cancelled.
1811 	 */
1812 	ASSERT(!(flags & XFS_BLF_CANCEL));
1813 	return 0;
1814 
1815 found:
1816 	/*
1817 	 * We've got a match, so return 1 so that the recovery of this buffer
1818 	 * is cancelled.  If this buffer is actually a buffer cancel log
1819 	 * item, then decrement the refcount on the one in the table and
1820 	 * remove it if this is the last reference.
1821 	 */
1822 	if (flags & XFS_BLF_CANCEL) {
1823 		if (--bcp->bc_refcount == 0) {
1824 			list_del(&bcp->bc_list);
1825 			kmem_free(bcp);
1826 		}
1827 	}
1828 	return 1;
1829 }
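
/*
 * Editor's worked example of the refcounting: if block X is cancelled
 * twice in the log, pass 1 leaves its record with bc_refcount == 2.
 * During pass 2, every replay attempt against X returns 1 (skip), and
 * each of the two cancel items decrements the count; the second one
 * deletes the record, so any re-use of block X logged after the last
 * cancellation is replayed normally.
 */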
1830 
1831 /*
1832  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1833  * data which should be recovered is that which corresponds to the
1834  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1835  * data for the inodes is always logged through the inodes themselves rather
1836  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1837  *
1838  * The only time when buffers full of inodes are fully recovered is when the
1839  * buffer is full of newly allocated inodes.  In this case the buffer will
1840  * not be marked as an inode buffer and so will be sent to
1841  * xlog_recover_do_reg_buffer() below during recovery.
1842  */
1843 STATIC int
1844 xlog_recover_do_inode_buffer(
1845 	struct xfs_mount	*mp,
1846 	xlog_recover_item_t	*item,
1847 	struct xfs_buf		*bp,
1848 	xfs_buf_log_format_t	*buf_f)
1849 {
1850 	int			i;
1851 	int			item_index = 0;
1852 	int			bit = 0;
1853 	int			nbits = 0;
1854 	int			reg_buf_offset = 0;
1855 	int			reg_buf_bytes = 0;
1856 	int			next_unlinked_offset;
1857 	int			inodes_per_buf;
1858 	xfs_agino_t		*logged_nextp;
1859 	xfs_agino_t		*buffer_nextp;
1860 
1861 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1862 
1863 	/*
1864 	 * Post-recovery validation only works properly on CRC-enabled
1865 	 * filesystems.
1866 	 */
1867 	if (xfs_sb_version_hascrc(&mp->m_sb))
1868 		bp->b_ops = &xfs_inode_buf_ops;
1869 
1870 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1871 	for (i = 0; i < inodes_per_buf; i++) {
1872 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1873 			offsetof(xfs_dinode_t, di_next_unlinked);
1874 
1875 		while (next_unlinked_offset >=
1876 		       (reg_buf_offset + reg_buf_bytes)) {
1877 			/*
1878 			 * The next di_next_unlinked field is beyond
1879 			 * the current logged region.  Find the next
1880 			 * logged region that contains or is beyond
1881 			 * the current di_next_unlinked field.
1882 			 */
1883 			bit += nbits;
1884 			bit = xfs_next_bit(buf_f->blf_data_map,
1885 					   buf_f->blf_map_size, bit);
1886 
1887 			/*
1888 			 * If there are no more logged regions in the
1889 			 * buffer, then we're done.
1890 			 */
1891 			if (bit == -1)
1892 				return 0;
1893 
1894 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1895 						buf_f->blf_map_size, bit);
1896 			ASSERT(nbits > 0);
1897 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1898 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1899 			item_index++;
1900 		}
1901 
1902 		/*
1903 		 * If the current logged region starts after the current
1904 		 * di_next_unlinked field, then move on to the next
1905 		 * di_next_unlinked field.
1906 		 */
1907 		if (next_unlinked_offset < reg_buf_offset)
1908 			continue;
1909 
1910 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1911 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1912 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1913 							BBTOB(bp->b_io_length));
1914 
1915 		/*
1916 		 * The current logged region contains a copy of the
1917 		 * current di_next_unlinked field.  Extract its value
1918 		 * and copy it to the buffer copy.
1919 		 */
1920 		logged_nextp = item->ri_buf[item_index].i_addr +
1921 				next_unlinked_offset - reg_buf_offset;
1922 		if (unlikely(*logged_nextp == 0)) {
1923 			xfs_alert(mp,
1924 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1925 		"Trying to replay bad (0) inode di_next_unlinked field.",
1926 				item, bp);
1927 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1928 					 XFS_ERRLEVEL_LOW, mp);
1929 			return XFS_ERROR(EFSCORRUPTED);
1930 		}
1931 
1932 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1933 					      next_unlinked_offset);
1934 		*buffer_nextp = *logged_nextp;
1935 
1936 		/*
1937 		 * If necessary, recalculate the CRC in the on-disk inode. We
1938 		 * have to leave the inode in a consistent state for whoever
1939 		 * reads it next....
1940 		 */
1941 		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1942 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1943 
1944 	}
1945 
1946 	return 0;
1947 }
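
/*
 * Editor's note on the arithmetic above: blf_data_map tracks dirty
 * XFS_BLF_CHUNK-byte chunks (128 bytes, so XFS_BLF_SHIFT is 7), hence a
 * region starting at bit b and spanning n bits covers buffer bytes
 * [b << 7, (b + n) << 7). For 256-byte inodes, for example, inode 3's
 * di_next_unlinked lives at 3 * 256 + offsetof(xfs_dinode_t,
 * di_next_unlinked), and the inner loop advances through logged regions
 * until one covers that offset.
 */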
1948 
1949 /*
1950  * Validate that the recovered buffer is of the correct type and attach the
1951  * appropriate buffer operations to it for writeback. Magic numbers are in a
1952  * few places:
1953  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
1954  *	the first 32 bits of the buffer (most blocks),
1955  *	inside a struct xfs_da_blkinfo at the start of the buffer.
1956  */
1957 static void
1958 xlog_recovery_validate_buf_type(
1959 	struct xfs_mount	*mp,
1960 	struct xfs_buf		*bp,
1961 	xfs_buf_log_format_t	*buf_f)
1962 {
1963 	struct xfs_da_blkinfo	*info = bp->b_addr;
1964 	__uint32_t		magic32;
1965 	__uint16_t		magic16;
1966 	__uint16_t		magicda;
1967 
1968 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
1969 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
1970 	magicda = be16_to_cpu(info->magic);
1971 	switch (xfs_blft_from_flags(buf_f)) {
1972 	case XFS_BLFT_BTREE_BUF:
1973 		switch (magic32) {
1974 		case XFS_ABTB_CRC_MAGIC:
1975 		case XFS_ABTC_CRC_MAGIC:
1976 		case XFS_ABTB_MAGIC:
1977 		case XFS_ABTC_MAGIC:
1978 			bp->b_ops = &xfs_allocbt_buf_ops;
1979 			break;
1980 		case XFS_IBT_CRC_MAGIC:
1981 		case XFS_IBT_MAGIC:
1982 			bp->b_ops = &xfs_inobt_buf_ops;
1983 			break;
1984 		case XFS_BMAP_CRC_MAGIC:
1985 		case XFS_BMAP_MAGIC:
1986 			bp->b_ops = &xfs_bmbt_buf_ops;
1987 			break;
1988 		default:
1989 			xfs_warn(mp, "Bad btree block magic!");
1990 			ASSERT(0);
1991 			break;
1992 		}
1993 		break;
1994 	case XFS_BLFT_AGF_BUF:
1995 		if (magic32 != XFS_AGF_MAGIC) {
1996 			xfs_warn(mp, "Bad AGF block magic!");
1997 			ASSERT(0);
1998 			break;
1999 		}
2000 		bp->b_ops = &xfs_agf_buf_ops;
2001 		break;
2002 	case XFS_BLFT_AGFL_BUF:
2003 		if (!xfs_sb_version_hascrc(&mp->m_sb))
2004 			break;
2005 		if (magic32 != XFS_AGFL_MAGIC) {
2006 			xfs_warn(mp, "Bad AGFL block magic!");
2007 			ASSERT(0);
2008 			break;
2009 		}
2010 		bp->b_ops = &xfs_agfl_buf_ops;
2011 		break;
2012 	case XFS_BLFT_AGI_BUF:
2013 		if (magic32 != XFS_AGI_MAGIC) {
2014 			xfs_warn(mp, "Bad AGI block magic!");
2015 			ASSERT(0);
2016 			break;
2017 		}
2018 		bp->b_ops = &xfs_agi_buf_ops;
2019 		break;
2020 	case XFS_BLFT_UDQUOT_BUF:
2021 	case XFS_BLFT_PDQUOT_BUF:
2022 	case XFS_BLFT_GDQUOT_BUF:
2023 #ifdef CONFIG_XFS_QUOTA
2024 		if (magic16 != XFS_DQUOT_MAGIC) {
2025 			xfs_warn(mp, "Bad DQUOT block magic!");
2026 			ASSERT(0);
2027 			break;
2028 		}
2029 		bp->b_ops = &xfs_dquot_buf_ops;
2030 #else
2031 		xfs_alert(mp,
2032 	"Trying to recover dquots without QUOTA support built in!");
2033 		ASSERT(0);
2034 #endif
2035 		break;
2036 	case XFS_BLFT_DINO_BUF:
2037 		/*
2038 		 * We get here with inode allocation buffers, not buffers that
2039 		 * track unlinked list changes.
2040 		 */
2041 		if (magic16 != XFS_DINODE_MAGIC) {
2042 			xfs_warn(mp, "Bad INODE block magic!");
2043 			ASSERT(0);
2044 			break;
2045 		}
2046 		bp->b_ops = &xfs_inode_buf_ops;
2047 		break;
2048 	case XFS_BLFT_SYMLINK_BUF:
2049 		if (magic32 != XFS_SYMLINK_MAGIC) {
2050 			xfs_warn(mp, "Bad symlink block magic!");
2051 			ASSERT(0);
2052 			break;
2053 		}
2054 		bp->b_ops = &xfs_symlink_buf_ops;
2055 		break;
2056 	case XFS_BLFT_DIR_BLOCK_BUF:
2057 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2058 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2059 			xfs_warn(mp, "Bad dir block magic!");
2060 			ASSERT(0);
2061 			break;
2062 		}
2063 		bp->b_ops = &xfs_dir3_block_buf_ops;
2064 		break;
2065 	case XFS_BLFT_DIR_DATA_BUF:
2066 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2067 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2068 			xfs_warn(mp, "Bad dir data magic!");
2069 			ASSERT(0);
2070 			break;
2071 		}
2072 		bp->b_ops = &xfs_dir3_data_buf_ops;
2073 		break;
2074 	case XFS_BLFT_DIR_FREE_BUF:
2075 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2076 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2077 			xfs_warn(mp, "Bad dir3 free magic!");
2078 			ASSERT(0);
2079 			break;
2080 		}
2081 		bp->b_ops = &xfs_dir3_free_buf_ops;
2082 		break;
2083 	case XFS_BLFT_DIR_LEAF1_BUF:
2084 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2085 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2086 			xfs_warn(mp, "Bad dir leaf1 magic!");
2087 			ASSERT(0);
2088 			break;
2089 		}
2090 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2091 		break;
2092 	case XFS_BLFT_DIR_LEAFN_BUF:
2093 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2094 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2095 			xfs_warn(mp, "Bad dir leafn magic!");
2096 			ASSERT(0);
2097 			break;
2098 		}
2099 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2100 		break;
2101 	case XFS_BLFT_DA_NODE_BUF:
2102 		if (magicda != XFS_DA_NODE_MAGIC &&
2103 		    magicda != XFS_DA3_NODE_MAGIC) {
2104 			xfs_warn(mp, "Bad da node magic!");
2105 			ASSERT(0);
2106 			break;
2107 		}
2108 		bp->b_ops = &xfs_da3_node_buf_ops;
2109 		break;
2110 	case XFS_BLFT_ATTR_LEAF_BUF:
2111 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2112 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2113 			xfs_warn(mp, "Bad attr leaf magic!");
2114 			ASSERT(0);
2115 			break;
2116 		}
2117 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2118 		break;
2119 	case XFS_BLFT_ATTR_RMT_BUF:
2120 		if (!xfs_sb_version_hascrc(&mp->m_sb))
2121 			break;
2122 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2123 			xfs_warn(mp, "Bad attr remote magic!");
2124 			ASSERT(0);
2125 			break;
2126 		}
2127 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2128 		break;
2129 	case XFS_BLFT_SB_BUF:
2130 		if (magic32 != XFS_SB_MAGIC) {
2131 			xfs_warn(mp, "Bad SB block magic!");
2132 			ASSERT(0);
2133 			break;
2134 		}
2135 		bp->b_ops = &xfs_sb_buf_ops;
2136 		break;
2137 	default:
2138 		xfs_warn(mp, "Unknown buffer type %d!",
2139 			 xfs_blft_from_flags(buf_f));
2140 		break;
2141 	}
2142 }
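
/*
 * Editor's example of the three magic locations checked above, for a
 * v5 directory data block:
 *
 *	magic32 - XFS_DIR3_DATA_MAGIC in the first 32 bits
 *	magic16 - only meaningful for inode/dquot buffers, where the
 *		  magic is a 16 bit value at offset 0
 *	magicda - only meaningful for da-btree blocks, where an
 *		  xfs_da_blkinfo header starts the buffer
 *
 * Each case only consults the variant appropriate to its block type.
 */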
2143 
2144 /*
2145  * Perform a 'normal' buffer recovery.  Each logged region of the
2146  * buffer should be copied over the corresponding region in the
2147  * given buffer.  The bitmap in the buf log format structure indicates
2148  * where to place the logged data.
2149  */
2150 STATIC void
2151 xlog_recover_do_reg_buffer(
2152 	struct xfs_mount	*mp,
2153 	xlog_recover_item_t	*item,
2154 	struct xfs_buf		*bp,
2155 	xfs_buf_log_format_t	*buf_f)
2156 {
2157 	int			i;
2158 	int			bit;
2159 	int			nbits;
2160 	int                     error;
2161 
2162 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2163 
2164 	bit = 0;
2165 	i = 1;  /* 0 is the buf format structure */
2166 	while (1) {
2167 		bit = xfs_next_bit(buf_f->blf_data_map,
2168 				   buf_f->blf_map_size, bit);
2169 		if (bit == -1)
2170 			break;
2171 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2172 					buf_f->blf_map_size, bit);
2173 		ASSERT(nbits > 0);
2174 		ASSERT(item->ri_buf[i].i_addr != NULL);
2175 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2176 		ASSERT(BBTOB(bp->b_io_length) >=
2177 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2178 
2179 		/*
2180 		 * The dirty regions logged in the buffer, even though
2181 		 * contiguous, may span multiple chunks. This is because the
2182 		 * dirty region may span a physical page boundary in a buffer
2183 		 * and hence be split into two separate vectors for writing into
2184 		 * the log. Hence we need to trim nbits back to the length of
2185 		 * the current region being copied out of the log.
2186 		 */
2187 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2188 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2189 
2190 		/*
2191 		 * Do a sanity check if this is a dquot buffer. Just checking
2192 		 * the first dquot in the buffer should do. XXX: this is
2193 		 * probably a good thing to do for other buf types also.
2194 		 */
2195 		error = 0;
2196 		if (buf_f->blf_flags &
2197 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2198 			if (item->ri_buf[i].i_addr == NULL) {
2199 				xfs_alert(mp,
2200 					"XFS: NULL dquot in %s.", __func__);
2201 				goto next;
2202 			}
2203 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2204 				xfs_alert(mp,
2205 					"XFS: dquot too small (%d) in %s.",
2206 					item->ri_buf[i].i_len, __func__);
2207 				goto next;
2208 			}
2209 			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
2210 					       -1, 0, XFS_QMOPT_DOWARN,
2211 					       "dquot_buf_recover");
2212 			if (error)
2213 				goto next;
2214 		}
2215 
2216 		memcpy(xfs_buf_offset(bp,
2217 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2218 			item->ri_buf[i].i_addr,		/* source */
2219 			nbits<<XFS_BLF_SHIFT);		/* length */
2220  next:
2221 		i++;
2222 		bit += nbits;
2223 	}
2224 
2225 	/* Shouldn't be any more regions */
2226 	ASSERT(i == item->ri_total);
2227 
2228 	/*
2229 	 * We can only do post-recovery validation on items on CRC-enabled
2230 	 * filesystems as we need to know when the buffer was written to be able
2231 	 * to determine if we should have replayed the item. If we replay old
2232 	 * metadata over a newer buffer, then it will enter a temporarily
2233 	 * inconsistent state resulting in verification failures. Hence for now
2234 	 * just avoid the verification stage for non-CRC filesystems.
2235 	 */
2236 	if (xfs_sb_version_hascrc(&mp->m_sb))
2237 		xlog_recovery_validate_buf_type(mp, bp, buf_f);
2238 }
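
/*
 * Editor's example for the nbits trim above: a contiguous 640-byte dirty
 * region is 5 chunks in the bitmap, but if it straddled a page boundary
 * it may have been logged as two vectors of, say, 384 and 256 bytes.
 * The first vector then copies only 384 >> XFS_BLF_SHIFT == 3 chunks,
 * and the next loop iteration picks up the remaining 2 chunks from the
 * following vector.
 */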
2239 
2240 /*
2241  * Do some primitive error checking on ondisk dquot data structures.
2242  */
2243 int
2244 xfs_qm_dqcheck(
2245 	struct xfs_mount *mp,
2246 	xfs_disk_dquot_t *ddq,
2247 	xfs_dqid_t	 id,
2248 	uint		 type,	  /* used only with XFS_QMOPT_DQREPAIR */
2249 	uint		 flags,
2250 	char		 *str)
2251 {
2252 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
2253 	int		errs = 0;
2254 
2255 	/*
2256 	 * We can encounter an uninitialized dquot buffer for 2 reasons:
2257 	 * 1. If we crash while deleting the quotainode(s), and those blks got
2258 	 *    used for user data. This is because we take the path of regular
2259 	 *    file deletion; however, the size field of quotainodes is never
2260 	 *    updated, so all the tricks that we play in itruncate_finish
2261 	 *    don't quite matter.
2262 	 *
2263 	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
2264 	 *    But the allocation will be replayed so we'll end up with an
2265 	 *    uninitialized quota block.
2266 	 *
2267 	 * This is all fine; things are still consistent, and we haven't lost
2268 	 * any quota information. Just don't complain about bad dquot blks.
2269 	 */
2270 	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2271 		if (flags & XFS_QMOPT_DOWARN)
2272 			xfs_alert(mp,
2273 			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2274 			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2275 		errs++;
2276 	}
2277 	if (ddq->d_version != XFS_DQUOT_VERSION) {
2278 		if (flags & XFS_QMOPT_DOWARN)
2279 			xfs_alert(mp,
2280 			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2281 			str, id, ddq->d_version, XFS_DQUOT_VERSION);
2282 		errs++;
2283 	}
2284 
2285 	if (ddq->d_flags != XFS_DQ_USER &&
2286 	    ddq->d_flags != XFS_DQ_PROJ &&
2287 	    ddq->d_flags != XFS_DQ_GROUP) {
2288 		if (flags & XFS_QMOPT_DOWARN)
2289 			xfs_alert(mp,
2290 			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2291 			str, id, ddq->d_flags);
2292 		errs++;
2293 	}
2294 
2295 	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2296 		if (flags & XFS_QMOPT_DOWARN)
2297 			xfs_alert(mp,
2298 			"%s : ondisk-dquot 0x%p, ID mismatch: "
2299 			"0x%x expected, found id 0x%x",
2300 			str, ddq, id, be32_to_cpu(ddq->d_id));
2301 		errs++;
2302 	}
2303 
2304 	if (!errs && ddq->d_id) {
2305 		if (ddq->d_blk_softlimit &&
2306 		    be64_to_cpu(ddq->d_bcount) >
2307 				be64_to_cpu(ddq->d_blk_softlimit)) {
2308 			if (!ddq->d_btimer) {
2309 				if (flags & XFS_QMOPT_DOWARN)
2310 					xfs_alert(mp,
2311 			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2312 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2313 				errs++;
2314 			}
2315 		}
2316 		if (ddq->d_ino_softlimit &&
2317 		    be64_to_cpu(ddq->d_icount) >
2318 				be64_to_cpu(ddq->d_ino_softlimit)) {
2319 			if (!ddq->d_itimer) {
2320 				if (flags & XFS_QMOPT_DOWARN)
2321 					xfs_alert(mp,
2322 			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2323 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2324 				errs++;
2325 			}
2326 		}
2327 		if (ddq->d_rtb_softlimit &&
2328 		    be64_to_cpu(ddq->d_rtbcount) >
2329 				be64_to_cpu(ddq->d_rtb_softlimit)) {
2330 			if (!ddq->d_rtbtimer) {
2331 				if (flags & XFS_QMOPT_DOWARN)
2332 					xfs_alert(mp,
2333 			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2334 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2335 				errs++;
2336 			}
2337 		}
2338 	}
2339 
2340 	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2341 		return errs;
2342 
2343 	if (flags & XFS_QMOPT_DOWARN)
2344 		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2345 
2346 	/*
2347 	 * Typically, a repair is only requested by quotacheck.
2348 	 */
2349 	ASSERT(id != -1);
2350 	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2351 	memset(d, 0, sizeof(xfs_dqblk_t));
2352 
2353 	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2354 	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2355 	d->dd_diskdq.d_flags = type;
2356 	d->dd_diskdq.d_id = cpu_to_be32(id);
2357 
2358 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2359 		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2360 		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2361 				 XFS_DQUOT_CRC_OFF);
2362 	}
2363 
2364 	return errs;
2365 }
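
/*
 * Editor's usage note: log recovery calls this with XFS_QMOPT_DOWARN
 * only, so a bad dquot is reported and the nonzero return makes the
 * caller skip it. A quotacheck-style repair would instead pass a real
 * id and type with XFS_QMOPT_DQREPAIR set, e.g. (hypothetical call):
 *
 *	error = xfs_qm_dqcheck(mp, ddq, id, XFS_DQ_USER,
 *			XFS_QMOPT_DQREPAIR | XFS_QMOPT_DOWARN, "qcheck");
 *
 * which re-initialises the dquot block in place rather than failing.
 */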
2366 
2367 /*
2368  * Perform a dquot buffer recovery.
2369  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2370  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2371  * Else, treat it as a regular buffer and do recovery.
2372  */
2373 STATIC void
2374 xlog_recover_do_dquot_buffer(
2375 	struct xfs_mount		*mp,
2376 	struct xlog			*log,
2377 	struct xlog_recover_item	*item,
2378 	struct xfs_buf			*bp,
2379 	struct xfs_buf_log_format	*buf_f)
2380 {
2381 	uint			type;
2382 
2383 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2384 
2385 	/*
2386 	 * Filesystems are required to send in quota flags at mount time.
2387 	 */
2388 	if (mp->m_qflags == 0)
2389 		return;
2391 
2392 	type = 0;
2393 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2394 		type |= XFS_DQ_USER;
2395 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2396 		type |= XFS_DQ_PROJ;
2397 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2398 		type |= XFS_DQ_GROUP;
2399 	/*
2400 	 * This type of quota was turned off, so ignore this buffer
2401 	 */
2402 	if (log->l_quotaoffs_flag & type)
2403 		return;
2404 
2405 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2406 }
2407 
2408 /*
2409  * This routine replays a modification made to a buffer at runtime.
2410  * There are actually two types of buffer, regular and inode, which
2411  * are handled differently.  Inode buffers are special in that we
2412  * only recover a specific set of data from them, namely the inode
2413  * di_next_unlinked fields.  This is because all other inode data is
2414  * actually logged via inode records, so anything we replay here
2415  * that overlaps it may be stale.
2416  *
2417  * When meta-data buffers are freed at run time we log a buffer item
2418  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2419  * of the buffer in the log should not be replayed at recovery time.
2420  * This is so that if the blocks covered by the buffer are reused for
2421  * file data before we crash we don't end up replaying old, freed
2422  * meta-data into a user's file.
2423  *
2424  * To handle the cancellation of buffer log items, we make two passes
2425  * over the log during recovery.  During the first we build a table of
2426  * those buffers which have been cancelled, and during the second we
2427  * only replay those buffers which do not have corresponding cancel
2428  * records in the table.  See xlog_recover_buffer_pass1() and
2429  * xlog_check_buffer_cancelled() above for the cancel table implementation.
2430  */
2431 STATIC int
2432 xlog_recover_buffer_pass2(
2433 	struct xlog			*log,
2434 	struct list_head		*buffer_list,
2435 	struct xlog_recover_item	*item)
2436 {
2437 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2438 	xfs_mount_t		*mp = log->l_mp;
2439 	xfs_buf_t		*bp;
2440 	int			error;
2441 	uint			buf_flags;
2442 
2443 	/*
2444 	 * In this pass we want to recover only the buffers which have
2445 	 * not been cancelled and are not cancellation buffers themselves.
2446 	 */
2447 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2448 			buf_f->blf_len, buf_f->blf_flags)) {
2449 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2450 		return 0;
2451 	}
2452 
2453 	trace_xfs_log_recover_buf_recover(log, buf_f);
2454 
2455 	buf_flags = 0;
2456 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2457 		buf_flags |= XBF_UNMAPPED;
2458 
2459 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2460 			  buf_flags, NULL);
2461 	if (!bp)
2462 		return XFS_ERROR(ENOMEM);
2463 	error = bp->b_error;
2464 	if (error) {
2465 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2466 		xfs_buf_relse(bp);
2467 		return error;
2468 	}
2469 
2470 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2471 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2472 	} else if (buf_f->blf_flags &
2473 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2474 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2475 	} else {
2476 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2477 	}
	if (error) {
		/* don't leak the buffer if replay failed */
		xfs_buf_relse(bp);
		return XFS_ERROR(error);
	}
2480 
2481 	/*
2482 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2483 	 * slower when taking into account all the buffers to be flushed.
2484 	 *
2485 	 * Also make sure that only inode buffers with good sizes stay in
2486 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2487 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2488 	 * buffers in the log can be a different size if the log was generated
2489 	 * by an older kernel using unclustered inode buffers or a newer kernel
2490 	 * running with a different inode cluster size.  Regardless, if the
2491 	 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2492 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2493 	 * the buffer out of the buffer cache so that the buffer won't
2494 	 * overlap with future reads of those inodes.
2495 	 */
2496 	if (XFS_DINODE_MAGIC ==
2497 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2498 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2499 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2500 		xfs_buf_stale(bp);
2501 		error = xfs_bwrite(bp);
2502 	} else {
2503 		ASSERT(bp->b_target->bt_mount == mp);
2504 		bp->b_iodone = xlog_recover_iodone;
2505 		xfs_buf_delwri_queue(bp, buffer_list);
2506 	}
2507 
2508 	xfs_buf_relse(bp);
2509 	return error;
2510 }
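
/*
 * Editor's example for the size check above: with 4k filesystem blocks
 * and (typically) an 8k inode cluster, MAX() evaluates to 8k. A 4k inode
 * buffer logged by an old kernel using unclustered inode buffers fails
 * the test, so it is marked stale and written synchronously instead of
 * being left in the cache where it would alias future 8k cluster reads
 * of the same inodes.
 */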
2511 
2512 STATIC int
2513 xlog_recover_inode_pass2(
2514 	struct xlog			*log,
2515 	struct list_head		*buffer_list,
2516 	struct xlog_recover_item	*item)
2517 {
2518 	xfs_inode_log_format_t	*in_f;
2519 	xfs_mount_t		*mp = log->l_mp;
2520 	xfs_buf_t		*bp;
2521 	xfs_dinode_t		*dip;
2522 	int			len;
2523 	xfs_caddr_t		src;
2524 	xfs_caddr_t		dest;
2525 	int			error;
2526 	int			attr_index;
2527 	uint			fields;
2528 	xfs_icdinode_t		*dicp;
2529 	uint			isize;
2530 	int			need_free = 0;
2531 
2532 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2533 		in_f = item->ri_buf[0].i_addr;
2534 	} else {
2535 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2536 		need_free = 1;
2537 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2538 		if (error)
2539 			goto error;
2540 	}
2541 
2542 	/*
2543 	 * Inode buffers can be freed; look out for that case
2544 	 * and do not replay the inode.
2545 	 */
2546 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2547 					in_f->ilf_len, 0)) {
2548 		error = 0;
2549 		trace_xfs_log_recover_inode_cancel(log, in_f);
2550 		goto error;
2551 	}
2552 	trace_xfs_log_recover_inode_recover(log, in_f);
2553 
2554 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2555 			  &xfs_inode_buf_ops);
2556 	if (!bp) {
2557 		error = ENOMEM;
2558 		goto error;
2559 	}
2560 	error = bp->b_error;
2561 	if (error) {
2562 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2563 		xfs_buf_relse(bp);
2564 		goto error;
2565 	}
2566 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2567 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2568 
2569 	/*
2570 	 * Make sure the place we're flushing out to really looks
2571 	 * like an inode!
2572 	 */
2573 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2574 		xfs_buf_relse(bp);
2575 		xfs_alert(mp,
2576 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2577 			__func__, dip, bp, in_f->ilf_ino);
2578 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2579 				 XFS_ERRLEVEL_LOW, mp);
2580 		error = EFSCORRUPTED;
2581 		goto error;
2582 	}
2583 	dicp = item->ri_buf[1].i_addr;
2584 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2585 		xfs_buf_relse(bp);
2586 		xfs_alert(mp,
2587 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2588 			__func__, item, in_f->ilf_ino);
2589 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2590 				 XFS_ERRLEVEL_LOW, mp);
2591 		error = EFSCORRUPTED;
2592 		goto error;
2593 	}
2594 
2595 	/*
2596 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2597 	 * are transactional and if ordering is necessary we can determine that
2598 	 * more accurately by the LSN field in the V3 inode core. Don't trust
2599 	 * the inode versions as we might be changing them here; use the
2600 	 * superblock flag to determine whether we need to look at di_flushiter
2601 	 * to skip replay when the on-disk inode is newer than the log one.
2602 	 */
2603 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2604 	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2605 		/*
2606 		 * Deal with counter wrap: on-disk DI_MAX_FLUSH plus a small
2607 		 * log value means the counter wrapped and the log copy is newer
2608 		 */
2609 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2610 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2611 			/* do nothing */
2612 		} else {
2613 			xfs_buf_relse(bp);
2614 			trace_xfs_log_recover_inode_skip(log, in_f);
2615 			error = 0;
2616 			goto error;
2617 		}
2618 	}
2619 
2620 	/* Take the opportunity to reset the flush iteration count */
2621 	dicp->di_flushiter = 0;
2622 
2623 	if (unlikely(S_ISREG(dicp->di_mode))) {
2624 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2625 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2626 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2627 					 XFS_ERRLEVEL_LOW, mp, dicp);
2628 			xfs_buf_relse(bp);
2629 			xfs_alert(mp,
2630 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2631 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2632 				__func__, item, dip, bp, in_f->ilf_ino);
2633 			error = EFSCORRUPTED;
2634 			goto error;
2635 		}
2636 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2637 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2638 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2639 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2640 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2641 					     XFS_ERRLEVEL_LOW, mp, dicp);
2642 			xfs_buf_relse(bp);
2643 			xfs_alert(mp,
2644 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2645 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2646 				__func__, item, dip, bp, in_f->ilf_ino);
2647 			error = EFSCORRUPTED;
2648 			goto error;
2649 		}
2650 	}
2651 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2652 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2653 				     XFS_ERRLEVEL_LOW, mp, dicp);
2654 		xfs_buf_relse(bp);
2655 		xfs_alert(mp,
2656 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2657 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2658 			__func__, item, dip, bp, in_f->ilf_ino,
2659 			dicp->di_nextents + dicp->di_anextents,
2660 			dicp->di_nblocks);
2661 		error = EFSCORRUPTED;
2662 		goto error;
2663 	}
2664 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2665 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2666 				     XFS_ERRLEVEL_LOW, mp, dicp);
2667 		xfs_buf_relse(bp);
2668 		xfs_alert(mp,
2669 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2670 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2671 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2672 		error = EFSCORRUPTED;
2673 		goto error;
2674 	}
2675 	isize = xfs_icdinode_size(dicp->di_version);
2676 	if (unlikely(item->ri_buf[1].i_len > isize)) {
2677 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2678 				     XFS_ERRLEVEL_LOW, mp, dicp);
2679 		xfs_buf_relse(bp);
2680 		xfs_alert(mp,
2681 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2682 			__func__, item->ri_buf[1].i_len, item);
2683 		error = EFSCORRUPTED;
2684 		goto error;
2685 	}
2686 
2687 	/* The core is in in-core format */
2688 	xfs_dinode_to_disk(dip, dicp);
2689 
2690 	/* the rest is in on-disk format */
2691 	if (item->ri_buf[1].i_len > isize) {
2692 		memcpy((char *)dip + isize,
2693 			item->ri_buf[1].i_addr + isize,
2694 			item->ri_buf[1].i_len - isize);
2695 	}
2696 
2697 	fields = in_f->ilf_fields;
2698 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2699 	case XFS_ILOG_DEV:
2700 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2701 		break;
2702 	case XFS_ILOG_UUID:
2703 		memcpy(XFS_DFORK_DPTR(dip),
2704 		       &in_f->ilf_u.ilfu_uuid,
2705 		       sizeof(uuid_t));
2706 		break;
2707 	}
2708 
2709 	if (in_f->ilf_size == 2)
2710 		goto write_inode_buffer;
2711 	len = item->ri_buf[2].i_len;
2712 	src = item->ri_buf[2].i_addr;
2713 	ASSERT(in_f->ilf_size <= 4);
2714 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2715 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2716 	       (len == in_f->ilf_dsize));
2717 
2718 	switch (fields & XFS_ILOG_DFORK) {
2719 	case XFS_ILOG_DDATA:
2720 	case XFS_ILOG_DEXT:
2721 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2722 		break;
2723 
2724 	case XFS_ILOG_DBROOT:
2725 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2726 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2727 				 XFS_DFORK_DSIZE(dip, mp));
2728 		break;
2729 
2730 	default:
2731 		/*
2732 		 * There are no data fork flags set.
2733 		 */
2734 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2735 		break;
2736 	}
2737 
2738 	/*
2739 	 * If we logged any attribute data, recover it.  There may or
2740 	 * may not have been any other non-core data logged in this
2741 	 * transaction.
2742 	 */
2743 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2744 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2745 			attr_index = 3;
2746 		} else {
2747 			attr_index = 2;
2748 		}
2749 		len = item->ri_buf[attr_index].i_len;
2750 		src = item->ri_buf[attr_index].i_addr;
2751 		ASSERT(len == in_f->ilf_asize);
2752 
2753 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2754 		case XFS_ILOG_ADATA:
2755 		case XFS_ILOG_AEXT:
2756 			dest = XFS_DFORK_APTR(dip);
2757 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2758 			memcpy(dest, src, len);
2759 			break;
2760 
2761 		case XFS_ILOG_ABROOT:
2762 			dest = XFS_DFORK_APTR(dip);
2763 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2764 					 len, (xfs_bmdr_block_t*)dest,
2765 					 XFS_DFORK_ASIZE(dip, mp));
2766 			break;
2767 
2768 		default:
2769 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2770 			ASSERT(0);
2771 			xfs_buf_relse(bp);
2772 			error = EIO;
2773 			goto error;
2774 		}
2775 	}
2776 
2777 write_inode_buffer:
2778 	/* re-generate the checksum. */
2779 	xfs_dinode_calc_crc(log->l_mp, dip);
2780 
2781 	ASSERT(bp->b_target->bt_mount == mp);
2782 	bp->b_iodone = xlog_recover_iodone;
2783 	xfs_buf_delwri_queue(bp, buffer_list);
2784 	xfs_buf_relse(bp);
2785 error:
2786 	if (need_free)
2787 		kmem_free(in_f);
2788 	return XFS_ERROR(error);
2789 }
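
/*
 * Editor's sketch of the region layout consumed above for a typical
 * inode log item:
 *
 *	ri_buf[0] - xfs_inode_log_format_t
 *	ri_buf[1] - the inode core
 *	ri_buf[2] - data fork, if XFS_ILOG_DFORK is set
 *	ri_buf[2 or 3] - attr fork, if XFS_ILOG_AFORK is set
 *
 * hence ilf_size == 2 means "format + core only" and the early jump to
 * write_inode_buffer, and attr_index is 3 only when a data fork region
 * precedes the attr fork region.
 */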
2790 
2791 /*
2792  * Recover QUOTAOFF records. We simply make a note of it in the xlog
2793  * structure, so that we know not to do any dquot item or dquot buffer
2794  * recovery of that type.
2795  */
2796 STATIC int
2797 xlog_recover_quotaoff_pass1(
2798 	struct xlog			*log,
2799 	struct xlog_recover_item	*item)
2800 {
2801 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2802 	ASSERT(qoff_f);
2803 
2804 	/*
2805 	 * The logitem format's flag tells us if this was user quotaoff,
2806 	 * group/project quotaoff or both.
2807 	 */
2808 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2809 		log->l_quotaoffs_flag |= XFS_DQ_USER;
2810 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2811 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2812 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2813 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2814 
2815 	return (0);
2816 }
2817 
2818 /*
2819  * Recover a dquot record
2820  */
2821 STATIC int
2822 xlog_recover_dquot_pass2(
2823 	struct xlog			*log,
2824 	struct list_head		*buffer_list,
2825 	struct xlog_recover_item	*item)
2826 {
2827 	xfs_mount_t		*mp = log->l_mp;
2828 	xfs_buf_t		*bp;
2829 	struct xfs_disk_dquot	*ddq, *recddq;
2830 	int			error;
2831 	xfs_dq_logformat_t	*dq_f;
2832 	uint			type;
2833 
2834 
2835 	/*
2836 	 * Filesystems are required to send in quota flags at mount time.
2837 	 */
2838 	if (mp->m_qflags == 0)
2839 		return (0);
2840 
2841 	recddq = item->ri_buf[1].i_addr;
2842 	if (recddq == NULL) {
2843 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2844 		return XFS_ERROR(EIO);
2845 	}
2846 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2847 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2848 			item->ri_buf[1].i_len, __func__);
2849 		return XFS_ERROR(EIO);
2850 	}
2851 
2852 	/*
2853 	 * This type of quota was turned off, so ignore this record.
2854 	 */
2855 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2856 	ASSERT(type);
2857 	if (log->l_quotaoffs_flag & type)
2858 		return (0);
2859 
2860 	/*
2861 	 * At this point we know that quota was _not_ turned off.
2862 	 * Since the mount flags are not indicating to us otherwise, this
2863 	 * must mean that quota is on, and the dquot needs to be replayed.
2864 	 * Remember that we may not have fully recovered the superblock yet,
2865 	 * so we can't do the usual trick of looking at the SB quota bits.
2866 	 *
2867 	 * The other possibility, of course, is that the quota subsystem was
2868 	 * removed since the last mount - ENOSYS.
2869 	 */
2870 	dq_f = item->ri_buf[0].i_addr;
2871 	ASSERT(dq_f);
2872 	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2873 			   "xlog_recover_dquot_pass2 (log copy)");
2874 	if (error)
2875 		return XFS_ERROR(EIO);
2876 	ASSERT(dq_f->qlf_len == 1);
2877 
2878 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2879 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
2880 				   NULL);
2881 	if (error)
2882 		return error;
2883 
2884 	ASSERT(bp);
2885 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2886 
2887 	/*
2888 	 * At least the magic num portion should be on disk because this
2889 	 * was among a chunk of dquots created earlier, and we did some
2890 	 * minimal initialization then.
2891 	 */
2892 	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2893 			   "xlog_recover_dquot_pass2");
2894 	if (error) {
2895 		xfs_buf_relse(bp);
2896 		return XFS_ERROR(EIO);
2897 	}
2898 
2899 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
2900 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2901 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
2902 				 XFS_DQUOT_CRC_OFF);
2903 	}
2904 
2905 	ASSERT(dq_f->qlf_size == 2);
2906 	ASSERT(bp->b_target->bt_mount == mp);
2907 	bp->b_iodone = xlog_recover_iodone;
2908 	xfs_buf_delwri_queue(bp, buffer_list);
2909 	xfs_buf_relse(bp);
2910 
2911 	return (0);
2912 }
2913 
2914 /*
2915  * This routine is called to create an in-core extent free intent
2916  * item from the efi format structure which was logged on disk.
2917  * It allocates an in-core efi, copies the extents from the format
2918  * structure into it, and adds the efi to the AIL with the given
2919  * LSN.
2920  */
2921 STATIC int
2922 xlog_recover_efi_pass2(
2923 	struct xlog			*log,
2924 	struct xlog_recover_item	*item,
2925 	xfs_lsn_t			lsn)
2926 {
2927 	int			error;
2928 	xfs_mount_t		*mp = log->l_mp;
2929 	xfs_efi_log_item_t	*efip;
2930 	xfs_efi_log_format_t	*efi_formatp;
2931 
2932 	efi_formatp = item->ri_buf[0].i_addr;
2933 
2934 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2935 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2936 					 &(efip->efi_format)))) {
2937 		xfs_efi_item_free(efip);
2938 		return error;
2939 	}
2940 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2941 
2942 	spin_lock(&log->l_ailp->xa_lock);
2943 	/*
2944 	 * xfs_trans_ail_update() drops the AIL lock.
2945 	 */
2946 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2947 	return 0;
2948 }
2949 
2950 
2951 /*
2952  * This routine is called when an efd format structure is found in
2953  * a committed transaction in the log.  Its purpose is to cancel
2954  * the corresponding efi if it was still in the log.  To do this
2955  * it searches the AIL for the efi with an id equal to that in the
2956  * efd format structure.  If we find it, we remove the efi from the
2957  * AIL and free it.
2958  */
2959 STATIC int
2960 xlog_recover_efd_pass2(
2961 	struct xlog			*log,
2962 	struct xlog_recover_item	*item)
2963 {
2964 	xfs_efd_log_format_t	*efd_formatp;
2965 	xfs_efi_log_item_t	*efip = NULL;
2966 	xfs_log_item_t		*lip;
2967 	__uint64_t		efi_id;
2968 	struct xfs_ail_cursor	cur;
2969 	struct xfs_ail		*ailp = log->l_ailp;
2970 
2971 	efd_formatp = item->ri_buf[0].i_addr;
2972 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2973 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2974 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2975 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2976 	efi_id = efd_formatp->efd_efi_id;
2977 
2978 	/*
2979 	 * Search for the efi with the id in the efd format structure
2980 	 * in the AIL.
2981 	 */
2982 	spin_lock(&ailp->xa_lock);
2983 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2984 	while (lip != NULL) {
2985 		if (lip->li_type == XFS_LI_EFI) {
2986 			efip = (xfs_efi_log_item_t *)lip;
2987 			if (efip->efi_format.efi_id == efi_id) {
2988 				/*
2989 				 * xfs_trans_ail_delete() drops the
2990 				 * AIL lock.
2991 				 */
2992 				xfs_trans_ail_delete(ailp, lip,
2993 						     SHUTDOWN_CORRUPT_INCORE);
2994 				xfs_efi_item_free(efip);
2995 				spin_lock(&ailp->xa_lock);
2996 				break;
2997 			}
2998 		}
2999 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3000 	}
3001 	xfs_trans_ail_cursor_done(ailp, &cur);
3002 	spin_unlock(&ailp->xa_lock);
3003 
3004 	return 0;
3005 }
3006 
3007 /*
3008  * This routine is called when an inode create format structure is found in a
3009  * committed transaction in the log.  Its purpose is to initialise the inodes
3010  * being allocated on disk. This requires us to get inode cluster buffers that
3011  * match the range to be initialised, stamp them with inode templates and
3012  * write them out by delayed write so that subsequent modifications will hit
3013  * the cached buffer and only need writing out at the end of recovery.
3014  */
3015 STATIC int
3016 xlog_recover_do_icreate_pass2(
3017 	struct xlog		*log,
3018 	struct list_head	*buffer_list,
3019 	xlog_recover_item_t	*item)
3020 {
3021 	struct xfs_mount	*mp = log->l_mp;
3022 	struct xfs_icreate_log	*icl;
3023 	xfs_agnumber_t		agno;
3024 	xfs_agblock_t		agbno;
3025 	unsigned int		count;
3026 	unsigned int		isize;
3027 	xfs_agblock_t		length;
3028 
3029 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3030 	if (icl->icl_type != XFS_LI_ICREATE) {
3031 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3032 		return EINVAL;
3033 	}
3034 
3035 	if (icl->icl_size != 1) {
3036 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3037 		return EINVAL;
3038 	}
3039 
3040 	agno = be32_to_cpu(icl->icl_ag);
3041 	if (agno >= mp->m_sb.sb_agcount) {
3042 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3043 		return EINVAL;
3044 	}
3045 	agbno = be32_to_cpu(icl->icl_agbno);
3046 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3047 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3048 		return EINVAL;
3049 	}
3050 	isize = be32_to_cpu(icl->icl_isize);
3051 	if (isize != mp->m_sb.sb_inodesize) {
3052 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3053 		return EINVAL;
3054 	}
3055 	count = be32_to_cpu(icl->icl_count);
3056 	if (!count) {
3057 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3058 		return EINVAL;
3059 	}
3060 	length = be32_to_cpu(icl->icl_length);
3061 	if (!length || length >= mp->m_sb.sb_agblocks) {
3062 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3063 		return EINVAL;
3064 	}
3065 
3066 	/* existing inode chunk allocation sizes are fixed values */
3067 	ASSERT(count == XFS_IALLOC_INODES(mp));
3068 	ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3069 	if (count != XFS_IALLOC_INODES(mp) ||
3070 	     length != XFS_IALLOC_BLOCKS(mp)) {
3071 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3072 		return EINVAL;
3073 	}
3074 
3075 	/*
3076 	 * Inode buffers can be freed. Do not replay the inode initialisation as
3077 	 * we could be overwriting something written after this inode buffer was
3078 	 * cancelled.
3079 	 *
3080 	 * XXX: we need to iterate all buffers and only init those that are not
3081 	 * cancelled. I think that a more fine grained factoring of
3082 	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
3083 	 * done easily.
3084 	 */
3085 	if (xlog_check_buffer_cancelled(log,
3086 			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3087 		return 0;
3088 
3089 	xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3090 					be32_to_cpu(icl->icl_gen));
3091 	return 0;
3092 }
3093 
3094 /*
3095  * Free up any resources allocated by the transaction
3096  *
3097  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3098  */
3099 STATIC void
3100 xlog_recover_free_trans(
3101 	struct xlog_recover	*trans)
3102 {
3103 	xlog_recover_item_t	*item, *n;
3104 	int			i;
3105 
3106 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3107 		/* Free the regions in the item. */
3108 		list_del(&item->ri_list);
3109 		for (i = 0; i < item->ri_cnt; i++)
3110 			kmem_free(item->ri_buf[i].i_addr);
3111 		/* Free the item itself */
3112 		kmem_free(item->ri_buf);
3113 		kmem_free(item);
3114 	}
3115 	/* Free the transaction recover structure */
3116 	kmem_free(trans);
3117 }
3118 
3119 STATIC int
3120 xlog_recover_commit_pass1(
3121 	struct xlog			*log,
3122 	struct xlog_recover		*trans,
3123 	struct xlog_recover_item	*item)
3124 {
3125 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3126 
3127 	switch (ITEM_TYPE(item)) {
3128 	case XFS_LI_BUF:
3129 		return xlog_recover_buffer_pass1(log, item);
3130 	case XFS_LI_QUOTAOFF:
3131 		return xlog_recover_quotaoff_pass1(log, item);
3132 	case XFS_LI_INODE:
3133 	case XFS_LI_EFI:
3134 	case XFS_LI_EFD:
3135 	case XFS_LI_DQUOT:
3136 	case XFS_LI_ICREATE:
3137 		/* nothing to do in pass 1 */
3138 		return 0;
3139 	default:
3140 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3141 			__func__, ITEM_TYPE(item));
3142 		ASSERT(0);
3143 		return XFS_ERROR(EIO);
3144 	}
3145 }
3146 
3147 STATIC int
3148 xlog_recover_commit_pass2(
3149 	struct xlog			*log,
3150 	struct xlog_recover		*trans,
3151 	struct list_head		*buffer_list,
3152 	struct xlog_recover_item	*item)
3153 {
3154 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3155 
3156 	switch (ITEM_TYPE(item)) {
3157 	case XFS_LI_BUF:
3158 		return xlog_recover_buffer_pass2(log, buffer_list, item);
3159 	case XFS_LI_INODE:
3160 		return xlog_recover_inode_pass2(log, buffer_list, item);
3161 	case XFS_LI_EFI:
3162 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3163 	case XFS_LI_EFD:
3164 		return xlog_recover_efd_pass2(log, item);
3165 	case XFS_LI_DQUOT:
3166 		return xlog_recover_dquot_pass2(log, buffer_list, item);
3167 	case XFS_LI_ICREATE:
3168 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3169 	case XFS_LI_QUOTAOFF:
3170 		/* nothing to do in pass2 */
3171 		return 0;
3172 	default:
3173 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3174 			__func__, ITEM_TYPE(item));
3175 		ASSERT(0);
3176 		return XFS_ERROR(EIO);
3177 	}
3178 }
3179 
3180 /*
3181  * Perform the transaction.
3182  *
3183  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
3184  * EFIs and EFDs get queued up by adding entries into the AIL for them.
3185  */
3186 STATIC int
3187 xlog_recover_commit_trans(
3188 	struct xlog		*log,
3189 	struct xlog_recover	*trans,
3190 	int			pass)
3191 {
3192 	int			error = 0, error2;
3193 	xlog_recover_item_t	*item;
3194 	LIST_HEAD		(buffer_list);
3195 
3196 	hlist_del(&trans->r_list);
3197 
3198 	error = xlog_recover_reorder_trans(log, trans, pass);
3199 	if (error)
3200 		return error;
3201 
3202 	list_for_each_entry(item, &trans->r_itemq, ri_list) {
3203 		switch (pass) {
3204 		case XLOG_RECOVER_PASS1:
3205 			error = xlog_recover_commit_pass1(log, trans, item);
3206 			break;
3207 		case XLOG_RECOVER_PASS2:
3208 			error = xlog_recover_commit_pass2(log, trans,
3209 							  &buffer_list, item);
3210 			break;
3211 		default:
3212 			ASSERT(0);
3213 		}
3214 
3215 		if (error)
3216 			goto out;
3217 	}
3218 
3219 	xlog_recover_free_trans(trans);
3220 
3221 out:
3222 	error2 = xfs_buf_delwri_submit(&buffer_list);
3223 	return error ? error : error2;
3224 }
3225 
3226 STATIC int
3227 xlog_recover_unmount_trans(
3228 	struct xlog		*log,
3229 	struct xlog_recover	*trans)
3230 {
3231 	/* Do nothing now */
3232 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3233 	return 0;
3234 }
3235 
3236 /*
3237  * There are two valid states of the r_state field.  0 indicates that the
3238  * transaction structure is in a normal state.  We have either seen the
3239  * start of the transaction or the last operation we added was not a partial
3240  * operation.  If the last operation we added to the transaction was a
3241  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3242  *
3243  * NOTE: skip LRs with 0 data length.
3244  */
3245 STATIC int
3246 xlog_recover_process_data(
3247 	struct xlog		*log,
3248 	struct hlist_head	rhash[],
3249 	struct xlog_rec_header	*rhead,
3250 	xfs_caddr_t		dp,
3251 	int			pass)
3252 {
3253 	xfs_caddr_t		lp;
3254 	int			num_logops;
3255 	xlog_op_header_t	*ohead;
3256 	xlog_recover_t		*trans;
3257 	xlog_tid_t		tid;
3258 	int			error;
3259 	unsigned long		hash;
3260 	uint			flags;
3261 
3262 	lp = dp + be32_to_cpu(rhead->h_len);
3263 	num_logops = be32_to_cpu(rhead->h_num_logops);
3264 
3265 	/* check the log format matches our own - else we can't recover */
3266 	if (xlog_header_check_recover(log->l_mp, rhead))
3267 		return (XFS_ERROR(EIO));
3268 
3269 	while ((dp < lp) && num_logops) {
3270 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3271 		ohead = (xlog_op_header_t *)dp;
3272 		dp += sizeof(xlog_op_header_t);
3273 		if (ohead->oh_clientid != XFS_TRANSACTION &&
3274 		    ohead->oh_clientid != XFS_LOG) {
3275 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3276 					__func__, ohead->oh_clientid);
3277 			ASSERT(0);
3278 			return (XFS_ERROR(EIO));
3279 		}
3280 		tid = be32_to_cpu(ohead->oh_tid);
3281 		hash = XLOG_RHASH(tid);
3282 		trans = xlog_recover_find_tid(&rhash[hash], tid);
3283 		if (trans == NULL) {		   /* not found; add new tid */
3284 			if (ohead->oh_flags & XLOG_START_TRANS)
3285 				xlog_recover_new_tid(&rhash[hash], tid,
3286 					be64_to_cpu(rhead->h_lsn));
3287 		} else {
3288 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3289 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
3290 					__func__, be32_to_cpu(ohead->oh_len));
3291 				WARN_ON(1);
3292 				return (XFS_ERROR(EIO));
3293 			}
3294 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
3295 			if (flags & XLOG_WAS_CONT_TRANS)
3296 				flags &= ~XLOG_CONTINUE_TRANS;
3297 			switch (flags) {
3298 			case XLOG_COMMIT_TRANS:
3299 				error = xlog_recover_commit_trans(log,
3300 								trans, pass);
3301 				break;
3302 			case XLOG_UNMOUNT_TRANS:
3303 				error = xlog_recover_unmount_trans(log, trans);
3304 				break;
3305 			case XLOG_WAS_CONT_TRANS:
3306 				error = xlog_recover_add_to_cont_trans(log,
3307 						trans, dp,
3308 						be32_to_cpu(ohead->oh_len));
3309 				break;
3310 			case XLOG_START_TRANS:
3311 				xfs_warn(log->l_mp, "%s: bad transaction",
3312 					__func__);
3313 				ASSERT(0);
3314 				error = XFS_ERROR(EIO);
3315 				break;
3316 			case 0:
3317 			case XLOG_CONTINUE_TRANS:
3318 				error = xlog_recover_add_to_trans(log, trans,
3319 						dp, be32_to_cpu(ohead->oh_len));
3320 				break;
3321 			default:
3322 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3323 					__func__, flags);
3324 				ASSERT(0);
3325 				error = XFS_ERROR(EIO);
3326 				break;
3327 			}
3328 			if (error)
3329 				return error;
3330 		}
3331 		dp += be32_to_cpu(ohead->oh_len);
3332 		num_logops--;
3333 	}
3334 	return 0;
3335 }
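
/*
 * Editor's sketch of the record layout walked above: the h_len bytes of
 * payload at dp hold h_num_logops operations laid out back to back as
 *
 *	[xlog_op_header_t][oh_len bytes of operation data] ...
 *
 * so dp advances by sizeof(xlog_op_header_t) plus oh_len per iteration
 * and must never run past lp = dp + h_len.
 */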
3336 
3337 /*
3338  * Process an extent free intent item that was recovered from
3339  * the log.  We need to free the extents that it describes.
3340  */
3341 STATIC int
3342 xlog_recover_process_efi(
3343 	xfs_mount_t		*mp,
3344 	xfs_efi_log_item_t	*efip)
3345 {
3346 	xfs_efd_log_item_t	*efdp;
3347 	xfs_trans_t		*tp;
3348 	int			i;
3349 	int			error = 0;
3350 	xfs_extent_t		*extp;
3351 	xfs_fsblock_t		startblock_fsb;
3352 
3353 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3354 
3355 	/*
3356 	 * First check the validity of the extents described by the
3357 	 * EFI.  If any are bad, then assume that all are bad and
3358 	 * just toss the EFI.
3359 	 */
3360 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3361 		extp = &(efip->efi_format.efi_extents[i]);
3362 		startblock_fsb = XFS_BB_TO_FSB(mp,
3363 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
3364 		if ((startblock_fsb == 0) ||
3365 		    (extp->ext_len == 0) ||
3366 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3367 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3368 			/*
3369 			 * This will pull the EFI from the AIL and
3370 			 * free the memory associated with it.
3371 			 */
3372 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3373 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
3374 			return XFS_ERROR(EIO);
3375 		}
3376 	}
3377 
3378 	tp = xfs_trans_alloc(mp, 0);
3379 	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3380 	if (error)
3381 		goto abort_error;
3382 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3383 
3384 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3385 		extp = &(efip->efi_format.efi_extents[i]);
3386 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3387 		if (error)
3388 			goto abort_error;
3389 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3390 					 extp->ext_len);
3391 	}
3392 
3393 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3394 	error = xfs_trans_commit(tp, 0);
3395 	return error;
3396 
3397 abort_error:
3398 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3399 	return error;
3400 }
3401 
3402 /*
3403  * When this is called, all of the EFIs which did not have
3404  * corresponding EFDs should be in the AIL.  What we do now
3405  * is free the extents associated with each one.
3406  *
3407  * Since we process the EFIs in normal transactions, they
3408  * will be removed at some point after the commit.  This prevents
3409  * us from just walking down the list processing each one.
3410  * We'll use a flag in the EFI to skip those that we've already
3411  * processed and use the AIL iteration mechanism's generation
3412  * count to try to speed this up at least a bit.
3413  *
3414  * When we start, we know that the EFIs are the only things in
3415  * the AIL.  As we process them, however, other items are added
3416  * to the AIL.  Since everything added to the AIL must come after
3417  * everything already in the AIL, we stop processing as soon as
3418  * we see something other than an EFI in the AIL.
3419  */
3420 STATIC int
3421 xlog_recover_process_efis(
3422 	struct xlog	*log)
3423 {
3424 	xfs_log_item_t		*lip;
3425 	xfs_efi_log_item_t	*efip;
3426 	int			error = 0;
3427 	struct xfs_ail_cursor	cur;
3428 	struct xfs_ail		*ailp;
3429 
3430 	ailp = log->l_ailp;
3431 	spin_lock(&ailp->xa_lock);
3432 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3433 	while (lip != NULL) {
3434 		/*
3435 		 * We're done when we see something other than an EFI.
3436 		 * There should be no EFIs left in the AIL now.
3437 		 */
3438 		if (lip->li_type != XFS_LI_EFI) {
3439 #ifdef DEBUG
3440 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3441 				ASSERT(lip->li_type != XFS_LI_EFI);
3442 #endif
3443 			break;
3444 		}
3445 
3446 		/*
3447 		 * Skip EFIs that we've already processed.
3448 		 */
3449 		efip = (xfs_efi_log_item_t *)lip;
3450 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3451 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3452 			continue;
3453 		}
3454 
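		/*
		 * Drop the AIL lock across the transaction in
		 * xlog_recover_process_efi(); the cursor keeps our place
		 * valid even if the AIL changes underneath us.
		 */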
3455 		spin_unlock(&ailp->xa_lock);
3456 		error = xlog_recover_process_efi(log->l_mp, efip);
3457 		spin_lock(&ailp->xa_lock);
3458 		if (error)
3459 			goto out;
3460 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3461 	}
3462 out:
3463 	xfs_trans_ail_cursor_done(ailp, &cur);
3464 	spin_unlock(&ailp->xa_lock);
3465 	return error;
3466 }
3467 
3468 /*
3469  * This routine performs a transaction to null out a bad inode pointer
3470  * in an agi unlinked inode hash bucket.
3471  */
3472 STATIC void
3473 xlog_recover_clear_agi_bucket(
3474 	xfs_mount_t	*mp,
3475 	xfs_agnumber_t	agno,
3476 	int		bucket)
3477 {
3478 	xfs_trans_t	*tp;
3479 	xfs_agi_t	*agi;
3480 	xfs_buf_t	*agibp;
3481 	int		offset;
3482 	int		error;
3483 
3484 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3485 	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3486 				  0, 0, 0);
3487 	if (error)
3488 		goto out_abort;
3489 
3490 	error = xfs_read_agi(mp, tp, agno, &agibp);
3491 	if (error)
3492 		goto out_abort;
3493 
3494 	agi = XFS_BUF_TO_AGI(agibp);
3495 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
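	/*
	 * Log only the byte range of the bucket slot we just cleared, so
	 * replaying this transaction touches nothing else in the AGI.
	 */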
3496 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3497 		 (sizeof(xfs_agino_t) * bucket);
3498 	xfs_trans_log_buf(tp, agibp, offset,
3499 			  (offset + sizeof(xfs_agino_t) - 1));
3500 
3501 	error = xfs_trans_commit(tp, 0);
3502 	if (error)
3503 		goto out_error;
3504 	return;
3505 
3506 out_abort:
3507 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3508 out_error:
3509 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3510 	return;
3511 }
3512 
3513 STATIC xfs_agino_t
3514 xlog_recover_process_one_iunlink(
3515 	struct xfs_mount		*mp,
3516 	xfs_agnumber_t			agno,
3517 	xfs_agino_t			agino,
3518 	int				bucket)
3519 {
3520 	struct xfs_buf			*ibp;
3521 	struct xfs_dinode		*dip;
3522 	struct xfs_inode		*ip;
3523 	xfs_ino_t			ino;
3524 	int				error;
3525 
3526 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3527 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3528 	if (error)
3529 		goto fail;
3530 
3531 	/*
3532 	 * Get the on disk inode to find the next inode in the bucket.
3533 	 */
3534 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3535 	if (error)
3536 		goto fail_iput;
3537 
3538 	ASSERT(ip->i_d.di_nlink == 0);
3539 	ASSERT(ip->i_d.di_mode != 0);
3540 
3541 	/* setup for the next pass */
3542 	agino = be32_to_cpu(dip->di_next_unlinked);
3543 	xfs_buf_relse(ibp);
3544 
3545 	/*
3546 	 * Prevent any DMAPI event from being sent when the reference on
3547 	 * the inode is dropped.
3548 	 */
3549 	ip->i_d.di_dmevmask = 0;
3550 
3551 	IRELE(ip);
3552 	return agino;
3553 
3554  fail_iput:
3555 	IRELE(ip);
3556  fail:
3557 	/*
3558 	 * We can't read in the inode this bucket points to, or this inode
3559 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3560 	 * some inodes and space, but at least we won't hang.
3561 	 *
3562 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3563 	 * clear the inode pointer in the bucket.
3564 	 */
3565 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3566 	return NULLAGINO;
3567 }
3568 
3569 /*
3570  * xlog_iunlink_recover
3571  *
3572  * This is called during recovery to process any inodes which
3573  * were unlinked but not freed when the system crashed.  These
3574  * inodes will be on the lists in the AGI blocks.  What we do
3575  * here is scan all the AGIs and fully truncate and free any
3576  * inodes found on the lists.  Each inode is removed from the
3577  * lists when it has been fully truncated and is freed.  The
3578  * freeing of the inode and its removal from the list must be
3579  * atomic.
3580  */
3581 STATIC void
3582 xlog_recover_process_iunlinks(
3583 	struct xlog	*log)
3584 {
3585 	xfs_mount_t	*mp;
3586 	xfs_agnumber_t	agno;
3587 	xfs_agi_t	*agi;
3588 	xfs_buf_t	*agibp;
3589 	xfs_agino_t	agino;
3590 	int		bucket;
3591 	int		error;
3592 	uint		mp_dmevmask;
3593 
3594 	mp = log->l_mp;
3595 
3596 	/*
3597 	 * Prevent any DMAPI event from being sent while in this function.
3598 	 */
3599 	mp_dmevmask = mp->m_dmevmask;
3600 	mp->m_dmevmask = 0;
3601 
3602 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3603 		/*
3604 		 * Find the agi for this ag.
3605 		 */
3606 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3607 		if (error) {
3608 			/*
3609 			 * AGI is corrupt. Don't process it.
3610 			 *
3611 			 * We should probably mark the filesystem as corrupt
3612 			 * after we've recovered all the AGs we can....
3613 			 */
3614 			continue;
3615 		}
3616 		/*
3617 		 * Unlock the buffer so that it can be acquired in the normal
3618 		 * course of the transaction to truncate and free each inode.
3619 		 * Because we are not racing with anyone else here for the AGI
3620 		 * buffer, we don't even need to hold it locked to read the
3621 		 * initial unlinked bucket entries out of the buffer. We keep a
3622 		 * buffer reference, though, so that it stays pinned in memory
3623 		 * while we need the buffer.
3624 		 */
3625 		agi = XFS_BUF_TO_AGI(agibp);
3626 		xfs_buf_unlock(agibp);
3627 
3628 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3629 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3630 			while (agino != NULLAGINO) {
3631 				agino = xlog_recover_process_one_iunlink(mp,
3632 							agno, agino, bucket);
3633 			}
3634 		}
3635 		xfs_buf_rele(agibp);
3636 	}
3637 
3638 	mp->m_dmevmask = mp_dmevmask;
3639 }
3640 
3641 /*
3642  * Unpack the log buffer data and CRC check it. If the check fails, issue a
3643  * warning if and only if the CRC in the header is non-zero. This makes the
3644  * check an advisory warning, and the zero CRC check will prevent failure
3645  * warnings from being emitted when upgrading the kernel from one that does not
3646  * add CRCs by default.
3647  *
3648  * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3649  * corruption failure
3650  */
3651 STATIC int
3652 xlog_unpack_data_crc(
3653 	struct xlog_rec_header	*rhead,
3654 	xfs_caddr_t		dp,
3655 	struct xlog		*log)
3656 {
3657 	__le32			crc;
3658 
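	/*
	 * xlog_cksum() returns the CRC already in on-disk (little-endian)
	 * form, so it can be compared against h_crc without byte swapping.
	 */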
3659 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3660 	if (crc != rhead->h_crc) {
3661 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3662 			xfs_alert(log->l_mp,
3663 		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3664 					le32_to_cpu(rhead->h_crc),
3665 					le32_to_cpu(crc));
3666 			xfs_hex_dump(dp, 32);
3667 		}
3668 
3669 		/*
3670 		 * If we've detected a log record corruption, then we can't
3671 		 * recover past this point. Abort recovery if we are enforcing
3672 		 * CRC protection by punting an error back up the stack.
3673 		 */
3674 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3675 			return EFSCORRUPTED;
3676 	}
3677 
3678 	return 0;
3679 }
3680 
3681 STATIC int
3682 xlog_unpack_data(
3683 	struct xlog_rec_header	*rhead,
3684 	xfs_caddr_t		dp,
3685 	struct xlog		*log)
3686 {
3687 	int			i, j, k;
3688 	int			error;
3689 
3690 	error = xlog_unpack_data_crc(rhead, dp, log);
3691 	if (error)
3692 		return error;
3693 
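	/*
	 * When the record was written, the first four bytes of each basic
	 * block were overwritten with the record's cycle number so torn
	 * writes can be detected; the original words were stashed in
	 * h_cycle_data (and, for v2 logs with large records, in the
	 * extended headers).  Put them back before replaying the data.
	 */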
3694 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3695 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3696 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3697 		dp += BBSIZE;
3698 	}
3699 
3700 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3701 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3702 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3703 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3704 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3705 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3706 			dp += BBSIZE;
3707 		}
3708 	}
3709 
3710 	return 0;
3711 }
3712 
3713 STATIC int
3714 xlog_valid_rec_header(
3715 	struct xlog		*log,
3716 	struct xlog_rec_header	*rhead,
3717 	xfs_daddr_t		blkno)
3718 {
3719 	int			hlen;
3720 
3721 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3722 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3723 				XFS_ERRLEVEL_LOW, log->l_mp);
3724 		return XFS_ERROR(EFSCORRUPTED);
3725 	}
3726 	if (unlikely(
3727 	    (!rhead->h_version ||
3728 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3729 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3730 			__func__, be32_to_cpu(rhead->h_version));
3731 		return XFS_ERROR(EIO);
3732 	}
3733 
3734 	/* LR body must have data or it wouldn't have been written */
3735 	hlen = be32_to_cpu(rhead->h_len);
3736 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3737 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3738 				XFS_ERRLEVEL_LOW, log->l_mp);
3739 		return XFS_ERROR(EFSCORRUPTED);
3740 	}
3741 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3742 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3743 				XFS_ERRLEVEL_LOW, log->l_mp);
3744 		return XFS_ERROR(EFSCORRUPTED);
3745 	}
3746 	return 0;
3747 }
3748 
3749 /*
3750  * Read the log from tail to head and process the log records found.
3751  * Handle the two cases where the tail and head are in the same cycle
3752  * and where the active portion of the log wraps around the end of
3753  * the physical log separately.  The pass parameter is passed through
3754  * to the routines called to process the data and is not looked at
3755  * here.
3756  */
3757 STATIC int
3758 xlog_do_recovery_pass(
3759 	struct xlog		*log,
3760 	xfs_daddr_t		head_blk,
3761 	xfs_daddr_t		tail_blk,
3762 	int			pass)
3763 {
3764 	xlog_rec_header_t	*rhead;
3765 	xfs_daddr_t		blk_no;
3766 	xfs_caddr_t		offset;
3767 	xfs_buf_t		*hbp, *dbp;
3768 	int			error = 0, h_size;
3769 	int			bblks, split_bblks;
3770 	int			hblks, split_hblks, wrapped_hblks;
3771 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3772 
3773 	ASSERT(head_blk != tail_blk);
3774 
3775 	/*
3776 	 * Read the header of the tail block and get the iclog buffer size from
3777 	 * h_size.  Use this to tell how many sectors make up the log header.
3778 	 */
3779 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3780 		/*
3781 		 * When using variable length iclogs, read first sector of
3782 		 * iclog header and extract the header size from it.  Get a
3783 		 * new hbp that is the correct size.
3784 		 */
3785 		hbp = xlog_get_bp(log, 1);
3786 		if (!hbp)
3787 			return ENOMEM;
3788 
3789 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3790 		if (error)
3791 			goto bread_err1;
3792 
3793 		rhead = (xlog_rec_header_t *)offset;
3794 		error = xlog_valid_rec_header(log, rhead, tail_blk);
3795 		if (error)
3796 			goto bread_err1;
3797 		h_size = be32_to_cpu(rhead->h_size);
3798 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3799 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3800 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3801 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3802 				hblks++;
3803 			xlog_put_bp(hbp);
3804 			hbp = xlog_get_bp(log, hblks);
3805 		} else {
3806 			hblks = 1;
3807 		}
3808 	} else {
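		/*
		 * Version 1 logs use a single 512 byte sector for the
		 * record header; size the data buffer for the largest
		 * possible v1 record.
		 */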
3809 		ASSERT(log->l_sectBBsize == 1);
3810 		hblks = 1;
3811 		hbp = xlog_get_bp(log, 1);
3812 		h_size = XLOG_BIG_RECORD_BSIZE;
3813 	}
3814 
3815 	if (!hbp)
3816 		return ENOMEM;
3817 	dbp = xlog_get_bp(log, BTOBB(h_size));
3818 	if (!dbp) {
3819 		xlog_put_bp(hbp);
3820 		return ENOMEM;
3821 	}
3822 
3823 	memset(rhash, 0, sizeof(rhash));
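	/*
	 * If the tail is physically before the head, the active log does
	 * not wrap and we can read the records sequentially from tail to
	 * head.
	 */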
3824 	if (tail_blk <= head_blk) {
3825 		for (blk_no = tail_blk; blk_no < head_blk; ) {
3826 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3827 			if (error)
3828 				goto bread_err2;
3829 
3830 			rhead = (xlog_rec_header_t *)offset;
3831 			error = xlog_valid_rec_header(log, rhead, blk_no);
3832 			if (error)
3833 				goto bread_err2;
3834 
3835 			/* blocks in data section */
3836 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3837 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3838 					   &offset);
3839 			if (error)
3840 				goto bread_err2;
3841 
3842 			error = xlog_unpack_data(rhead, offset, log);
3843 			if (error)
3844 				goto bread_err2;
3845 
3846 			error = xlog_recover_process_data(log,
3847 						rhash, rhead, offset, pass);
3848 			if (error)
3849 				goto bread_err2;
3850 			blk_no += bblks + hblks;
3851 		}
3852 	} else {
3853 		/*
3854 		 * Perform recovery around the end of the physical log.
3855 		 * When the head is not on the same cycle number as the tail,
3856 		 * we can't do a sequential recovery as above.
3857 		 */
3858 		blk_no = tail_blk;
3859 		while (blk_no < log->l_logBBsize) {
3860 			/*
3861 			 * Check for header wrapping around physical end-of-log
3862 			 */
3863 			offset = hbp->b_addr;
3864 			split_hblks = 0;
3865 			wrapped_hblks = 0;
3866 			if (blk_no + hblks <= log->l_logBBsize) {
3867 				/* Read header in one read */
3868 				error = xlog_bread(log, blk_no, hblks, hbp,
3869 						   &offset);
3870 				if (error)
3871 					goto bread_err2;
3872 			} else {
3873 				/* This LR is split across physical log end */
3874 				if (blk_no != log->l_logBBsize) {
3875 					/* some data before physical log end */
3876 					ASSERT(blk_no <= INT_MAX);
3877 					split_hblks = log->l_logBBsize - (int)blk_no;
3878 					ASSERT(split_hblks > 0);
3879 					error = xlog_bread(log, blk_no,
3880 							   split_hblks, hbp,
3881 							   &offset);
3882 					if (error)
3883 						goto bread_err2;
3884 				}
3885 
3886 				/*
3887 				 * Note: this black magic still works with
3888 				 * large sector sizes (non-512) only because:
3889 				 * - we increased the buffer size originally
3890 				 *   by 1 sector giving us enough extra space
3891 				 *   for the second read;
3892 				 * - the log start is guaranteed to be sector
3893 				 *   aligned;
3894 				 * - we read the log end (LR header start)
3895 				 *   _first_, then the log start (LR header end)
3896 				 *   - order is important.
3897 				 */
3898 				wrapped_hblks = hblks - split_hblks;
3899 				error = xlog_bread_offset(log, 0,
3900 						wrapped_hblks, hbp,
3901 						offset + BBTOB(split_hblks));
3902 				if (error)
3903 					goto bread_err2;
3904 			}
3905 			rhead = (xlog_rec_header_t *)offset;
3906 			error = xlog_valid_rec_header(log, rhead,
3907 						split_hblks ? blk_no : 0);
3908 			if (error)
3909 				goto bread_err2;
3910 
3911 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3912 			blk_no += hblks;
3913 
3914 			/* Read in data for log record */
3915 			if (blk_no + bblks <= log->l_logBBsize) {
3916 				error = xlog_bread(log, blk_no, bblks, dbp,
3917 						   &offset);
3918 				if (error)
3919 					goto bread_err2;
3920 			} else {
3921 				/* This log record is split across the
3922 				 * physical end of log */
3923 				offset = dbp->b_addr;
3924 				split_bblks = 0;
3925 				if (blk_no != log->l_logBBsize) {
3926 					/* some data is before the physical
3927 					 * end of log */
3928 					ASSERT(!wrapped_hblks);
3929 					ASSERT(blk_no <= INT_MAX);
3930 					split_bblks =
3931 						log->l_logBBsize - (int)blk_no;
3932 					ASSERT(split_bblks > 0);
3933 					error = xlog_bread(log, blk_no,
3934 							split_bblks, dbp,
3935 							&offset);
3936 					if (error)
3937 						goto bread_err2;
3938 				}
3939 
3940 				/*
3941 				 * Note: this black magic still works with
3942 				 * large sector sizes (non-512) only because:
3943 				 * - we increased the buffer size originally
3944 				 *   by 1 sector giving us enough extra space
3945 				 *   for the second read;
3946 				 * - the log start is guaranteed to be sector
3947 				 *   aligned;
3948 				 * - we read the log end (LR header start)
3949 				 *   _first_, then the log start (LR header end)
3950 				 *   - order is important.
3951 				 */
3952 				error = xlog_bread_offset(log, 0,
3953 						bblks - split_bblks, dbp,
3954 						offset + BBTOB(split_bblks));
3955 				if (error)
3956 					goto bread_err2;
3957 			}
3958 
3959 			error = xlog_unpack_data(rhead, offset, log);
3960 			if (error)
3961 				goto bread_err2;
3962 
3963 			error = xlog_recover_process_data(log, rhash,
3964 							rhead, offset, pass);
3965 			if (error)
3966 				goto bread_err2;
3967 			blk_no += bblks;
3968 		}
3969 
3970 		ASSERT(blk_no >= log->l_logBBsize);
3971 		blk_no -= log->l_logBBsize;
3972 
3973 		/* read first part of physical log */
3974 		while (blk_no < head_blk) {
3975 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3976 			if (error)
3977 				goto bread_err2;
3978 
3979 			rhead = (xlog_rec_header_t *)offset;
3980 			error = xlog_valid_rec_header(log, rhead, blk_no);
3981 			if (error)
3982 				goto bread_err2;
3983 
3984 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3985 			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3986 					   &offset);
3987 			if (error)
3988 				goto bread_err2;
3989 
3990 			error = xlog_unpack_data(rhead, offset, log);
3991 			if (error)
3992 				goto bread_err2;
3993 
3994 			error = xlog_recover_process_data(log, rhash,
3995 							rhead, offset, pass);
3996 			if (error)
3997 				goto bread_err2;
3998 			blk_no += bblks + hblks;
3999 		}
4000 	}
4001 
4002  bread_err2:
4003 	xlog_put_bp(dbp);
4004  bread_err1:
4005 	xlog_put_bp(hbp);
4006 	return error;
4007 }
4008 
4009 /*
4010  * Do the recovery of the log.  We actually do this in two phases.
4011  * The two passes are necessary in order to implement the function
4012  * of cancelling a record written into the log.  The first pass
4013  * determines those things which have been cancelled, and the
4014  * second pass replays log items normally except for those which
4015  * have been cancelled.  The handling of the replay and cancellations
4016  * takes place in the log item type specific routines.
4017  *
4018  * The table of items which have cancel records in the log is allocated
4019  * and freed at this level, since only here do we know when all of
4020  * the log recovery has been completed.
4021  */
4022 STATIC int
4023 xlog_do_log_recovery(
4024 	struct xlog	*log,
4025 	xfs_daddr_t	head_blk,
4026 	xfs_daddr_t	tail_blk)
4027 {
4028 	int		error, i;
4029 
4030 	ASSERT(head_blk != tail_blk);
4031 
4032 	/*
4033 	 * First do a pass to find all of the cancelled buf log items.
4034 	 * Store them in the buf_cancel_table for use in the second pass.
4035 	 */
4036 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4037 						 sizeof(struct list_head),
4038 						 KM_SLEEP);
4039 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4040 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
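	/*
	 * The cancel table is a small hash of list heads, keyed on the
	 * cancelled buffer's starting block number (see
	 * XLOG_BUF_CANCEL_BUCKET).
	 */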
4041 
4042 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4043 				      XLOG_RECOVER_PASS1);
4044 	if (error != 0) {
4045 		kmem_free(log->l_buf_cancel_table);
4046 		log->l_buf_cancel_table = NULL;
4047 		return error;
4048 	}
4049 	/*
4050 	 * Then do a second pass to actually recover the items in the log.
4051 	 * When it is complete free the table of buf cancel items.
4052 	 */
4053 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4054 				      XLOG_RECOVER_PASS2);
4055 #ifdef DEBUG
4056 	if (!error) {
4057 		int	i;
4058 
4059 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4060 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4061 	}
4062 #endif	/* DEBUG */
4063 
4064 	kmem_free(log->l_buf_cancel_table);
4065 	log->l_buf_cancel_table = NULL;
4066 
4067 	return error;
4068 }
4069 
4070 /*
4071  * Do the actual recovery
4072  */
4073 STATIC int
4074 xlog_do_recover(
4075 	struct xlog	*log,
4076 	xfs_daddr_t	head_blk,
4077 	xfs_daddr_t	tail_blk)
4078 {
4079 	int		error;
4080 	xfs_buf_t	*bp;
4081 	xfs_sb_t	*sbp;
4082 
4083 	/*
4084 	 * First replay the images in the log.
4085 	 */
4086 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
4087 	if (error)
4088 		return error;
4089 
4090 	/*
4091 	 * If IO errors happened during recovery, bail out.
4092 	 */
4093 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4094 		return EIO;
4095 	}
4096 
4097 	/*
4098 	 * We now update the tail_lsn since much of the recovery has completed
4099 	 * and there may be space available to use.  If there were no extent
4100 	 * or iunlinks, we can free up the entire log and set the tail_lsn to
4101 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
4102 	 * lsn of the last known good LR on disk.  If there are extent frees
4103 	 * or iunlinks they will have some entries in the AIL; so we look at
4104 	 * the AIL to determine how to set the tail_lsn.
4105 	 */
4106 	xlog_assign_tail_lsn(log->l_mp);
4107 
4108 	/*
4109 	 * Now that we've finished replaying all buffer and inode
4110 	 * updates, re-read in the superblock and reverify it.
4111 	 */
4112 	bp = xfs_getsb(log->l_mp, 0);
4113 	XFS_BUF_UNDONE(bp);
4114 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
4115 	XFS_BUF_READ(bp);
4116 	XFS_BUF_UNASYNC(bp);
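	/*
	 * Attach the superblock buffer ops so the re-read is verified
	 * (including CRC checking on v5 filesystems).
	 */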
4117 	bp->b_ops = &xfs_sb_buf_ops;
4118 	xfsbdstrat(log->l_mp, bp);
4119 	error = xfs_buf_iowait(bp);
4120 	if (error) {
4121 		xfs_buf_ioerror_alert(bp, __func__);
4122 		ASSERT(0);
4123 		xfs_buf_relse(bp);
4124 		return error;
4125 	}
4126 
4127 	/* Convert superblock from on-disk format */
4128 	sbp = &log->l_mp->m_sb;
4129 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4130 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4131 	ASSERT(xfs_sb_good_version(sbp));
4132 	xfs_buf_relse(bp);
4133 
4134 	/* We've re-read the superblock so re-initialize per-cpu counters */
4135 	xfs_icsb_reinit_counters(log->l_mp);
4136 
4137 	xlog_recover_check_summary(log);
4138 
4139 	/* Normal transactions can now occur */
4140 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4141 	return 0;
4142 }
4143 
4144 /*
4145  * Perform recovery and re-initialize some log variables in xlog_find_tail.
4146  *
4147  * Return error or zero.
4148  */
4149 int
4150 xlog_recover(
4151 	struct xlog	*log)
4152 {
4153 	xfs_daddr_t	head_blk, tail_blk;
4154 	int		error;
4155 
4156 	/* find the tail of the log */
4157 	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4158 		return error;
4159 
4160 	if (tail_blk != head_blk) {
4161 		/* There used to be a comment here:
4162 		 *
4163 		 * disallow recovery on read-only mounts.  note -- mount
4164 		 * checks for ENOSPC and turns it into an intelligent
4165 		 * error message.
4166 		 * ...but this is no longer true.  Now, unless you specify
4167 		 * NORECOVERY (in which case this function would never be
4168 		 * called), we just go ahead and recover.  We do this all
4169 		 * under the vfs layer, so we can get away with it unless
4170 		 * the device itself is read-only, in which case we fail.
4171 		 */
4172 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4173 			return error;
4174 		}
4175 
4176 		/*
4177 		 * Version 5 superblock log feature mask validation. We know the
4178 		 * log is dirty so check if there are any unknown log features
4179 		 * in what we need to recover. If there are unknown features
4180 		 * (e.g. unsupported transactions), then simply reject the
4181 		 * attempt at recovery before touching anything.
4182 		 */
4183 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4184 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4185 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4186 			xfs_warn(log->l_mp,
4187 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4188 "The log can not be fully and/or safely recovered by this kernel.\n"
4189 "Please recover the log on a kernel that supports the unknown features.",
4190 				(log->l_mp->m_sb.sb_features_log_incompat &
4191 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4192 			return EINVAL;
4193 		}
4194 
4195 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4196 				log->l_mp->m_logname ? log->l_mp->m_logname
4197 						     : "internal");
4198 
4199 		error = xlog_do_recover(log, head_blk, tail_blk);
4200 		log->l_flags |= XLOG_RECOVERY_NEEDED;
4201 	}
4202 	return error;
4203 }
4204 
4205 /*
4206  * In the first part of recovery we replay inodes and buffers and build
4207  * up the list of extent free items which need to be processed.  Here
4208  * we process the extent free items and clean up the on disk unlinked
4209  * inode lists.  This is separated from the first part of recovery so
4210  * that the root and real-time bitmap inodes can be read in from disk in
4211  * between the two stages.  This is necessary so that we can free space
4212  * in the real-time portion of the file system.
4213  */
4214 int
4215 xlog_recover_finish(
4216 	struct xlog	*log)
4217 {
4218 	/*
4219 	 * Now we're ready to do the transactions needed for the
4220 	 * rest of recovery.  Start with completing all the extent
4221 	 * free intent records and then process the unlinked inode
4222 	 * lists.  At this point, we essentially run in normal mode
4223 	 * except that we're still performing recovery actions
4224 	 * rather than accepting new requests.
4225 	 */
4226 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4227 		int	error;
4228 		error = xlog_recover_process_efis(log);
4229 		if (error) {
4230 			xfs_alert(log->l_mp, "Failed to recover EFIs");
4231 			return error;
4232 		}
4233 		/*
4234 		 * Sync the log to get all the EFIs out of the AIL.
4235 		 * This isn't absolutely necessary, but it helps in
4236 		 * case the unlink transactions would have problems
4237 		 * pushing the EFIs out of the way.
4238 		 */
4239 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4240 
4241 		xlog_recover_process_iunlinks(log);
4242 
4243 		xlog_recover_check_summary(log);
4244 
4245 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4246 				log->l_mp->m_logname ? log->l_mp->m_logname
4247 						     : "internal");
4248 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4249 	} else {
4250 		xfs_info(log->l_mp, "Ending clean mount");
4251 	}
4252 	return 0;
4253 }
4254 
4255 
4256 #if defined(DEBUG)
4257 /*
4258  * Read all of the agf and agi counters and check that they
4259  * are consistent with the superblock counters.
4260  */
4261 void
4262 xlog_recover_check_summary(
4263 	struct xlog	*log)
4264 {
4265 	xfs_mount_t	*mp;
4266 	xfs_agf_t	*agfp;
4267 	xfs_buf_t	*agfbp;
4268 	xfs_buf_t	*agibp;
4269 	xfs_agnumber_t	agno;
4270 	__uint64_t	freeblks;
4271 	__uint64_t	itotal;
4272 	__uint64_t	ifree;
4273 	int		error;
4274 
4275 	mp = log->l_mp;
4276 
4277 	freeblks = 0LL;
4278 	itotal = 0LL;
4279 	ifree = 0LL;
4280 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4281 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4282 		if (error) {
4283 			xfs_alert(mp, "%s agf read failed agno %d error %d",
4284 						__func__, agno, error);
4285 		} else {
4286 			agfp = XFS_BUF_TO_AGF(agfbp);
4287 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
4288 				    be32_to_cpu(agfp->agf_flcount);
4289 			xfs_buf_relse(agfbp);
4290 		}
4291 
4292 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4293 		if (error) {
4294 			xfs_alert(mp, "%s agi read failed agno %d error %d",
4295 						__func__, agno, error);
4296 		} else {
4297 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
4298 
4299 			itotal += be32_to_cpu(agi->agi_count);
4300 			ifree += be32_to_cpu(agi->agi_freecount);
4301 			xfs_buf_relse(agibp);
4302 		}
4303 	}
4304 }
4305 #endif /* DEBUG */
4306