xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision 3932b9ca)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_inum.h"
26 #include "xfs_sb.h"
27 #include "xfs_ag.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_format.h"
30 #include "xfs_inode.h"
31 #include "xfs_trans.h"
32 #include "xfs_log.h"
33 #include "xfs_log_priv.h"
34 #include "xfs_log_recover.h"
35 #include "xfs_inode_item.h"
36 #include "xfs_extfree_item.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_alloc.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_quota.h"
41 #include "xfs_cksum.h"
42 #include "xfs_trace.h"
43 #include "xfs_icache.h"
44 #include "xfs_bmap_btree.h"
45 #include "xfs_dinode.h"
46 #include "xfs_error.h"
47 #include "xfs_dir2.h"
48 
49 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
50 
51 STATIC int
52 xlog_find_zeroed(
53 	struct xlog	*,
54 	xfs_daddr_t	*);
55 STATIC int
56 xlog_clear_stale_blocks(
57 	struct xlog	*,
58 	xfs_lsn_t);
59 #if defined(DEBUG)
60 STATIC void
61 xlog_recover_check_summary(
62 	struct xlog *);
63 #else
64 #define	xlog_recover_check_summary(log)
65 #endif
66 
67 /*
68  * This structure is used during recovery to record the buf log items which
69  * have been canceled and should not be replayed.
70  */
71 struct xfs_buf_cancel {
72 	xfs_daddr_t		bc_blkno;
73 	uint			bc_len;
74 	int			bc_refcount;
75 	struct list_head	bc_list;
76 };
77 
78 /*
79  * Sector aligned buffer routines for buffer create/read/write/access
80  */
81 
82 /*
83  * Verify that the given count of basic blocks is a valid number of blocks
84  * to specify for an operation involving the given XFS log buffer.
85  * Returns nonzero if the count is valid, 0 otherwise.
86  */
87 
88 static inline int
89 xlog_buf_bbcount_valid(
90 	struct xlog	*log,
91 	int		bbcount)
92 {
93 	return bbcount > 0 && bbcount <= log->l_logBBsize;
94 }
95 
96 /*
97  * Allocate a buffer to hold log data.  The buffer needs to be able
98  * to map to a range of nbblks basic blocks at any valid (basic
99  * block) offset within the log.
100  */
101 STATIC xfs_buf_t *
102 xlog_get_bp(
103 	struct xlog	*log,
104 	int		nbblks)
105 {
106 	struct xfs_buf	*bp;
107 
108 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
109 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
110 			nbblks);
111 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
112 		return NULL;
113 	}
114 
115 	/*
116 	 * We do log I/O in units of log sectors (a power-of-2
117 	 * multiple of the basic block size), so we round up the
118 	 * requested size to accommodate the basic blocks required
119 	 * for complete log sectors.
120 	 *
121 	 * In addition, the buffer may be used for a non-sector-
122 	 * aligned block offset, in which case an I/O of the
123 	 * requested size could extend beyond the end of the
124 	 * buffer.  If the requested size is only 1 basic block it
125 	 * will never straddle a sector boundary, so this won't be
126 	 * an issue.  Nor will this be a problem if the log I/O is
127 	 * done in basic blocks (sector size 1).  But otherwise we
128 	 * extend the buffer by one extra log sector to ensure
129 	 * there's space to accommodate this possibility.
130 	 */
131 	if (nbblks > 1 && log->l_sectBBsize > 1)
132 		nbblks += log->l_sectBBsize;
133 	nbblks = round_up(nbblks, log->l_sectBBsize);
134 
135 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
136 	if (bp)
137 		xfs_buf_unlock(bp);
138 	return bp;
139 }
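
/*
 * Illustrative sketch (annotation, not part of the original source):
 * the sizing rule above, pulled out as a hypothetical helper.  With
 * l_sectBBsize = 8, a request for nbblks = 5 is padded to 13 and then
 * rounded up to 16 basic blocks, so a 5-block read starting at any
 * non-sector-aligned offset still fits inside the buffer.
 */
static inline int
xlog_example_bp_size(
	struct xlog	*log,
	int		nbblks)
{
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;	/* allow for misalignment */
	return round_up(nbblks, log->l_sectBBsize); /* whole log sectors */
}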
140 
141 STATIC void
142 xlog_put_bp(
143 	xfs_buf_t	*bp)
144 {
145 	xfs_buf_free(bp);
146 }
147 
148 /*
149  * Return the address of the start of the given block number's data
150  * in a log buffer.  The buffer covers a log sector-aligned region.
151  */
152 STATIC xfs_caddr_t
153 xlog_align(
154 	struct xlog	*log,
155 	xfs_daddr_t	blk_no,
156 	int		nbblks,
157 	struct xfs_buf	*bp)
158 {
159 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
160 
161 	ASSERT(offset + nbblks <= bp->b_length);
162 	return bp->b_addr + BBTOB(offset);
163 }
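
/*
 * Worked example (annotation, not part of the original source): with
 * l_sectBBsize = 8, a read of blk_no = 13 is rounded down to start at
 * block 8, so the caller's data begins 13 & 7 = 5 basic blocks into
 * the buffer and xlog_align() returns bp->b_addr + BBTOB(5).
 */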
164 
165 
166 /*
167  * nbblks should be uint, but the signed type lets us catch a bogus 32-bit length.
168  */
169 STATIC int
170 xlog_bread_noalign(
171 	struct xlog	*log,
172 	xfs_daddr_t	blk_no,
173 	int		nbblks,
174 	struct xfs_buf	*bp)
175 {
176 	int		error;
177 
178 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
179 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
180 			nbblks);
181 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
182 		return -EFSCORRUPTED;
183 	}
184 
185 	blk_no = round_down(blk_no, log->l_sectBBsize);
186 	nbblks = round_up(nbblks, log->l_sectBBsize);
187 
188 	ASSERT(nbblks > 0);
189 	ASSERT(nbblks <= bp->b_length);
190 
191 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
192 	XFS_BUF_READ(bp);
193 	bp->b_io_length = nbblks;
194 	bp->b_error = 0;
195 
196 	if (XFS_FORCED_SHUTDOWN(log->l_mp))
197 		return -EIO;
198 
199 	xfs_buf_iorequest(bp);
200 	error = xfs_buf_iowait(bp);
201 	if (error)
202 		xfs_buf_ioerror_alert(bp, __func__);
203 	return error;
204 }
205 
206 STATIC int
207 xlog_bread(
208 	struct xlog	*log,
209 	xfs_daddr_t	blk_no,
210 	int		nbblks,
211 	struct xfs_buf	*bp,
212 	xfs_caddr_t	*offset)
213 {
214 	int		error;
215 
216 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
217 	if (error)
218 		return error;
219 
220 	*offset = xlog_align(log, blk_no, nbblks, bp);
221 	return 0;
222 }
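
/*
 * Usage sketch (hypothetical, not part of the original source): how a
 * caller combines the helpers above to inspect a single block.  The
 * cycle number is stamped in the first word of every basic block, so
 * a one-block read is sufficient.
 */
STATIC int
xlog_example_read_cycle(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	uint		*cycle)
{
	struct xfs_buf	*bp;
	xfs_caddr_t	offset;
	int		error;

	bp = xlog_get_bp(log, 1);		/* room for one basic block */
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, blk_no, 1, bp, &offset);
	if (!error)
		*cycle = xlog_get_cycle(offset);
	xlog_put_bp(bp);
	return error;
}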
223 
224 /*
225  * Read at an offset into the buffer. Returns with the buffer in its original
226  * state regardless of the result of the read.
227  */
228 STATIC int
229 xlog_bread_offset(
230 	struct xlog	*log,
231 	xfs_daddr_t	blk_no,		/* block to read from */
232 	int		nbblks,		/* blocks to read */
233 	struct xfs_buf	*bp,
234 	xfs_caddr_t	offset)
235 {
236 	xfs_caddr_t	orig_offset = bp->b_addr;
237 	int		orig_len = BBTOB(bp->b_length);
238 	int		error, error2;
239 
240 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
241 	if (error)
242 		return error;
243 
244 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
245 
246 	/* must reset buffer pointer even on error */
247 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
248 	if (error)
249 		return error;
250 	return error2;
251 }
252 
253 /*
254  * Write out the buffer at the given block for the given number of blocks.
255  * The buffer is kept locked across the write and is returned locked.
256  * This can only be used for synchronous log writes.
257  */
258 STATIC int
259 xlog_bwrite(
260 	struct xlog	*log,
261 	xfs_daddr_t	blk_no,
262 	int		nbblks,
263 	struct xfs_buf	*bp)
264 {
265 	int		error;
266 
267 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
268 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
269 			nbblks);
270 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
271 		return -EFSCORRUPTED;
272 	}
273 
274 	blk_no = round_down(blk_no, log->l_sectBBsize);
275 	nbblks = round_up(nbblks, log->l_sectBBsize);
276 
277 	ASSERT(nbblks > 0);
278 	ASSERT(nbblks <= bp->b_length);
279 
280 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
281 	XFS_BUF_ZEROFLAGS(bp);
282 	xfs_buf_hold(bp);
283 	xfs_buf_lock(bp);
284 	bp->b_io_length = nbblks;
285 	bp->b_error = 0;
286 
287 	error = xfs_bwrite(bp);
288 	if (error)
289 		xfs_buf_ioerror_alert(bp, __func__);
290 	xfs_buf_relse(bp);
291 	return error;
292 }
293 
294 #ifdef DEBUG
295 /*
296  * dump debug superblock and log record information
297  */
298 STATIC void
299 xlog_header_check_dump(
300 	xfs_mount_t		*mp,
301 	xlog_rec_header_t	*head)
302 {
303 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
304 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
305 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
306 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
307 }
308 #else
309 #define xlog_header_check_dump(mp, head)
310 #endif
311 
312 /*
313  * check log record header for recovery
314  */
315 STATIC int
316 xlog_header_check_recover(
317 	xfs_mount_t		*mp,
318 	xlog_rec_header_t	*head)
319 {
320 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
321 
322 	/*
323 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
324 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
325 	 * a dirty log created in IRIX.
326 	 */
327 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
328 		xfs_warn(mp,
329 	"dirty log written in incompatible format - can't recover");
330 		xlog_header_check_dump(mp, head);
331 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
332 				 XFS_ERRLEVEL_HIGH, mp);
333 		return -EFSCORRUPTED;
334 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
335 		xfs_warn(mp,
336 	"dirty log entry has mismatched uuid - can't recover");
337 		xlog_header_check_dump(mp, head);
338 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
339 				 XFS_ERRLEVEL_HIGH, mp);
340 		return -EFSCORRUPTED;
341 	}
342 	return 0;
343 }
344 
345 /*
346  * read the head block of the log and check the header
347  */
348 STATIC int
349 xlog_header_check_mount(
350 	xfs_mount_t		*mp,
351 	xlog_rec_header_t	*head)
352 {
353 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
354 
355 	if (uuid_is_nil(&head->h_fs_uuid)) {
356 		/*
357 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
358 		 * h_fs_uuid is nil, we assume this log was last mounted
359 		 * by IRIX and continue.
360 		 */
361 		xfs_warn(mp, "nil uuid in log - IRIX style log");
362 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
363 		xfs_warn(mp, "log has mismatched uuid - can't recover");
364 		xlog_header_check_dump(mp, head);
365 		XFS_ERROR_REPORT("xlog_header_check_mount",
366 				 XFS_ERRLEVEL_HIGH, mp);
367 		return -EFSCORRUPTED;
368 	}
369 	return 0;
370 }
371 
372 STATIC void
373 xlog_recover_iodone(
374 	struct xfs_buf	*bp)
375 {
376 	if (bp->b_error) {
377 		/*
378 		 * We're not going to bother about retrying
379 		 * this during recovery. One strike!
380 		 */
381 		xfs_buf_ioerror_alert(bp, __func__);
382 		xfs_force_shutdown(bp->b_target->bt_mount,
383 					SHUTDOWN_META_IO_ERROR);
384 	}
385 	bp->b_iodone = NULL;
386 	xfs_buf_ioend(bp, 0);
387 }
388 
389 /*
390  * This routine finds (to an approximation) the first block in the physical
391  * log which contains the given cycle.  It uses a binary search algorithm.
392  * Note that the algorithm cannot be perfect because the disk will not
393  * necessarily be perfect.
394  */
395 STATIC int
396 xlog_find_cycle_start(
397 	struct xlog	*log,
398 	struct xfs_buf	*bp,
399 	xfs_daddr_t	first_blk,
400 	xfs_daddr_t	*last_blk,
401 	uint		cycle)
402 {
403 	xfs_caddr_t	offset;
404 	xfs_daddr_t	mid_blk;
405 	xfs_daddr_t	end_blk;
406 	uint		mid_cycle;
407 	int		error;
408 
409 	end_blk = *last_blk;
410 	mid_blk = BLK_AVG(first_blk, end_blk);
411 	while (mid_blk != first_blk && mid_blk != end_blk) {
412 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
413 		if (error)
414 			return error;
415 		mid_cycle = xlog_get_cycle(offset);
416 		if (mid_cycle == cycle)
417 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
418 		else
419 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
420 		mid_blk = BLK_AVG(first_blk, end_blk);
421 	}
422 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
423 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
424 
425 	*last_blk = end_blk;
426 
427 	return 0;
428 }
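
/*
 * Worked example (annotation, not part of the original source):
 * searching for cycle 2 over blocks 0-7 stamped 3 3 3 2 2 2 2 2, with
 * first_blk = 0 and *last_blk = 7.  mid_blk = 3 reads cycle 2, so
 * end_blk becomes 3; mid_blk = 1 reads cycle 3, so first_blk becomes
 * 1; mid_blk = 2 reads cycle 3, so first_blk becomes 2.  The next
 * midpoint equals first_blk, the loop exits, and *last_blk = 3 is the
 * first block stamped with the new cycle.
 */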
429 
430 /*
431  * Check that a range of blocks does not contain stop_on_cycle_no.
432  * Fill in *new_blk with the block offset where such a block is
433  * found, or with -1 (an invalid block number) if there is no such
434  * block in the range.  The scan needs to occur from front to back
435  * and the pointer into the region must be updated since a later
436  * routine will need to perform another test.
437  */
438 STATIC int
439 xlog_find_verify_cycle(
440 	struct xlog	*log,
441 	xfs_daddr_t	start_blk,
442 	int		nbblks,
443 	uint		stop_on_cycle_no,
444 	xfs_daddr_t	*new_blk)
445 {
446 	xfs_daddr_t	i, j;
447 	uint		cycle;
448 	xfs_buf_t	*bp;
449 	xfs_daddr_t	bufblks;
450 	xfs_caddr_t	buf = NULL;
451 	int		error = 0;
452 
453 	/*
454 	 * Greedily allocate a buffer big enough to handle the full
455 	 * range of basic blocks we'll be examining.  If that fails,
456 	 * try a smaller size.  We need to be able to read at least
457 	 * a log sector, or we're out of luck.
458 	 */
459 	bufblks = 1 << ffs(nbblks);
460 	while (bufblks > log->l_logBBsize)
461 		bufblks >>= 1;
462 	while (!(bp = xlog_get_bp(log, bufblks))) {
463 		bufblks >>= 1;
464 		if (bufblks < log->l_sectBBsize)
465 			return -ENOMEM;
466 	}
467 
468 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
469 		int	bcount;
470 
471 		bcount = min(bufblks, (start_blk + nbblks - i));
472 
473 		error = xlog_bread(log, i, bcount, bp, &buf);
474 		if (error)
475 			goto out;
476 
477 		for (j = 0; j < bcount; j++) {
478 			cycle = xlog_get_cycle(buf);
479 			if (cycle == stop_on_cycle_no) {
480 				*new_blk = i+j;
481 				goto out;
482 			}
483 
484 			buf += BBSIZE;
485 		}
486 	}
487 
488 	*new_blk = -1;
489 
490 out:
491 	xlog_put_bp(bp);
492 	return error;
493 }
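
/*
 * Worked example (annotation, not part of the original source): with
 * start_blk = 100, nbblks = 6 and bufblks = 4, the loop above reads
 * blocks 100-103 and then 104-105 (bcount = min(4, 6) = 4, then
 * min(4, 2) = 2), checking the cycle stamped in each basic block and
 * reporting the first block found with stop_on_cycle_no through
 * *new_blk.
 */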
494 
495 /*
496  * Potentially back up over a partial log record write.
497  *
498  * In the typical case, last_blk is the number of the block directly after
499  * a good log record.  Therefore, we subtract one to get the block number
500  * of the last block in the given buffer.  extra_bblks contains the number
501  * of blocks we would have read on a previous read.  This happens when the
502  * last log record is split over the end of the physical log.
503  *
504  * extra_bblks is the number of blocks potentially verified on a previous
505  * call to this routine.
506  */
507 STATIC int
508 xlog_find_verify_log_record(
509 	struct xlog		*log,
510 	xfs_daddr_t		start_blk,
511 	xfs_daddr_t		*last_blk,
512 	int			extra_bblks)
513 {
514 	xfs_daddr_t		i;
515 	xfs_buf_t		*bp;
516 	xfs_caddr_t		offset = NULL;
517 	xlog_rec_header_t	*head = NULL;
518 	int			error = 0;
519 	int			smallmem = 0;
520 	int			num_blks = *last_blk - start_blk;
521 	int			xhdrs;
522 
523 	ASSERT(start_blk != 0 || *last_blk != start_blk);
524 
525 	if (!(bp = xlog_get_bp(log, num_blks))) {
526 		if (!(bp = xlog_get_bp(log, 1)))
527 			return -ENOMEM;
528 		smallmem = 1;
529 	} else {
530 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
531 		if (error)
532 			goto out;
533 		offset += ((num_blks - 1) << BBSHIFT);
534 	}
535 
536 	for (i = (*last_blk) - 1; i >= 0; i--) {
537 		if (i < start_blk) {
538 			/* valid log record not found */
539 			xfs_warn(log->l_mp,
540 		"Log inconsistent (didn't find previous header)");
541 			ASSERT(0);
542 			error = -EIO;
543 			goto out;
544 		}
545 
546 		if (smallmem) {
547 			error = xlog_bread(log, i, 1, bp, &offset);
548 			if (error)
549 				goto out;
550 		}
551 
552 		head = (xlog_rec_header_t *)offset;
553 
554 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
555 			break;
556 
557 		if (!smallmem)
558 			offset -= BBSIZE;
559 	}
560 
561 	/*
562 	 * We hit the beginning of the physical log & still no header.  Return
563	 * to caller.  If the caller can handle a return of 1, then this routine
564 	 * will be called again for the end of the physical log.
565 	 */
566 	if (i == -1) {
567 		error = 1;
568 		goto out;
569 	}
570 
571 	/*
572 	 * We have the final block of the good log (the first block
573	 * of the log record _before_ the head).  So we check the uuid.
574 	 */
575 	if ((error = xlog_header_check_mount(log->l_mp, head)))
576 		goto out;
577 
578 	/*
579 	 * We may have found a log record header before we expected one.
580 	 * last_blk will be the 1st block # with a given cycle #.  We may end
581 	 * up reading an entire log record.  In this case, we don't want to
582 	 * reset last_blk.  Only when last_blk points in the middle of a log
583 	 * record do we update last_blk.
584 	 */
585 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
586 		uint	h_size = be32_to_cpu(head->h_size);
587 
588 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
589 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
590 			xhdrs++;
591 	} else {
592 		xhdrs = 1;
593 	}
594 
595 	if (*last_blk - i + extra_bblks !=
596 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
597 		*last_blk = i;
598 
599 out:
600 	xlog_put_bp(bp);
601 	return error;
602 }
603 
604 /*
605  * Head is defined to be the point of the log where the next log write
606  * could go.  This means that incomplete LR writes at the end are
607  * eliminated when calculating the head.  We aren't guaranteed that previous
608  * LRs have complete transactions.  We only know that a cycle number of
609  * current cycle number -1 won't be present in the log if we start writing
610  * from our current block number.
611  *
612  * last_blk contains the block number of the first block with a given
613  * cycle number.
614  *
615  * Return: zero if normal, non-zero if error.
616  */
617 STATIC int
618 xlog_find_head(
619 	struct xlog	*log,
620 	xfs_daddr_t	*return_head_blk)
621 {
622 	xfs_buf_t	*bp;
623 	xfs_caddr_t	offset;
624 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
625 	int		num_scan_bblks;
626 	uint		first_half_cycle, last_half_cycle;
627 	uint		stop_on_cycle;
628 	int		error, log_bbnum = log->l_logBBsize;
629 
630 	/* Is the end of the log device zeroed? */
631 	error = xlog_find_zeroed(log, &first_blk);
632 	if (error < 0) {
633 		xfs_warn(log->l_mp, "empty log check failed");
634 		return error;
635 	}
636 	if (error == 1) {
637 		*return_head_blk = first_blk;
638 
639 		/* Is the whole lot zeroed? */
640 		if (!first_blk) {
641 			/* Linux XFS shouldn't generate totally zeroed logs -
642 			 * mkfs etc write a dummy unmount record to a fresh
643 			 * log so we can store the uuid in there
644 			 */
645 			xfs_warn(log->l_mp, "totally zeroed log");
646 		}
647 
648 		return 0;
649 	}
650 
651 	first_blk = 0;			/* get cycle # of 1st block */
652 	bp = xlog_get_bp(log, 1);
653 	if (!bp)
654 		return -ENOMEM;
655 
656 	error = xlog_bread(log, 0, 1, bp, &offset);
657 	if (error)
658 		goto bp_err;
659 
660 	first_half_cycle = xlog_get_cycle(offset);
661 
662 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
663 	error = xlog_bread(log, last_blk, 1, bp, &offset);
664 	if (error)
665 		goto bp_err;
666 
667 	last_half_cycle = xlog_get_cycle(offset);
668 	ASSERT(last_half_cycle != 0);
669 
670 	/*
671 	 * If the 1st half cycle number is equal to the last half cycle number,
672 	 * then the entire log is stamped with the same cycle number.  In this
673 	 * case, head_blk can't be set to zero (which makes sense).  The below
674 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
675 	 * we set it to log_bbnum which is an invalid block number, but this
676	 * value makes the math correct.  If head_blk doesn't change through
677 	 * all the tests below, *head_blk is set to zero at the very end rather
678 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
679 	 * in a circular file.
680 	 */
681 	if (first_half_cycle == last_half_cycle) {
682 		/*
683 		 * In this case we believe that the entire log should have
684 		 * cycle number last_half_cycle.  We need to scan backwards
685 		 * from the end verifying that there are no holes still
686 		 * containing last_half_cycle - 1.  If we find such a hole,
687 		 * then the start of that hole will be the new head.  The
688 		 * simple case looks like
689 		 *        x | x ... | x - 1 | x
690 		 * Another case that fits this picture would be
691 		 *        x | x + 1 | x ... | x
692 		 * In this case the head really is somewhere at the end of the
693 		 * log, as one of the latest writes at the beginning was
694 		 * incomplete.
695 		 * One more case is
696 		 *        x | x + 1 | x ... | x - 1 | x
697 		 * This is really the combination of the above two cases, and
698 		 * the head has to end up at the start of the x-1 hole at the
699 		 * end of the log.
700 		 *
701 		 * In the 256k log case, we will read from the beginning to the
702 		 * end of the log and search for cycle numbers equal to x-1.
703 		 * We don't worry about the x+1 blocks that we encounter,
704 		 * because we know that they cannot be the head since the log
705 		 * started with x.
706 		 */
707 		head_blk = log_bbnum;
708 		stop_on_cycle = last_half_cycle - 1;
709 	} else {
710 		/*
711 		 * In this case we want to find the first block with cycle
712 		 * number matching last_half_cycle.  We expect the log to be
713 		 * some variation on
714 		 *        x + 1 ... | x ... | x
715 		 * The first block with cycle number x (last_half_cycle) will
716 		 * be where the new head belongs.  First we do a binary search
717 		 * for the first occurrence of last_half_cycle.  The binary
718 		 * search may not be totally accurate, so then we scan back
719 		 * from there looking for occurrences of last_half_cycle before
720 		 * us.  If that backwards scan wraps around the beginning of
721 		 * the log, then we look for occurrences of last_half_cycle - 1
722 		 * at the end of the log.  The cases we're looking for look
723 		 * like
724 		 *                               v binary search stopped here
725 		 *        x + 1 ... | x | x + 1 | x ... | x
726 		 *                   ^ but we want to locate this spot
727 		 * or
728 		 *        <---------> less than scan distance
729 		 *        x + 1 ... | x ... | x - 1 | x
730 		 *                           ^ we want to locate this spot
731 		 */
732 		stop_on_cycle = last_half_cycle;
733 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
734 						&head_blk, last_half_cycle)))
735 			goto bp_err;
736 	}
737 
738 	/*
739 	 * Now validate the answer.  Scan back some number of maximum possible
740 	 * blocks and make sure each one has the expected cycle number.  The
741 	 * maximum is determined by the total possible amount of buffering
742 	 * in the in-core log.  The following number can be made tighter if
743 	 * we actually look at the block size of the filesystem.
744 	 */
745 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
746 	if (head_blk >= num_scan_bblks) {
747 		/*
748 		 * We are guaranteed that the entire check can be performed
749 		 * in one buffer.
750 		 */
751 		start_blk = head_blk - num_scan_bblks;
752 		if ((error = xlog_find_verify_cycle(log,
753 						start_blk, num_scan_bblks,
754 						stop_on_cycle, &new_blk)))
755 			goto bp_err;
756 		if (new_blk != -1)
757 			head_blk = new_blk;
758 	} else {		/* need to read 2 parts of log */
759 		/*
760 		 * We are going to scan backwards in the log in two parts.
761 		 * First we scan the physical end of the log.  In this part
762 		 * of the log, we are looking for blocks with cycle number
763 		 * last_half_cycle - 1.
764 		 * If we find one, then we know that the log starts there, as
765 		 * we've found a hole that didn't get written in going around
766 		 * the end of the physical log.  The simple case for this is
767 		 *        x + 1 ... | x ... | x - 1 | x
768 		 *        <---------> less than scan distance
769 		 * If all of the blocks at the end of the log have cycle number
770 		 * last_half_cycle, then we check the blocks at the start of
771 		 * the log looking for occurrences of last_half_cycle.  If we
772 		 * find one, then our current estimate for the location of the
773 		 * first occurrence of last_half_cycle is wrong and we move
774 		 * back to the hole we've found.  This case looks like
775 		 *        x + 1 ... | x | x + 1 | x ...
776 		 *                               ^ binary search stopped here
777 		 * Another case we need to handle that only occurs in 256k
778 		 * logs is
779 		 *        x + 1 ... | x ... | x+1 | x ...
780 		 *                   ^ binary search stops here
781 		 * In a 256k log, the scan at the end of the log will see the
782 		 * x + 1 blocks.  We need to skip past those since that is
783 		 * certainly not the head of the log.  By searching for
784 		 * last_half_cycle-1 we accomplish that.
785 		 */
786 		ASSERT(head_blk <= INT_MAX &&
787 			(xfs_daddr_t) num_scan_bblks >= head_blk);
788 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
789 		if ((error = xlog_find_verify_cycle(log, start_blk,
790 					num_scan_bblks - (int)head_blk,
791 					(stop_on_cycle - 1), &new_blk)))
792 			goto bp_err;
793 		if (new_blk != -1) {
794 			head_blk = new_blk;
795 			goto validate_head;
796 		}
797 
798 		/*
799 		 * Scan beginning of log now.  The last part of the physical
800 		 * log is good.  This scan needs to verify that it doesn't find
801 		 * the last_half_cycle.
802 		 */
803 		start_blk = 0;
804 		ASSERT(head_blk <= INT_MAX);
805 		if ((error = xlog_find_verify_cycle(log,
806 					start_blk, (int)head_blk,
807 					stop_on_cycle, &new_blk)))
808 			goto bp_err;
809 		if (new_blk != -1)
810 			head_blk = new_blk;
811 	}
812 
813 validate_head:
814 	/*
815 	 * Now we need to make sure head_blk is not pointing to a block in
816 	 * the middle of a log record.
817 	 */
818 	num_scan_bblks = XLOG_REC_SHIFT(log);
819 	if (head_blk >= num_scan_bblks) {
820 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
821 
822 		/* start ptr at last block ptr before head_blk */
823 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
824 		if (error == 1)
825 			error = -EIO;
826 		if (error)
827 			goto bp_err;
828 	} else {
829 		start_blk = 0;
830 		ASSERT(head_blk <= INT_MAX);
831 		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
832 		if (error < 0)
833 			goto bp_err;
834 		if (error == 1) {
835 			/* We hit the beginning of the log during our search */
836 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
837 			new_blk = log_bbnum;
838 			ASSERT(start_blk <= INT_MAX &&
839 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
840 			ASSERT(head_blk <= INT_MAX);
841 			error = xlog_find_verify_log_record(log, start_blk,
842 							&new_blk, (int)head_blk);
843 			if (error == 1)
844 				error = -EIO;
845 			if (error)
846 				goto bp_err;
847 			if (new_blk != log_bbnum)
848 				head_blk = new_blk;
849 		} else if (error)
850 			goto bp_err;
851 	}
852 
853 	xlog_put_bp(bp);
854 	if (head_blk == log_bbnum)
855 		*return_head_blk = 0;
856 	else
857 		*return_head_blk = head_blk;
858 	/*
859	 * When returning here, we have a good block number.  A bad block number
860	 * would mean that during a previous crash, we didn't have a clean break
861 	 * from cycle number N to cycle number N-1.  In this case, we need
862 	 * to find the first block with cycle number N-1.
863 	 */
864 	return 0;
865 
866  bp_err:
867 	xlog_put_bp(bp);
868 
869 	if (error)
870 		xfs_warn(log->l_mp, "failed to find log head");
871 	return error;
872 }
873 
874 /*
875  * Find the sync block number or the tail of the log.
876  *
877  * This will be the block number of the last record to have its
878  * associated buffers synced to disk.  Every log record header has
879  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
880  * to get a sync block number.  The only concern is to figure out which
881  * log record header to believe.
882  *
883  * The following algorithm uses the log record header with the largest
884  * lsn.  The entire log record does not need to be valid.  We only care
885  * that the header is valid.
886  *
887  * We could speed up the search by using the current head_blk buffer, but it
888  * is not available.
889  */
890 STATIC int
891 xlog_find_tail(
892 	struct xlog		*log,
893 	xfs_daddr_t		*head_blk,
894 	xfs_daddr_t		*tail_blk)
895 {
896 	xlog_rec_header_t	*rhead;
897 	xlog_op_header_t	*op_head;
898 	xfs_caddr_t		offset = NULL;
899 	xfs_buf_t		*bp;
900 	int			error, i, found;
901 	xfs_daddr_t		umount_data_blk;
902 	xfs_daddr_t		after_umount_blk;
903 	xfs_lsn_t		tail_lsn;
904 	int			hblks;
905 
906 	found = 0;
907 
908 	/*
909 	 * Find previous log record
910 	 */
911 	if ((error = xlog_find_head(log, head_blk)))
912 		return error;
913 
914 	bp = xlog_get_bp(log, 1);
915 	if (!bp)
916 		return -ENOMEM;
917 	if (*head_blk == 0) {				/* special case */
918 		error = xlog_bread(log, 0, 1, bp, &offset);
919 		if (error)
920 			goto done;
921 
922 		if (xlog_get_cycle(offset) == 0) {
923 			*tail_blk = 0;
924 			/* leave all other log inited values alone */
925 			goto done;
926 		}
927 	}
928 
929 	/*
930 	 * Search backwards looking for log record header block
931 	 */
932 	ASSERT(*head_blk < INT_MAX);
933 	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
934 		error = xlog_bread(log, i, 1, bp, &offset);
935 		if (error)
936 			goto done;
937 
938 		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
939 			found = 1;
940 			break;
941 		}
942 	}
943 	/*
944 	 * If we haven't found the log record header block, start looking
945 	 * again from the end of the physical log.  XXXmiken: There should be
946 	 * a check here to make sure we didn't search more than N blocks in
947 	 * the previous code.
948 	 */
949 	if (!found) {
950 		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
951 			error = xlog_bread(log, i, 1, bp, &offset);
952 			if (error)
953 				goto done;
954 
955 			if (*(__be32 *)offset ==
956 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
957 				found = 2;
958 				break;
959 			}
960 		}
961 	}
962 	if (!found) {
963 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
964 		xlog_put_bp(bp);
965 		ASSERT(0);
966 		return -EIO;
967 	}
968 
969 	/* find blk_no of tail of log */
970 	rhead = (xlog_rec_header_t *)offset;
971 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
972 
973 	/*
974 	 * Reset log values according to the state of the log when we
975 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
976 	 * one because the next write starts a new cycle rather than
977 	 * continuing the cycle of the last good log record.  At this
978 	 * point we have guaranteed that all partial log records have been
979 	 * accounted for.  Therefore, we know that the last good log record
980 	 * written was complete and ended exactly on the end boundary
981 	 * of the physical log.
982 	 */
983 	log->l_prev_block = i;
984 	log->l_curr_block = (int)*head_blk;
985 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
986 	if (found == 2)
987 		log->l_curr_cycle++;
988 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
989 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
990 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
991 					BBTOB(log->l_curr_block));
992 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
993 					BBTOB(log->l_curr_block));
994 
995 	/*
996 	 * Look for unmount record.  If we find it, then we know there
997 	 * was a clean unmount.  Since 'i' could be the last block in
998 	 * the physical log, we convert to a log block before comparing
999 	 * to the head_blk.
1000 	 *
1001 	 * Save the current tail lsn to use to pass to
1002 	 * xlog_clear_stale_blocks() below.  We won't want to clear the
1003 	 * unmount record if there is one, so we pass the lsn of the
1004 	 * unmount record rather than the block after it.
1005 	 */
1006 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1007 		int	h_size = be32_to_cpu(rhead->h_size);
1008 		int	h_version = be32_to_cpu(rhead->h_version);
1009 
1010 		if ((h_version & XLOG_VERSION_2) &&
1011 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1012 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1013 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1014 				hblks++;
1015 		} else {
1016 			hblks = 1;
1017 		}
1018 	} else {
1019 		hblks = 1;
1020 	}
1021 	after_umount_blk = (i + hblks + (int)
1022 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1023 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1024 	if (*head_blk == after_umount_blk &&
1025 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1026 		umount_data_blk = (i + hblks) % log->l_logBBsize;
1027 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1028 		if (error)
1029 			goto done;
1030 
1031 		op_head = (xlog_op_header_t *)offset;
1032 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1033 			/*
1034 			 * Set tail and last sync so that newly written
1035 			 * log records will point recovery to after the
1036 			 * current unmount record.
1037 			 */
1038 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1039 					log->l_curr_cycle, after_umount_blk);
1040 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1041 					log->l_curr_cycle, after_umount_blk);
1042 			*tail_blk = after_umount_blk;
1043 
1044 			/*
1045 			 * Note that the unmount was clean. If the unmount
1046 			 * was not clean, we need to know this to rebuild the
1047 			 * superblock counters from the perag headers if we
1048 			 * have a filesystem using non-persistent counters.
1049 			 */
1050 			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1051 		}
1052 	}
1053 
1054 	/*
1055 	 * Make sure that there are no blocks in front of the head
1056 	 * with the same cycle number as the head.  This can happen
1057 	 * because we allow multiple outstanding log writes concurrently,
1058 	 * and the later writes might make it out before earlier ones.
1059 	 *
1060 	 * We use the lsn from before modifying it so that we'll never
1061 	 * overwrite the unmount record after a clean unmount.
1062 	 *
1063 	 * Do this only if we are going to recover the filesystem
1064 	 *
1065 	 * NOTE: This used to say "if (!readonly)"
1066 	 * However on Linux, we can & do recover a read-only filesystem.
1067 	 * We only skip recovery if NORECOVERY is specified on mount,
1068 	 * in which case we would not be here.
1069 	 *
1070 	 * But... if the -device- itself is readonly, just skip this.
1071 	 * We can't recover this device anyway, so it won't matter.
1072 	 */
1073 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1074 		error = xlog_clear_stale_blocks(log, tail_lsn);
1075 
1076 done:
1077 	xlog_put_bp(bp);
1078 
1079 	if (error)
1080 		xfs_warn(log->l_mp, "failed to locate log tail");
1081 	return error;
1082 }
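
/*
 * Worked example (annotation, not part of the original source):
 * suppose the last record header sits at block i = 100, hblks = 1 and
 * h_len is 512 bytes (one basic block).  Then umount_data_blk = 101
 * and after_umount_blk = 102, so the log is treated as cleanly
 * unmounted only if the head is exactly at block 102, the record
 * holds a single log operation, and that operation is flagged
 * XLOG_UNMOUNT_TRANS.
 */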
1083 
1084 /*
1085  * Is the log zeroed at all?
1086  *
1087  * The last binary search should be changed to perform an X block read
1088  * once X becomes small enough.  You can then search linearly through
1089  * the X blocks.  This will cut down on the number of reads we need to do.
1090  *
1091  * If the log is partially zeroed, this routine will pass back the blkno
1092  * of the first block with cycle number 0.  It won't have a complete LR
1093  * preceding it.
1094  *
1095  * Return:
1096  *	0  => the log is completely written to
1097  *	1 => use *blk_no as the first block of the log
1098  *	<0 => error has occurred
1099  */
1100 STATIC int
1101 xlog_find_zeroed(
1102 	struct xlog	*log,
1103 	xfs_daddr_t	*blk_no)
1104 {
1105 	xfs_buf_t	*bp;
1106 	xfs_caddr_t	offset;
1107 	uint	        first_cycle, last_cycle;
1108 	xfs_daddr_t	new_blk, last_blk, start_blk;
1109 	xfs_daddr_t     num_scan_bblks;
1110 	int	        error, log_bbnum = log->l_logBBsize;
1111 
1112 	*blk_no = 0;
1113 
1114 	/* check totally zeroed log */
1115 	bp = xlog_get_bp(log, 1);
1116 	if (!bp)
1117 		return -ENOMEM;
1118 	error = xlog_bread(log, 0, 1, bp, &offset);
1119 	if (error)
1120 		goto bp_err;
1121 
1122 	first_cycle = xlog_get_cycle(offset);
1123 	if (first_cycle == 0) {		/* completely zeroed log */
1124 		*blk_no = 0;
1125 		xlog_put_bp(bp);
1126 		return 1;
1127 	}
1128 
1129 	/* check partially zeroed log */
1130 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1131 	if (error)
1132 		goto bp_err;
1133 
1134 	last_cycle = xlog_get_cycle(offset);
1135 	if (last_cycle != 0) {		/* log completely written to */
1136 		xlog_put_bp(bp);
1137 		return 0;
1138 	} else if (first_cycle != 1) {
1139 		/*
1140 		 * If the cycle of the last block is zero, the cycle of
1141 		 * the first block must be 1. If it's not, maybe we're
1142 		 * not looking at a log... Bail out.
1143 		 */
1144 		xfs_warn(log->l_mp,
1145 			"Log inconsistent or not a log (last==0, first!=1)");
1146 		error = -EINVAL;
1147 		goto bp_err;
1148 	}
1149 
1150 	/* we have a partially zeroed log */
1151 	last_blk = log_bbnum-1;
1152 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1153 		goto bp_err;
1154 
1155 	/*
1156 	 * Validate the answer.  Because there is no way to guarantee that
1157 	 * the entire log is made up of log records which are the same size,
1158 	 * we scan over the defined maximum blocks.  At this point, the maximum
1159 	 * is not chosen to mean anything special.   XXXmiken
1160 	 */
1161 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1162 	ASSERT(num_scan_bblks <= INT_MAX);
1163 
1164 	if (last_blk < num_scan_bblks)
1165 		num_scan_bblks = last_blk;
1166 	start_blk = last_blk - num_scan_bblks;
1167 
1168 	/*
1169 	 * We search for any instances of cycle number 0 that occur before
1170 	 * our current estimate of the head.  What we're trying to detect is
1171 	 *        1 ... | 0 | 1 | 0...
1172 	 *                       ^ binary search ends here
1173 	 */
1174 	if ((error = xlog_find_verify_cycle(log, start_blk,
1175 					 (int)num_scan_bblks, 0, &new_blk)))
1176 		goto bp_err;
1177 	if (new_blk != -1)
1178 		last_blk = new_blk;
1179 
1180 	/*
1181	 * Potentially back up over a partial log record write.  We don't need
1182 	 * to search the end of the log because we know it is zero.
1183 	 */
1184 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1185 	if (error == 1)
1186 		error = -EIO;
1187 	if (error)
1188 		goto bp_err;
1189 
1190 	*blk_no = last_blk;
1191 bp_err:
1192 	xlog_put_bp(bp);
1193 	if (error)
1194 		return error;
1195 	return 1;
1196 }
1197 
1198 /*
1199  * These are simple subroutines used by xlog_clear_stale_blocks() below
1200  * to initialize a buffer full of empty log record headers and write
1201  * them into the log.
1202  */
1203 STATIC void
1204 xlog_add_record(
1205 	struct xlog		*log,
1206 	xfs_caddr_t		buf,
1207 	int			cycle,
1208 	int			block,
1209 	int			tail_cycle,
1210 	int			tail_block)
1211 {
1212 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1213 
1214 	memset(buf, 0, BBSIZE);
1215 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1216 	recp->h_cycle = cpu_to_be32(cycle);
1217 	recp->h_version = cpu_to_be32(
1218 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1219 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1220 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1221 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1222 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1223 }
1224 
1225 STATIC int
1226 xlog_write_log_records(
1227 	struct xlog	*log,
1228 	int		cycle,
1229 	int		start_block,
1230 	int		blocks,
1231 	int		tail_cycle,
1232 	int		tail_block)
1233 {
1234 	xfs_caddr_t	offset;
1235 	xfs_buf_t	*bp;
1236 	int		balign, ealign;
1237 	int		sectbb = log->l_sectBBsize;
1238 	int		end_block = start_block + blocks;
1239 	int		bufblks;
1240 	int		error = 0;
1241 	int		i, j = 0;
1242 
1243 	/*
1244 	 * Greedily allocate a buffer big enough to handle the full
1245 	 * range of basic blocks to be written.  If that fails, try
1246 	 * a smaller size.  We need to be able to write at least a
1247 	 * log sector, or we're out of luck.
1248 	 */
1249 	bufblks = 1 << ffs(blocks);
1250 	while (bufblks > log->l_logBBsize)
1251 		bufblks >>= 1;
1252 	while (!(bp = xlog_get_bp(log, bufblks))) {
1253 		bufblks >>= 1;
1254 		if (bufblks < sectbb)
1255 			return -ENOMEM;
1256 	}
1257 
1258 	/* We may need to do a read at the start to fill in part of
1259 	 * the buffer in the starting sector not covered by the first
1260 	 * write below.
1261 	 */
1262 	balign = round_down(start_block, sectbb);
1263 	if (balign != start_block) {
1264 		error = xlog_bread_noalign(log, start_block, 1, bp);
1265 		if (error)
1266 			goto out_put_bp;
1267 
1268 		j = start_block - balign;
1269 	}
1270 
1271 	for (i = start_block; i < end_block; i += bufblks) {
1272 		int		bcount, endcount;
1273 
1274 		bcount = min(bufblks, end_block - start_block);
1275 		endcount = bcount - j;
1276 
1277 		/* We may need to do a read at the end to fill in part of
1278 		 * the buffer in the final sector not covered by the write.
1279 		 * If this is the same sector as the above read, skip it.
1280 		 */
1281 		ealign = round_down(end_block, sectbb);
1282 		if (j == 0 && (start_block + endcount > ealign)) {
1283 			offset = bp->b_addr + BBTOB(ealign - start_block);
1284 			error = xlog_bread_offset(log, ealign, sectbb,
1285 							bp, offset);
1286 			if (error)
1287 				break;
1288 
1289 		}
1290 
1291 		offset = xlog_align(log, start_block, endcount, bp);
1292 		for (; j < endcount; j++) {
1293 			xlog_add_record(log, offset, cycle, i+j,
1294 					tail_cycle, tail_block);
1295 			offset += BBSIZE;
1296 		}
1297 		error = xlog_bwrite(log, start_block, endcount, bp);
1298 		if (error)
1299 			break;
1300 		start_block += endcount;
1301 		j = 0;
1302 	}
1303 
1304  out_put_bp:
1305 	xlog_put_bp(bp);
1306 	return error;
1307 }
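
/*
 * Worked example (annotation, not part of the original source): with
 * sectbb = 8 and start_block = 13, balign = 8, so the sector covering
 * blocks 8-15 is read in first and j = 5.  The stamping loop then
 * begins at the buffer slot for block 13, and the sector-aligned
 * write pushes the preserved contents of blocks 8-12 back out
 * unchanged.  The ealign read gives the same protection to the
 * partial sector at the far end of the range.
 */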
1308 
1309 /*
1310  * This routine is called to blow away any incomplete log writes out
1311  * in front of the log head.  We do this so that we won't become confused
1312  * if we come up, write only a little bit more, and then crash again.
1313  * If we leave the partial log records out there, this situation could
1314  * cause us to think those partial writes are valid blocks since they
1315  * have the current cycle number.  We get rid of them by overwriting them
1316  * with empty log records with the old cycle number rather than the
1317  * current one.
1318  *
1319  * The tail lsn is passed in rather than taken from
1320  * the log so that we will not write over the unmount record after a
1321  * clean unmount in a 512 block log.  Doing so would leave the log without
1322  * any valid log records in it until a new one was written.  If we crashed
1323  * during that time we would not be able to recover.
1324  */
1325 STATIC int
1326 xlog_clear_stale_blocks(
1327 	struct xlog	*log,
1328 	xfs_lsn_t	tail_lsn)
1329 {
1330 	int		tail_cycle, head_cycle;
1331 	int		tail_block, head_block;
1332 	int		tail_distance, max_distance;
1333 	int		distance;
1334 	int		error;
1335 
1336 	tail_cycle = CYCLE_LSN(tail_lsn);
1337 	tail_block = BLOCK_LSN(tail_lsn);
1338 	head_cycle = log->l_curr_cycle;
1339 	head_block = log->l_curr_block;
1340 
1341 	/*
1342 	 * Figure out the distance between the new head of the log
1343 	 * and the tail.  We want to write over any blocks beyond the
1344 	 * head that we may have written just before the crash, but
1345 	 * we don't want to overwrite the tail of the log.
1346 	 */
1347 	if (head_cycle == tail_cycle) {
1348 		/*
1349 		 * The tail is behind the head in the physical log,
1350 		 * so the distance from the head to the tail is the
1351 		 * distance from the head to the end of the log plus
1352 		 * the distance from the beginning of the log to the
1353 		 * tail.
1354 		 */
1355 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1356 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1357 					 XFS_ERRLEVEL_LOW, log->l_mp);
1358 			return -EFSCORRUPTED;
1359 		}
1360 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1361 	} else {
1362 		/*
1363 		 * The head is behind the tail in the physical log,
1364 		 * so the distance from the head to the tail is just
1365 		 * the tail block minus the head block.
1366 		 */
1367 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1368 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1369 					 XFS_ERRLEVEL_LOW, log->l_mp);
1370 			return -EFSCORRUPTED;
1371 		}
1372 		tail_distance = tail_block - head_block;
1373 	}
1374 
1375 	/*
1376 	 * If the head is right up against the tail, we can't clear
1377 	 * anything.
1378 	 */
1379 	if (tail_distance <= 0) {
1380 		ASSERT(tail_distance == 0);
1381 		return 0;
1382 	}
1383 
1384 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1385 	/*
1386 	 * Take the smaller of the maximum amount of outstanding I/O
1387 	 * we could have and the distance to the tail to clear out.
1388 	 * We take the smaller so that we don't overwrite the tail and
1389 	 * we don't waste all day writing from the head to the tail
1390 	 * for no reason.
1391 	 */
1392 	max_distance = MIN(max_distance, tail_distance);
1393 
1394 	if ((head_block + max_distance) <= log->l_logBBsize) {
1395 		/*
1396 		 * We can stomp all the blocks we need to without
1397 		 * wrapping around the end of the log.  Just do it
1398 		 * in a single write.  Use the cycle number of the
1399 		 * current cycle minus one so that the log will look like:
1400 		 *     n ... | n - 1 ...
1401 		 */
1402 		error = xlog_write_log_records(log, (head_cycle - 1),
1403 				head_block, max_distance, tail_cycle,
1404 				tail_block);
1405 		if (error)
1406 			return error;
1407 	} else {
1408 		/*
1409 		 * We need to wrap around the end of the physical log in
1410 		 * order to clear all the blocks.  Do it in two separate
1411 		 * I/Os.  The first write should be from the head to the
1412 		 * end of the physical log, and it should use the current
1413 		 * cycle number minus one just like above.
1414 		 */
1415 		distance = log->l_logBBsize - head_block;
1416 		error = xlog_write_log_records(log, (head_cycle - 1),
1417 				head_block, distance, tail_cycle,
1418 				tail_block);
1419 
1420 		if (error)
1421 			return error;
1422 
1423 		/*
1424 		 * Now write the blocks at the start of the physical log.
1425 		 * This writes the remainder of the blocks we want to clear.
1426 		 * It uses the current cycle number since we're now on the
1427 		 * same cycle as the head so that we get:
1428 		 *    n ... n ... | n - 1 ...
1429 		 *    ^^^^^ blocks we're writing
1430 		 */
1431 		distance = max_distance - (log->l_logBBsize - head_block);
1432 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1433 				tail_cycle, tail_block);
1434 		if (error)
1435 			return error;
1436 	}
1437 
1438 	return 0;
1439 }
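
/*
 * Worked example (annotation, not part of the original source): in a
 * 1000 block log with head_cycle == tail_cycle, head_block = 900 and
 * tail_block = 100, tail_distance = 100 + (1000 - 900) = 200.  If
 * max_distance also works out to 200, the first write stamps cycle
 * n - 1 over blocks 900-999 and the second write wraps and stamps
 * cycle n over blocks 0-99, producing the pattern pictured above.
 */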
1440 
1441 /******************************************************************************
1442  *
1443  *		Log recover routines
1444  *
1445  ******************************************************************************
1446  */
1447 
1448 STATIC xlog_recover_t *
1449 xlog_recover_find_tid(
1450 	struct hlist_head	*head,
1451 	xlog_tid_t		tid)
1452 {
1453 	xlog_recover_t		*trans;
1454 
1455 	hlist_for_each_entry(trans, head, r_list) {
1456 		if (trans->r_log_tid == tid)
1457 			return trans;
1458 	}
1459 	return NULL;
1460 }
1461 
1462 STATIC void
1463 xlog_recover_new_tid(
1464 	struct hlist_head	*head,
1465 	xlog_tid_t		tid,
1466 	xfs_lsn_t		lsn)
1467 {
1468 	xlog_recover_t		*trans;
1469 
1470 	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1471 	trans->r_log_tid   = tid;
1472 	trans->r_lsn	   = lsn;
1473 	INIT_LIST_HEAD(&trans->r_itemq);
1474 
1475 	INIT_HLIST_NODE(&trans->r_list);
1476 	hlist_add_head(&trans->r_list, head);
1477 }
1478 
1479 STATIC void
1480 xlog_recover_add_item(
1481 	struct list_head	*head)
1482 {
1483 	xlog_recover_item_t	*item;
1484 
1485 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1486 	INIT_LIST_HEAD(&item->ri_list);
1487 	list_add_tail(&item->ri_list, head);
1488 }
1489 
1490 STATIC int
1491 xlog_recover_add_to_cont_trans(
1492 	struct xlog		*log,
1493 	struct xlog_recover	*trans,
1494 	xfs_caddr_t		dp,
1495 	int			len)
1496 {
1497 	xlog_recover_item_t	*item;
1498 	xfs_caddr_t		ptr, old_ptr;
1499 	int			old_len;
1500 
1501 	if (list_empty(&trans->r_itemq)) {
1502 		/* finish copying rest of trans header */
1503 		xlog_recover_add_item(&trans->r_itemq);
1504 		ptr = (xfs_caddr_t) &trans->r_theader +
1505 				sizeof(xfs_trans_header_t) - len;
1506 		memcpy(ptr, dp, len); /* d, s, l */
1507 		return 0;
1508 	}
1509 	/* take the tail entry */
1510 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1511 
1512 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1513 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
1514 
1515 	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1516 	memcpy(&ptr[old_len], dp, len); /* d, s, l */
1517 	item->ri_buf[item->ri_cnt-1].i_len += len;
1518 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1519 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1520 	return 0;
1521 }
1522 
1523 /*
1524  * The next region to add is the start of a new region.  It could be
1525  * a whole region or it could be the first part of a new region.  Because
1526  * of this, the assumption here is that the type and size fields of all
1527  * format structures fit into the first 32 bits of the structure.
1528  *
1529  * This works because all regions must be 32 bit aligned.  Therefore, we
1530  * either have both fields or we have neither field.  In the case we have
1531  * neither field, the data part of the region is zero length.  We only have
1532  * a log_op_header and can throw away the header since a new one will appear
1533  * later.  If we have at least 4 bytes, then we can determine how many regions
1534  * will appear in the current log item.
1535  */
1536 STATIC int
1537 xlog_recover_add_to_trans(
1538 	struct xlog		*log,
1539 	struct xlog_recover	*trans,
1540 	xfs_caddr_t		dp,
1541 	int			len)
1542 {
1543 	xfs_inode_log_format_t	*in_f;			/* any will do */
1544 	xlog_recover_item_t	*item;
1545 	xfs_caddr_t		ptr;
1546 
1547 	if (!len)
1548 		return 0;
1549 	if (list_empty(&trans->r_itemq)) {
1550 		/* we need to catch log corruptions here */
1551 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1552 			xfs_warn(log->l_mp, "%s: bad header magic number",
1553 				__func__);
1554 			ASSERT(0);
1555 			return -EIO;
1556 		}
1557 		if (len == sizeof(xfs_trans_header_t))
1558 			xlog_recover_add_item(&trans->r_itemq);
1559 		memcpy(&trans->r_theader, dp, len); /* d, s, l */
1560 		return 0;
1561 	}
1562 
1563 	ptr = kmem_alloc(len, KM_SLEEP);
1564 	memcpy(ptr, dp, len);
1565 	in_f = (xfs_inode_log_format_t *)ptr;
1566 
1567 	/* take the tail entry */
1568 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1569 	if (item->ri_total != 0 &&
1570 	     item->ri_total == item->ri_cnt) {
1571 		/* tail item is in use, get a new one */
1572 		xlog_recover_add_item(&trans->r_itemq);
1573 		item = list_entry(trans->r_itemq.prev,
1574 					xlog_recover_item_t, ri_list);
1575 	}
1576 
1577 	if (item->ri_total == 0) {		/* first region to be added */
1578 		if (in_f->ilf_size == 0 ||
1579 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1580 			xfs_warn(log->l_mp,
1581 		"bad number of regions (%d) in inode log format",
1582 				  in_f->ilf_size);
1583 			ASSERT(0);
1584 			kmem_free(ptr);
1585 			return -EIO;
1586 		}
1587 
1588 		item->ri_total = in_f->ilf_size;
1589 		item->ri_buf =
1590 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1591 				    KM_SLEEP);
1592 	}
1593 	ASSERT(item->ri_total > item->ri_cnt);
1594 	/* Description region is ri_buf[0] */
1595 	item->ri_buf[item->ri_cnt].i_addr = ptr;
1596 	item->ri_buf[item->ri_cnt].i_len  = len;
1597 	item->ri_cnt++;
1598 	trace_xfs_log_recover_item_add(log, trans, item, 0);
1599 	return 0;
1600 }
1601 
1602 /*
1603  * Sort the log items in the transaction.
1604  *
1605  * The ordering constraints are defined by the inode allocation and unlink
1606  * behaviour. The rules are:
1607  *
1608  *	1. Every item is only logged once in a given transaction. Hence it
1609  *	   represents the last logged state of the item. Hence ordering is
1610  *	   dependent on the order in which operations need to be performed so
1611	 *	   that required initial conditions are always met.
1612  *
1613  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1614  *	   there's nothing to replay from them so we can simply cull them
1615  *	   from the transaction. However, we can't do that until after we've
1616  *	   replayed all the other items because they may be dependent on the
1617  *	   cancelled buffer and replaying the cancelled buffer can remove it
1618	 *	   from the cancelled buffer table. Hence they have to be done last.
1619  *
1620  *	3. Inode allocation buffers must be replayed before inode items that
1621  *	   read the buffer and replay changes into it. For filesystems using the
1622  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1623  *	   treated the same as inode allocation buffers as they create and
1624  *	   initialise the buffers directly.
1625  *
1626  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1627  *	   This ensures that inodes are completely flushed to the inode buffer
1628  *	   in a "free" state before we remove the unlinked inode list pointer.
1629  *
1630  * Hence the ordering needs to be inode allocation buffers first, inode items
1631  * second, inode unlink buffers third and cancelled buffers last.
1632  *
1633  * But there's a problem with that - we can't tell an inode allocation buffer
1634  * apart from a regular buffer, so we can't separate them. We can, however,
1635  * tell an inode unlink buffer from the others, and so we can separate them out
1636  * from all the other buffers and move them to last.
1637  *
1638  * Hence, 4 lists, in order from head to tail:
1639  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1640  *	- item_list for all non-buffer items
1641  *	- inode_buffer_list for inode unlink buffers
1642  *	- cancel_list for the cancelled buffers
1643  *
1644  * Note that we add objects to the tail of the lists so that first-to-last
1645  * ordering is preserved within the lists. Adding objects to the head of the
1646  * list means when we traverse from the head we walk them in last-to-first
1647  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1648  * but for all other items there may be specific ordering that we need to
1649  * preserve.
1650  */
1651 STATIC int
1652 xlog_recover_reorder_trans(
1653 	struct xlog		*log,
1654 	struct xlog_recover	*trans,
1655 	int			pass)
1656 {
1657 	xlog_recover_item_t	*item, *n;
1658 	int			error = 0;
1659 	LIST_HEAD(sort_list);
1660 	LIST_HEAD(cancel_list);
1661 	LIST_HEAD(buffer_list);
1662 	LIST_HEAD(inode_buffer_list);
1663 	LIST_HEAD(inode_list);
1664 
1665 	list_splice_init(&trans->r_itemq, &sort_list);
1666 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1667 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1668 
1669 		switch (ITEM_TYPE(item)) {
1670 		case XFS_LI_ICREATE:
1671 			list_move_tail(&item->ri_list, &buffer_list);
1672 			break;
1673 		case XFS_LI_BUF:
1674 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1675 				trace_xfs_log_recover_item_reorder_head(log,
1676 							trans, item, pass);
1677 				list_move(&item->ri_list, &cancel_list);
1678 				break;
1679 			}
1680 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1681 				list_move(&item->ri_list, &inode_buffer_list);
1682 				break;
1683 			}
1684 			list_move_tail(&item->ri_list, &buffer_list);
1685 			break;
1686 		case XFS_LI_INODE:
1687 		case XFS_LI_DQUOT:
1688 		case XFS_LI_QUOTAOFF:
1689 		case XFS_LI_EFD:
1690 		case XFS_LI_EFI:
1691 			trace_xfs_log_recover_item_reorder_tail(log,
1692 							trans, item, pass);
1693 			list_move_tail(&item->ri_list, &inode_list);
1694 			break;
1695 		default:
1696 			xfs_warn(log->l_mp,
1697 				"%s: unrecognized type of log operation",
1698 				__func__);
1699 			ASSERT(0);
1700 			/*
1701 			 * return the remaining items back to the transaction
1702 			 * item list so they can be freed in caller.
1703 			 */
1704 			if (!list_empty(&sort_list))
1705 				list_splice_init(&sort_list, &trans->r_itemq);
1706 			error = -EIO;
1707 			goto out;
1708 		}
1709 	}
1710 out:
1711 	ASSERT(list_empty(&sort_list));
1712 	if (!list_empty(&buffer_list))
1713 		list_splice(&buffer_list, &trans->r_itemq);
1714 	if (!list_empty(&inode_list))
1715 		list_splice_tail(&inode_list, &trans->r_itemq);
1716 	if (!list_empty(&inode_buffer_list))
1717 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1718 	if (!list_empty(&cancel_list))
1719 		list_splice_tail(&cancel_list, &trans->r_itemq);
1720 	return error;
1721 }
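
/*
 * Illustrative example of the reordering above, for a hypothetical
 * transaction whose items arrive in the order
 *
 *	BUF(cancel), INODE, BUF(inode unlink), ICREATE, BUF
 *
 * After xlog_recover_reorder_trans() the item queue reads
 *
 *	ICREATE, BUF, INODE, BUF(inode unlink), BUF(cancel)
 *
 * i.e. buffers (including inode allocation buffers) first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 */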
1722 
1723 /*
1724  * Build up the table of buf cancel records so that we don't replay
1725  * cancelled data in the second pass.  For buffer records that are
1726  * not cancel records, there is nothing to do here so we just return.
1727  *
1728  * If we get a cancel record which is already in the table, this indicates
1729  * that the buffer was cancelled multiple times.  In order to ensure
1730  * that during pass 2 we keep the record in the table until we reach its
1731  * last occurrence in the log, we keep a reference count in the cancel
1732  * record in the table to tell us how many times we expect to see this
1733  * record during the second pass.
1734  */
1735 STATIC int
1736 xlog_recover_buffer_pass1(
1737 	struct xlog			*log,
1738 	struct xlog_recover_item	*item)
1739 {
1740 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1741 	struct list_head	*bucket;
1742 	struct xfs_buf_cancel	*bcp;
1743 
1744 	/*
1745 	 * If this isn't a cancel buffer item, then just return.
1746 	 */
1747 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1748 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1749 		return 0;
1750 	}
1751 
1752 	/*
1753 	 * Insert an xfs_buf_cancel record into the hash table of them.
1754 	 * If there is already an identical record, bump its reference count.
1755 	 */
1756 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1757 	list_for_each_entry(bcp, bucket, bc_list) {
1758 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1759 		    bcp->bc_len == buf_f->blf_len) {
1760 			bcp->bc_refcount++;
1761 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1762 			return 0;
1763 		}
1764 	}
1765 
1766 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1767 	bcp->bc_blkno = buf_f->blf_blkno;
1768 	bcp->bc_len = buf_f->blf_len;
1769 	bcp->bc_refcount = 1;
1770 	list_add_tail(&bcp->bc_list, bucket);
1771 
1772 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1773 	return 0;
1774 }
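
/*
 * Note: XLOG_BUF_CANCEL_BUCKET() hashes on the block number - roughly,
 * going by its definition in xfs_log_priv.h, the equivalent of:
 *
 *	bucket = &log->l_buf_cancel_table[blkno % XLOG_BC_TABLE_SIZE];
 *
 * so all cancel records for a given disk address land in the same bucket
 * and the list walk above only has to scan hash collisions.
 */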
1775 
1776 /*
1777  * Check to see whether the buffer being recovered has a corresponding
1778  * entry in the buffer cancel record table. If it does, return the cancel
1779  * buffer structure to the caller.
1780  */
1781 STATIC struct xfs_buf_cancel *
1782 xlog_peek_buffer_cancelled(
1783 	struct xlog		*log,
1784 	xfs_daddr_t		blkno,
1785 	uint			len,
1786 	ushort			flags)
1787 {
1788 	struct list_head	*bucket;
1789 	struct xfs_buf_cancel	*bcp;
1790 
1791 	if (!log->l_buf_cancel_table) {
1792 		/* empty table means no cancelled buffers in the log */
1793 		ASSERT(!(flags & XFS_BLF_CANCEL));
1794 		return NULL;
1795 	}
1796 
1797 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1798 	list_for_each_entry(bcp, bucket, bc_list) {
1799 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1800 			return bcp;
1801 	}
1802 
1803 	/*
1804 	 * We didn't find a corresponding entry in the table, so return NULL
1805 	 * so that the buffer is NOT cancelled.
1806 	 */
1807 	ASSERT(!(flags & XFS_BLF_CANCEL));
1808 	return NULL;
1809 }
1810 
1811 /*
1812  * If the buffer is being cancelled then return 1 so that it will be cancelled,
1813  * otherwise return 0.  If the buffer is actually a buffer cancel item
1814  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
1815  * table and remove it from the table if this is the last reference.
1816  *
1817  * We remove the cancel record from the table when we encounter its last
1818  * occurrence in the log so that if the same buffer is re-used again after its
1819  * last cancellation we actually replay the changes made at that point.
1820  */
1821 STATIC int
1822 xlog_check_buffer_cancelled(
1823 	struct xlog		*log,
1824 	xfs_daddr_t		blkno,
1825 	uint			len,
1826 	ushort			flags)
1827 {
1828 	struct xfs_buf_cancel	*bcp;
1829 
1830 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
1831 	if (!bcp)
1832 		return 0;
1833 
1834 	/*
1835 	 * We've got a match, so return 1 so that the recovery of this buffer
1836 	 * is cancelled.  If this buffer is actually a buffer cancel log
1837 	 * item, then decrement the refcount on the one in the table and
1838 	 * remove it if this is the last reference.
1839 	 */
1840 	if (flags & XFS_BLF_CANCEL) {
1841 		if (--bcp->bc_refcount == 0) {
1842 			list_del(&bcp->bc_list);
1843 			kmem_free(bcp);
1844 		}
1845 	}
1846 	return 1;
1847 }
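
/*
 * Worked example of the reference counting: a buffer at (blkno, len) that
 * is cancelled twice in the log leaves pass 1 with bc_refcount == 2.  In
 * pass 2 each XFS_BLF_CANCEL item for it decrements the count, so the
 * table entry is freed exactly at the last cancel record; anything logged
 * for those blocks after that point is replayed normally.
 */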
1848 
1849 /*
1850  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1851  * data which should be recovered is that which corresponds to the
1852  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1853  * data for the inodes is always logged through the inodes themselves rather
1854  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1855  *
1856  * The only time when buffers full of inodes are fully recovered is when the
1857  * buffer is full of newly allocated inodes.  In this case the buffer will
1858  * not be marked as an inode buffer and so will be sent to
1859  * xlog_recover_do_reg_buffer() below during recovery.
1860  */
1861 STATIC int
1862 xlog_recover_do_inode_buffer(
1863 	struct xfs_mount	*mp,
1864 	xlog_recover_item_t	*item,
1865 	struct xfs_buf		*bp,
1866 	xfs_buf_log_format_t	*buf_f)
1867 {
1868 	int			i;
1869 	int			item_index = 0;
1870 	int			bit = 0;
1871 	int			nbits = 0;
1872 	int			reg_buf_offset = 0;
1873 	int			reg_buf_bytes = 0;
1874 	int			next_unlinked_offset;
1875 	int			inodes_per_buf;
1876 	xfs_agino_t		*logged_nextp;
1877 	xfs_agino_t		*buffer_nextp;
1878 
1879 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1880 
1881 	/*
1882 	 * Post recovery validation only works properly on CRC enabled
1883 	 * filesystems.
1884 	 */
1885 	if (xfs_sb_version_hascrc(&mp->m_sb))
1886 		bp->b_ops = &xfs_inode_buf_ops;
1887 
1888 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1889 	for (i = 0; i < inodes_per_buf; i++) {
1890 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1891 			offsetof(xfs_dinode_t, di_next_unlinked);
1892 
1893 		while (next_unlinked_offset >=
1894 		       (reg_buf_offset + reg_buf_bytes)) {
1895 			/*
1896 			 * The next di_next_unlinked field is beyond
1897 			 * the current logged region.  Find the next
1898 			 * logged region that contains or is beyond
1899 			 * the current di_next_unlinked field.
1900 			 */
1901 			bit += nbits;
1902 			bit = xfs_next_bit(buf_f->blf_data_map,
1903 					   buf_f->blf_map_size, bit);
1904 
1905 			/*
1906 			 * If there are no more logged regions in the
1907 			 * buffer, then we're done.
1908 			 */
1909 			if (bit == -1)
1910 				return 0;
1911 
1912 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1913 						buf_f->blf_map_size, bit);
1914 			ASSERT(nbits > 0);
1915 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1916 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1917 			item_index++;
1918 		}
1919 
1920 		/*
1921 		 * If the current logged region starts after the current
1922 		 * di_next_unlinked field, then move on to the next
1923 		 * di_next_unlinked field.
1924 		 */
1925 		if (next_unlinked_offset < reg_buf_offset)
1926 			continue;
1927 
1928 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1929 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1930 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1931 							BBTOB(bp->b_io_length));
1932 
1933 		/*
1934 		 * The current logged region contains a copy of the
1935 		 * current di_next_unlinked field.  Extract its value
1936 		 * and copy it to the buffer copy.
1937 		 */
1938 		logged_nextp = item->ri_buf[item_index].i_addr +
1939 				next_unlinked_offset - reg_buf_offset;
1940 		if (unlikely(*logged_nextp == 0)) {
1941 			xfs_alert(mp,
1942 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1943 		"Trying to replay bad (0) inode di_next_unlinked field.",
1944 				item, bp);
1945 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1946 					 XFS_ERRLEVEL_LOW, mp);
1947 			return -EFSCORRUPTED;
1948 		}
1949 
1950 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1951 					      next_unlinked_offset);
1952 		*buffer_nextp = *logged_nextp;
1953 
1954 		/*
1955 		 * If necessary, recalculate the CRC in the on-disk inode. We
1956 		 * have to leave the inode in a consistent state for whoever
1957 		 * reads it next....
1958 		 */
1959 		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1960 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1961 
1962 	}
1963 
1964 	return 0;
1965 }
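
/*
 * Worked example of the dirty map arithmetic above: with XFS_BLF_CHUNK
 * being 128 bytes (i.e. XFS_BLF_SHIFT == 7), a run starting at bit 3
 * with nbits == 2 describes the logged region
 *
 *	reg_buf_offset = 3 << 7 = 384
 *	reg_buf_bytes  = 2 << 7 = 256
 *
 * i.e. bytes 384..639 of the buffer.  A di_next_unlinked field is only
 * copied back if its offset falls within such a logged region.
 */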
1966 
1967 /*
1968  * V5 filesystems know the age of the buffer on disk being recovered. We can
1969  * have newer objects on disk than we are replaying, and so for these cases we
1970  * don't want to replay the current change as that will make the buffer contents
1971  * temporarily invalid on disk.
1972  *
1973  * The magic number might not match the buffer type we are going to recover
1974  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
1975  * extract the LSN of the existing object in the buffer based on its current
1976  * magic number.  If we don't recognise the magic number in the buffer, then
1977  * return an LSN of -1 so that the caller knows it was an unrecognised block and
1978  * so can recover the buffer.
1979  *
1980  * Note: we cannot rely solely on magic number matches to determine that the
1981  * buffer has a valid LSN - we also need to verify that it belongs to this
1982  * filesystem, so we need to extract the object's LSN and compare it to that
1983  * which we read from the superblock. If the UUIDs don't match, then we've got a
1984  * stale metadata block from an old filesystem instance that we need to recover
1985  * over the top of.
1986  */
1987 static xfs_lsn_t
1988 xlog_recover_get_buf_lsn(
1989 	struct xfs_mount	*mp,
1990 	struct xfs_buf		*bp)
1991 {
1992 	__uint32_t		magic32;
1993 	__uint16_t		magic16;
1994 	__uint16_t		magicda;
1995 	void			*blk = bp->b_addr;
1996 	uuid_t			*uuid;
1997 	xfs_lsn_t		lsn = -1;
1998 
1999 	/* v4 filesystems always recover immediately */
2000 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2001 		goto recover_immediately;
2002 
2003 	magic32 = be32_to_cpu(*(__be32 *)blk);
2004 	switch (magic32) {
2005 	case XFS_ABTB_CRC_MAGIC:
2006 	case XFS_ABTC_CRC_MAGIC:
2007 	case XFS_ABTB_MAGIC:
2008 	case XFS_ABTC_MAGIC:
2009 	case XFS_IBT_CRC_MAGIC:
2010 	case XFS_IBT_MAGIC: {
2011 		struct xfs_btree_block *btb = blk;
2012 
2013 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2014 		uuid = &btb->bb_u.s.bb_uuid;
2015 		break;
2016 	}
2017 	case XFS_BMAP_CRC_MAGIC:
2018 	case XFS_BMAP_MAGIC: {
2019 		struct xfs_btree_block *btb = blk;
2020 
2021 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2022 		uuid = &btb->bb_u.l.bb_uuid;
2023 		break;
2024 	}
2025 	case XFS_AGF_MAGIC:
2026 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2027 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2028 		break;
2029 	case XFS_AGFL_MAGIC:
2030 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2031 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2032 		break;
2033 	case XFS_AGI_MAGIC:
2034 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2035 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2036 		break;
2037 	case XFS_SYMLINK_MAGIC:
2038 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2039 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2040 		break;
2041 	case XFS_DIR3_BLOCK_MAGIC:
2042 	case XFS_DIR3_DATA_MAGIC:
2043 	case XFS_DIR3_FREE_MAGIC:
2044 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2045 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2046 		break;
2047 	case XFS_ATTR3_RMT_MAGIC:
2048 		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2049 		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2050 		break;
2051 	case XFS_SB_MAGIC:
2052 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2053 		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2054 		break;
2055 	default:
2056 		break;
2057 	}
2058 
2059 	if (lsn != (xfs_lsn_t)-1) {
2060 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2061 			goto recover_immediately;
2062 		return lsn;
2063 	}
2064 
2065 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2066 	switch (magicda) {
2067 	case XFS_DIR3_LEAF1_MAGIC:
2068 	case XFS_DIR3_LEAFN_MAGIC:
2069 	case XFS_DA3_NODE_MAGIC:
2070 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2071 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2072 		break;
2073 	default:
2074 		break;
2075 	}
2076 
2077 	if (lsn != (xfs_lsn_t)-1) {
2078 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2079 			goto recover_immediately;
2080 		return lsn;
2081 	}
2082 
2083 	/*
2084 	 * We do individual object checks on dquot and inode buffers as they
2085 	 * have their own individual LSN records. Also, we could have a stale
2086 	 * buffer here, so we have to at least recognise these buffer types.
2087 	 *
2088 	 * A noted complexity here is inode unlinked list processing - it logs
2089 	 * the inode directly in the buffer, but we don't know which inodes have
2090 	 * been modified, and there is no global buffer LSN. Hence we need to
2091 	 * recover all inode buffer types immediately. This problem will be
2092 	 * fixed by logical logging of the unlinked list modifications.
2093 	 */
2094 	magic16 = be16_to_cpu(*(__be16 *)blk);
2095 	switch (magic16) {
2096 	case XFS_DQUOT_MAGIC:
2097 	case XFS_DINODE_MAGIC:
2098 		goto recover_immediately;
2099 	default:
2100 		break;
2101 	}
2102 
2103 	/* unknown buffer contents, recover immediately */
2104 
2105 recover_immediately:
2106 	return (xfs_lsn_t)-1;
2107 
2108 }
2109 
2110 /*
2111  * Validate the recovered buffer is of the correct type and attach the
2112  * appropriate buffer operations to them for writeback. Magic numbers are in a
2113  * few places:
2114  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2115  *	the first 32 bits of the buffer (most blocks),
2116  *	inside a struct xfs_da_blkinfo at the start of the buffer.
2117  */
2118 static void
2119 xlog_recover_validate_buf_type(
2120 	struct xfs_mount	*mp,
2121 	struct xfs_buf		*bp,
2122 	xfs_buf_log_format_t	*buf_f)
2123 {
2124 	struct xfs_da_blkinfo	*info = bp->b_addr;
2125 	__uint32_t		magic32;
2126 	__uint16_t		magic16;
2127 	__uint16_t		magicda;
2128 
2129 	/*
2130 	 * We can only do post recovery validation on items on CRC enabled
2131 	 * filesystems as we need to know when the buffer was written to be able
2132 	 * to determine if we should have replayed the item. If we replay old
2133 	 * metadata over a newer buffer, then it will enter a temporarily
2134 	 * inconsistent state resulting in verification failures. Hence for now
2135 	 * just avoid the verification stage for non-crc filesystems.
2136 	 */
2137 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2138 		return;
2139 
2140 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2141 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2142 	magicda = be16_to_cpu(info->magic);
2143 	switch (xfs_blft_from_flags(buf_f)) {
2144 	case XFS_BLFT_BTREE_BUF:
2145 		switch (magic32) {
2146 		case XFS_ABTB_CRC_MAGIC:
2147 		case XFS_ABTC_CRC_MAGIC:
2148 		case XFS_ABTB_MAGIC:
2149 		case XFS_ABTC_MAGIC:
2150 			bp->b_ops = &xfs_allocbt_buf_ops;
2151 			break;
2152 		case XFS_IBT_CRC_MAGIC:
2153 		case XFS_FIBT_CRC_MAGIC:
2154 		case XFS_IBT_MAGIC:
2155 		case XFS_FIBT_MAGIC:
2156 			bp->b_ops = &xfs_inobt_buf_ops;
2157 			break;
2158 		case XFS_BMAP_CRC_MAGIC:
2159 		case XFS_BMAP_MAGIC:
2160 			bp->b_ops = &xfs_bmbt_buf_ops;
2161 			break;
2162 		default:
2163 			xfs_warn(mp, "Bad btree block magic!");
2164 			ASSERT(0);
2165 			break;
2166 		}
2167 		break;
2168 	case XFS_BLFT_AGF_BUF:
2169 		if (magic32 != XFS_AGF_MAGIC) {
2170 			xfs_warn(mp, "Bad AGF block magic!");
2171 			ASSERT(0);
2172 			break;
2173 		}
2174 		bp->b_ops = &xfs_agf_buf_ops;
2175 		break;
2176 	case XFS_BLFT_AGFL_BUF:
2177 		if (magic32 != XFS_AGFL_MAGIC) {
2178 			xfs_warn(mp, "Bad AGFL block magic!");
2179 			ASSERT(0);
2180 			break;
2181 		}
2182 		bp->b_ops = &xfs_agfl_buf_ops;
2183 		break;
2184 	case XFS_BLFT_AGI_BUF:
2185 		if (magic32 != XFS_AGI_MAGIC) {
2186 			xfs_warn(mp, "Bad AGI block magic!");
2187 			ASSERT(0);
2188 			break;
2189 		}
2190 		bp->b_ops = &xfs_agi_buf_ops;
2191 		break;
2192 	case XFS_BLFT_UDQUOT_BUF:
2193 	case XFS_BLFT_PDQUOT_BUF:
2194 	case XFS_BLFT_GDQUOT_BUF:
2195 #ifdef CONFIG_XFS_QUOTA
2196 		if (magic16 != XFS_DQUOT_MAGIC) {
2197 			xfs_warn(mp, "Bad DQUOT block magic!");
2198 			ASSERT(0);
2199 			break;
2200 		}
2201 		bp->b_ops = &xfs_dquot_buf_ops;
2202 #else
2203 		xfs_alert(mp,
2204 	"Trying to recover dquots without QUOTA support built in!");
2205 		ASSERT(0);
2206 #endif
2207 		break;
2208 	case XFS_BLFT_DINO_BUF:
2209 		if (magic16 != XFS_DINODE_MAGIC) {
2210 			xfs_warn(mp, "Bad INODE block magic!");
2211 			ASSERT(0);
2212 			break;
2213 		}
2214 		bp->b_ops = &xfs_inode_buf_ops;
2215 		break;
2216 	case XFS_BLFT_SYMLINK_BUF:
2217 		if (magic32 != XFS_SYMLINK_MAGIC) {
2218 			xfs_warn(mp, "Bad symlink block magic!");
2219 			ASSERT(0);
2220 			break;
2221 		}
2222 		bp->b_ops = &xfs_symlink_buf_ops;
2223 		break;
2224 	case XFS_BLFT_DIR_BLOCK_BUF:
2225 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2226 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2227 			xfs_warn(mp, "Bad dir block magic!");
2228 			ASSERT(0);
2229 			break;
2230 		}
2231 		bp->b_ops = &xfs_dir3_block_buf_ops;
2232 		break;
2233 	case XFS_BLFT_DIR_DATA_BUF:
2234 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2235 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2236 			xfs_warn(mp, "Bad dir data magic!");
2237 			ASSERT(0);
2238 			break;
2239 		}
2240 		bp->b_ops = &xfs_dir3_data_buf_ops;
2241 		break;
2242 	case XFS_BLFT_DIR_FREE_BUF:
2243 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2244 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2245 			xfs_warn(mp, "Bad dir3 free magic!");
2246 			ASSERT(0);
2247 			break;
2248 		}
2249 		bp->b_ops = &xfs_dir3_free_buf_ops;
2250 		break;
2251 	case XFS_BLFT_DIR_LEAF1_BUF:
2252 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2253 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2254 			xfs_warn(mp, "Bad dir leaf1 magic!");
2255 			ASSERT(0);
2256 			break;
2257 		}
2258 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2259 		break;
2260 	case XFS_BLFT_DIR_LEAFN_BUF:
2261 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2262 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2263 			xfs_warn(mp, "Bad dir leafn magic!");
2264 			ASSERT(0);
2265 			break;
2266 		}
2267 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2268 		break;
2269 	case XFS_BLFT_DA_NODE_BUF:
2270 		if (magicda != XFS_DA_NODE_MAGIC &&
2271 		    magicda != XFS_DA3_NODE_MAGIC) {
2272 			xfs_warn(mp, "Bad da node magic!");
2273 			ASSERT(0);
2274 			break;
2275 		}
2276 		bp->b_ops = &xfs_da3_node_buf_ops;
2277 		break;
2278 	case XFS_BLFT_ATTR_LEAF_BUF:
2279 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2280 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2281 			xfs_warn(mp, "Bad attr leaf magic!");
2282 			ASSERT(0);
2283 			break;
2284 		}
2285 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2286 		break;
2287 	case XFS_BLFT_ATTR_RMT_BUF:
2288 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2289 			xfs_warn(mp, "Bad attr remote magic!");
2290 			ASSERT(0);
2291 			break;
2292 		}
2293 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2294 		break;
2295 	case XFS_BLFT_SB_BUF:
2296 		if (magic32 != XFS_SB_MAGIC) {
2297 			xfs_warn(mp, "Bad SB block magic!");
2298 			ASSERT(0);
2299 			break;
2300 		}
2301 		bp->b_ops = &xfs_sb_buf_ops;
2302 		break;
2303 	default:
2304 		xfs_warn(mp, "Unknown buffer type %d!",
2305 			 xfs_blft_from_flags(buf_f));
2306 		break;
2307 	}
2308 }
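
/*
 * Example of the three magic locations for a directory leaf1 block: the
 * blft says XFS_BLFT_DIR_LEAF1_BUF, the leading 32/16 bit words hold the
 * da block's forw/back pointers rather than a magic, and it is magicda,
 * read from the struct xfs_da_blkinfo at the start of the buffer, that
 * must be XFS_DIR2_LEAF1_MAGIC or XFS_DIR3_LEAF1_MAGIC before the leaf1
 * ops are attached.
 */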
2309 
2310 /*
2311  * Perform a 'normal' buffer recovery.  Each logged region of the
2312  * buffer should be copied over the corresponding region in the
2313  * given buffer.  The bitmap in the buf log format structure indicates
2314  * where to place the logged data.
2315  */
2316 STATIC void
2317 xlog_recover_do_reg_buffer(
2318 	struct xfs_mount	*mp,
2319 	xlog_recover_item_t	*item,
2320 	struct xfs_buf		*bp,
2321 	xfs_buf_log_format_t	*buf_f)
2322 {
2323 	int			i;
2324 	int			bit;
2325 	int			nbits;
2326 	int                     error;
2327 
2328 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2329 
2330 	bit = 0;
2331 	i = 1;  /* 0 is the buf format structure */
2332 	while (1) {
2333 		bit = xfs_next_bit(buf_f->blf_data_map,
2334 				   buf_f->blf_map_size, bit);
2335 		if (bit == -1)
2336 			break;
2337 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2338 					buf_f->blf_map_size, bit);
2339 		ASSERT(nbits > 0);
2340 		ASSERT(item->ri_buf[i].i_addr != NULL);
2341 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2342 		ASSERT(BBTOB(bp->b_io_length) >=
2343 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2344 
2345 		/*
2346 		 * The dirty regions logged in the buffer, even though
2347 		 * contiguous, may span multiple chunks. This is because the
2348 		 * dirty region may span a physical page boundary in a buffer
2349 		 * and hence be split into two separate vectors for writing into
2350 		 * the log. Hence we need to trim nbits back to the length of
2351 		 * the current region being copied out of the log.
2352 		 */
2353 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2354 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2355 
2356 		/*
2357 		 * Do a sanity check if this is a dquot buffer. Just checking
2358 	 * the first dquot in the buffer should do. XXX: This is
2359 		 * probably a good thing to do for other buf types also.
2360 		 */
2361 		error = 0;
2362 		if (buf_f->blf_flags &
2363 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2364 			if (item->ri_buf[i].i_addr == NULL) {
2365 				xfs_alert(mp,
2366 					"XFS: NULL dquot in %s.", __func__);
2367 				goto next;
2368 			}
2369 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2370 				xfs_alert(mp,
2371 					"XFS: dquot too small (%d) in %s.",
2372 					item->ri_buf[i].i_len, __func__);
2373 				goto next;
2374 			}
2375 			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
2376 					       -1, 0, XFS_QMOPT_DOWARN,
2377 					       "dquot_buf_recover");
2378 			if (error)
2379 				goto next;
2380 		}
2381 
2382 		memcpy(xfs_buf_offset(bp,
2383 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2384 			item->ri_buf[i].i_addr,		/* source */
2385 			nbits<<XFS_BLF_SHIFT);		/* length */
2386  next:
2387 		i++;
2388 		bit += nbits;
2389 	}
2390 
2391 	/* Shouldn't be any more regions */
2392 	ASSERT(i == item->ri_total);
2393 
2394 	xlog_recover_validate_buf_type(mp, bp, buf_f);
2395 }
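
/*
 * Example of the nbits trim above: a contiguous dirty run of 4 chunks
 * (512 bytes) that straddles a page boundary may be logged as two
 * 256 byte vectors.  The bitmap still reports nbits == 4, but
 * ri_buf[i].i_len is only 256, so nbits is clamped to 2; the second half
 * of the run is then copied from the next region on the following loop
 * iteration.
 */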
2396 
2397 /*
2398  * Perform a dquot buffer recovery.
2399  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2400  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2401  * Else, treat it as a regular buffer and do recovery.
2402  *
2403  * Return false if the buffer was tossed and true if we recovered the buffer to
2404  * indicate to the caller if the buffer needs writing.
2405  */
2406 STATIC bool
2407 xlog_recover_do_dquot_buffer(
2408 	struct xfs_mount		*mp,
2409 	struct xlog			*log,
2410 	struct xlog_recover_item	*item,
2411 	struct xfs_buf			*bp,
2412 	struct xfs_buf_log_format	*buf_f)
2413 {
2414 	uint			type;
2415 
2416 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2417 
2418 	/*
2419 	 * Filesystems are required to send in quota flags at mount time.
2420 	 */
2421 	if (!mp->m_qflags)
2422 		return false;
2423 
2424 	type = 0;
2425 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2426 		type |= XFS_DQ_USER;
2427 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2428 		type |= XFS_DQ_PROJ;
2429 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2430 		type |= XFS_DQ_GROUP;
2431 	/*
2432 	 * This type of quotas was turned off, so ignore this buffer
2433 	 * This type of quota was turned off, so ignore this buffer
2434 	if (log->l_quotaoffs_flag & type)
2435 		return false;
2436 
2437 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2438 	return true;
2439 }
2440 
2441 /*
2442  * This routine replays a modification made to a buffer at runtime.
2443  * There are actually two types of buffer, regular and inode, which
2444  * are handled differently.  Inode buffers differ in that we only
2445  * recover a specific set of data from them, namely
2446  * the inode di_next_unlinked fields.  This is because all other inode
2447  * data is actually logged via inode records and any data we replay
2448  * here which overlaps that may be stale.
2449  *
2450  * When meta-data buffers are freed at run time we log a buffer item
2451  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2452  * of the buffer in the log should not be replayed at recovery time.
2453  * This is so that if the blocks covered by the buffer are reused for
2454  * file data before we crash we don't end up replaying old, freed
2455  * meta-data into a user's file.
2456  *
2457  * To handle the cancellation of buffer log items, we make two passes
2458  * over the log during recovery.  During the first we build a table of
2459  * those buffers which have been cancelled, and during the second we
2460  * only replay those buffers which do not have corresponding cancel
2461  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2462  * for more details on the implementation of the table of cancel records.
2463  */
2464 STATIC int
2465 xlog_recover_buffer_pass2(
2466 	struct xlog			*log,
2467 	struct list_head		*buffer_list,
2468 	struct xlog_recover_item	*item,
2469 	xfs_lsn_t			current_lsn)
2470 {
2471 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2472 	xfs_mount_t		*mp = log->l_mp;
2473 	xfs_buf_t		*bp;
2474 	int			error;
2475 	uint			buf_flags;
2476 	xfs_lsn_t		lsn;
2477 
2478 	/*
2479 	 * In this pass we only want to recover all the buffers which have
2480 	 * not been cancelled and are not cancellation buffers themselves.
2481 	 */
2482 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2483 			buf_f->blf_len, buf_f->blf_flags)) {
2484 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2485 		return 0;
2486 	}
2487 
2488 	trace_xfs_log_recover_buf_recover(log, buf_f);
2489 
2490 	buf_flags = 0;
2491 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2492 		buf_flags |= XBF_UNMAPPED;
2493 
2494 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2495 			  buf_flags, NULL);
2496 	if (!bp)
2497 		return -ENOMEM;
2498 	error = bp->b_error;
2499 	if (error) {
2500 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2501 		goto out_release;
2502 	}
2503 
2504 	/*
2505 	 * Recover the buffer only if we get an LSN from it and it's less than
2506 	 * the lsn of the transaction we are replaying.
2507 	 *
2508 	 * Note that we have to be extremely careful of readahead here.
2509 	 * Readahead does not attach verifiers to the buffers, so if we don't
2510 	 * actually do any replay after readahead because the LSN we found
2511 	 * in the buffer is more recent than the current transaction, then we
2512 	 * need to attach the verifier directly. Failure to do so means that
2513 	 * future recovery actions (e.g. EFI and unlinked list recovery) can
2514 	 * operate on the buffers without the verifier attached. This
2515 	 * can lead to blocks on disk having the correct content but a stale
2516 	 * CRC.
2517 	 *
2518 	 * It is safe to assume these clean buffers are currently up to date.
2519 	 * If the buffer is dirtied by a later transaction being replayed, then
2520 	 * the verifier will be reset to match whatever recover turns that
2521 	 * buffer into.
2522 	 */
2523 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2524 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2525 		xlog_recover_validate_buf_type(mp, bp, buf_f);
2526 		goto out_release;
2527 	}
2528 
2529 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2530 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2531 		if (error)
2532 			goto out_release;
2533 	} else if (buf_f->blf_flags &
2534 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2535 		bool	dirty;
2536 
2537 		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2538 		if (!dirty)
2539 			goto out_release;
2540 	} else {
2541 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2542 	}
2543 
2544 	/*
2545 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2546 	 * slower when taking into account all the buffers to be flushed.
2547 	 *
2548 	 * Also make sure that only inode buffers with good sizes stay in
2549 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2550 	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
2551 	 * buffers in the log can be a different size if the log was generated
2552 	 * by an older kernel using unclustered inode buffers or a newer kernel
2553 	 * running with a different inode cluster size.  Regardless, if
2554 	 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
2555 	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
2556 	 * the buffer out of the buffer cache so that the buffer won't
2557 	 * overlap with future reads of those inodes.
2558 	 */
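	/*
	 * For example, on a hypothetical geometry with a 4k block size and
	 * an 8k mp->m_inode_cluster_size, a recovered inode buffer must be
	 * 8k; a 4k inode buffer replayed from a log written by an older,
	 * unclustered kernel fails the check below and is written out and
	 * staled instead of being left in the cache.
	 */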
2559 	if (XFS_DINODE_MAGIC ==
2560 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2561 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2562 			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
2563 		xfs_buf_stale(bp);
2564 		error = xfs_bwrite(bp);
2565 	} else {
2566 		ASSERT(bp->b_target->bt_mount == mp);
2567 		bp->b_iodone = xlog_recover_iodone;
2568 		xfs_buf_delwri_queue(bp, buffer_list);
2569 	}
2570 
2571 out_release:
2572 	xfs_buf_relse(bp);
2573 	return error;
2574 }
2575 
2576 /*
2577  * Inode fork owner changes
2578  *
2579  * If we have been told that we have to reparent the inode fork, it's because an
2580  * extent swap operation on a CRC enabled filesystem has been done and we are
2581  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2582  * owners of it.
2583  *
2584  * The complexity here is that we don't have an inode context to work with, so
2585  * after we've replayed the inode we need to instantiate one.  This is where the
2586  * fun begins.
2587  *
2588  * We are in the middle of log recovery, so we can't run transactions. That
2589  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2590  * that will result in the corresponding iput() running the inode through
2591  * xfs_inactive(). If we've just replayed an inode core that changes the link
2592  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2593  * transactions (bad!).
2594  *
2595  * So, to avoid this, we instantiate an inode directly from the inode core we've
2596  * just recovered. We have the buffer still locked, and all we really need to
2597  * instantiate is the inode core and the forks being modified. We can do this
2598  * manually, then run the inode btree owner change, and then tear down the
2599  * xfs_inode without having to run any transactions at all.
2600  *
2601  * Also, because we don't have a transaction context available here but need
2602  * to gather all the buffers we modify for writeback, we pass the buffer_list
2603  * to the operation for it to use instead.
2604  */
2605 
2606 STATIC int
2607 xfs_recover_inode_owner_change(
2608 	struct xfs_mount	*mp,
2609 	struct xfs_dinode	*dip,
2610 	struct xfs_inode_log_format *in_f,
2611 	struct list_head	*buffer_list)
2612 {
2613 	struct xfs_inode	*ip;
2614 	int			error;
2615 
2616 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2617 
2618 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2619 	if (!ip)
2620 		return -ENOMEM;
2621 
2622 	/* instantiate the inode */
2623 	xfs_dinode_from_disk(&ip->i_d, dip);
2624 	ASSERT(ip->i_d.di_version >= 3);
2625 
2626 	error = xfs_iformat_fork(ip, dip);
2627 	if (error)
2628 		goto out_free_ip;
2629 
2631 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2632 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2633 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2634 					      ip->i_ino, buffer_list);
2635 		if (error)
2636 			goto out_free_ip;
2637 	}
2638 
2639 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2640 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2641 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2642 					      ip->i_ino, buffer_list);
2643 		if (error)
2644 			goto out_free_ip;
2645 	}
2646 
2647 out_free_ip:
2648 	xfs_inode_free(ip);
2649 	return error;
2650 }
2651 
2652 STATIC int
2653 xlog_recover_inode_pass2(
2654 	struct xlog			*log,
2655 	struct list_head		*buffer_list,
2656 	struct xlog_recover_item	*item,
2657 	xfs_lsn_t			current_lsn)
2658 {
2659 	xfs_inode_log_format_t	*in_f;
2660 	xfs_mount_t		*mp = log->l_mp;
2661 	xfs_buf_t		*bp;
2662 	xfs_dinode_t		*dip;
2663 	int			len;
2664 	xfs_caddr_t		src;
2665 	xfs_caddr_t		dest;
2666 	int			error;
2667 	int			attr_index;
2668 	uint			fields;
2669 	xfs_icdinode_t		*dicp;
2670 	uint			isize;
2671 	int			need_free = 0;
2672 
2673 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2674 		in_f = item->ri_buf[0].i_addr;
2675 	} else {
2676 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2677 		need_free = 1;
2678 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2679 		if (error)
2680 			goto error;
2681 	}
2682 
2683 	/*
2684 	 * Inode buffers can be freed; look out for that case
2685 	 * and do not replay the inode.
2686 	 */
2687 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2688 					in_f->ilf_len, 0)) {
2689 		error = 0;
2690 		trace_xfs_log_recover_inode_cancel(log, in_f);
2691 		goto error;
2692 	}
2693 	trace_xfs_log_recover_inode_recover(log, in_f);
2694 
2695 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2696 			  &xfs_inode_buf_ops);
2697 	if (!bp) {
2698 		error = -ENOMEM;
2699 		goto error;
2700 	}
2701 	error = bp->b_error;
2702 	if (error) {
2703 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2704 		goto out_release;
2705 	}
2706 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2707 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2708 
2709 	/*
2710 	 * Make sure the place we're flushing out to really looks
2711 	 * like an inode!
2712 	 */
2713 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2714 		xfs_alert(mp,
2715 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2716 			__func__, dip, bp, in_f->ilf_ino);
2717 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2718 				 XFS_ERRLEVEL_LOW, mp);
2719 		error = -EFSCORRUPTED;
2720 		goto out_release;
2721 	}
2722 	dicp = item->ri_buf[1].i_addr;
2723 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2724 		xfs_alert(mp,
2725 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2726 			__func__, item, in_f->ilf_ino);
2727 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2728 				 XFS_ERRLEVEL_LOW, mp);
2729 		error = -EFSCORRUPTED;
2730 		goto out_release;
2731 	}
2732 
2733 	/*
2734 	 * If the inode has an LSN in it, recover the inode only if it's less
2735 	 * than the lsn of the transaction we are replaying. Note: we still
2736 	 * need to replay an owner change even though the inode is more recent
2737 	 * than the transaction as there is no guarantee that all the btree
2738 	 * blocks are more recent than this transaction, too.
2739 	 */
2740 	if (dip->di_version >= 3) {
2741 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
2742 
2743 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2744 			trace_xfs_log_recover_inode_skip(log, in_f);
2745 			error = 0;
2746 			goto out_owner_change;
2747 		}
2748 	}
2749 
2750 	/*
2751 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2752 	 * are transactional and if ordering is necessary we can determine that
2753 	 * more accurately by the LSN field in the V3 inode core. Don't trust
2754 	 * the inode versions as we might be changing them here - use the
2755 	 * superblock flag to determine whether we need to look at di_flushiter
2756 	 * to skip replay when the on disk inode is newer than the log one.
2757 	 */
2758 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2759 	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2760 		/*
2761 		 * Deal with the wrap case, DI_MAX_FLUSH is less
2762 		 * than smaller numbers
2763 		 */
2764 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2765 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2766 			/* do nothing */
2767 		} else {
2768 			trace_xfs_log_recover_inode_skip(log, in_f);
2769 			error = 0;
2770 			goto out_release;
2771 		}
2772 	}
2773 
2774 	/* Take the opportunity to reset the flush iteration count */
2775 	dicp->di_flushiter = 0;
2776 
2777 	if (unlikely(S_ISREG(dicp->di_mode))) {
2778 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2779 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2780 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2781 					 XFS_ERRLEVEL_LOW, mp, dicp);
2782 			xfs_alert(mp,
2783 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2784 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2785 				__func__, item, dip, bp, in_f->ilf_ino);
2786 			error = -EFSCORRUPTED;
2787 			goto out_release;
2788 		}
2789 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2790 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2791 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2792 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2793 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2794 					     XFS_ERRLEVEL_LOW, mp, dicp);
2795 			xfs_alert(mp,
2796 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2797 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2798 				__func__, item, dip, bp, in_f->ilf_ino);
2799 			error = -EFSCORRUPTED;
2800 			goto out_release;
2801 		}
2802 	}
2803 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2804 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2805 				     XFS_ERRLEVEL_LOW, mp, dicp);
2806 		xfs_alert(mp,
2807 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2808 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2809 			__func__, item, dip, bp, in_f->ilf_ino,
2810 			dicp->di_nextents + dicp->di_anextents,
2811 			dicp->di_nblocks);
2812 		error = -EFSCORRUPTED;
2813 		goto out_release;
2814 	}
2815 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2816 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2817 				     XFS_ERRLEVEL_LOW, mp, dicp);
2818 		xfs_alert(mp,
2819 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2820 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2821 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2822 		error = -EFSCORRUPTED;
2823 		goto out_release;
2824 	}
2825 	isize = xfs_icdinode_size(dicp->di_version);
2826 	if (unlikely(item->ri_buf[1].i_len > isize)) {
2827 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2828 				     XFS_ERRLEVEL_LOW, mp, dicp);
2829 		xfs_alert(mp,
2830 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2831 			__func__, item->ri_buf[1].i_len, item);
2832 		error = -EFSCORRUPTED;
2833 		goto out_release;
2834 	}
2835 
2836 	/* The core is in in-core format */
2837 	xfs_dinode_to_disk(dip, dicp);
2838 
2839 	/* the rest is in on-disk format */
2840 	if (item->ri_buf[1].i_len > isize) {
2841 		memcpy((char *)dip + isize,
2842 			item->ri_buf[1].i_addr + isize,
2843 			item->ri_buf[1].i_len - isize);
2844 	}
2845 
2846 	fields = in_f->ilf_fields;
2847 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2848 	case XFS_ILOG_DEV:
2849 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2850 		break;
2851 	case XFS_ILOG_UUID:
2852 		memcpy(XFS_DFORK_DPTR(dip),
2853 		       &in_f->ilf_u.ilfu_uuid,
2854 		       sizeof(uuid_t));
2855 		break;
2856 	}
2857 
2858 	if (in_f->ilf_size == 2)
2859 		goto out_owner_change;
2860 	len = item->ri_buf[2].i_len;
2861 	src = item->ri_buf[2].i_addr;
2862 	ASSERT(in_f->ilf_size <= 4);
2863 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2864 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2865 	       (len == in_f->ilf_dsize));
2866 
2867 	switch (fields & XFS_ILOG_DFORK) {
2868 	case XFS_ILOG_DDATA:
2869 	case XFS_ILOG_DEXT:
2870 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2871 		break;
2872 
2873 	case XFS_ILOG_DBROOT:
2874 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2875 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2876 				 XFS_DFORK_DSIZE(dip, mp));
2877 		break;
2878 
2879 	default:
2880 		/*
2881 		 * There are no data fork flags set.
2882 		 */
2883 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2884 		break;
2885 	}
2886 
2887 	/*
2888 	 * If we logged any attribute data, recover it.  There may or
2889 	 * may not have been any other non-core data logged in this
2890 	 * transaction.
2891 	 */
2892 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2893 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2894 			attr_index = 3;
2895 		} else {
2896 			attr_index = 2;
2897 		}
2898 		len = item->ri_buf[attr_index].i_len;
2899 		src = item->ri_buf[attr_index].i_addr;
2900 		ASSERT(len == in_f->ilf_asize);
2901 
2902 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2903 		case XFS_ILOG_ADATA:
2904 		case XFS_ILOG_AEXT:
2905 			dest = XFS_DFORK_APTR(dip);
2906 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2907 			memcpy(dest, src, len);
2908 			break;
2909 
2910 		case XFS_ILOG_ABROOT:
2911 			dest = XFS_DFORK_APTR(dip);
2912 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2913 					 len, (xfs_bmdr_block_t*)dest,
2914 					 XFS_DFORK_ASIZE(dip, mp));
2915 			break;
2916 
2917 		default:
2918 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2919 			ASSERT(0);
2920 			error = -EIO;
2921 			goto out_release;
2922 		}
2923 	}
2924 
2925 out_owner_change:
2926 	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
2927 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
2928 						       buffer_list);
2929 	/* re-generate the checksum. */
2930 	xfs_dinode_calc_crc(log->l_mp, dip);
2931 
2932 	ASSERT(bp->b_target->bt_mount == mp);
2933 	bp->b_iodone = xlog_recover_iodone;
2934 	xfs_buf_delwri_queue(bp, buffer_list);
2935 
2936 out_release:
2937 	xfs_buf_relse(bp);
2938 error:
2939 	if (need_free)
2940 		kmem_free(in_f);
2941 	return error;
2942 }
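
/*
 * Region layout example for the copies above: an inode item that logs the
 * core plus both forks has ilf_size == 4 and regions
 *
 *	ri_buf[0] = xfs_inode_log_format
 *	ri_buf[1] = inode core
 *	ri_buf[2] = data fork (XFS_ILOG_DFORK flavour)
 *	ri_buf[3] = attr fork (XFS_ILOG_AFORK flavour)
 *
 * which is why attr_index is 3 when a data fork region is present and 2
 * otherwise, and why ilf_size == 2 means there is nothing beyond the core.
 */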
2943 
2944 /*
2945  * Recover QUOTAOFF records. We simply make a note of them in the xlog
2946  * structure, so that we know not to do any dquot item or dquot buffer
2947  * recovery of that type.
2948  */
2949 STATIC int
2950 xlog_recover_quotaoff_pass1(
2951 	struct xlog			*log,
2952 	struct xlog_recover_item	*item)
2953 {
2954 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2955 	ASSERT(qoff_f);
2956 
2957 	/*
2958 	 * The logitem format's flag tells us if this was user quotaoff,
2959 	 * group/project quotaoff or both.
2960 	 */
2961 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2962 		log->l_quotaoffs_flag |= XFS_DQ_USER;
2963 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2964 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2965 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2966 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2967 
2968 	return 0;
2969 }
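
/*
 * Example: a user+group quotaoff record seen in pass 1 sets
 * l_quotaoffs_flag to (XFS_DQ_USER | XFS_DQ_GROUP).  A project dquot
 * item in pass 2 then has type == XFS_DQ_PROJ, the mask test fails, and
 * that dquot is still replayed while user and group dquots are tossed.
 */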
2970 
2971 /*
2972  * Recover a dquot record
2973  */
2974 STATIC int
2975 xlog_recover_dquot_pass2(
2976 	struct xlog			*log,
2977 	struct list_head		*buffer_list,
2978 	struct xlog_recover_item	*item,
2979 	xfs_lsn_t			current_lsn)
2980 {
2981 	xfs_mount_t		*mp = log->l_mp;
2982 	xfs_buf_t		*bp;
2983 	struct xfs_disk_dquot	*ddq, *recddq;
2984 	int			error;
2985 	xfs_dq_logformat_t	*dq_f;
2986 	uint			type;
2987 
2989 	/*
2990 	 * Filesystems are required to send in quota flags at mount time.
2991 	 */
2992 	if (mp->m_qflags == 0)
2993 		return 0;
2994 
2995 	recddq = item->ri_buf[1].i_addr;
2996 	if (recddq == NULL) {
2997 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2998 		return -EIO;
2999 	}
3000 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3001 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3002 			item->ri_buf[1].i_len, __func__);
3003 		return -EIO;
3004 	}
3005 
3006 	/*
3007 	 * This type of quota was turned off, so ignore this record.
3008 	 */
3009 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3010 	ASSERT(type);
3011 	if (log->l_quotaoffs_flag & type)
3012 		return 0;
3013 
3014 	/*
3015 	 * At this point we know that quota was _not_ turned off.
3016 	 * Since the mount flags are not indicating to us otherwise, this
3017 	 * must mean that quota is on, and the dquot needs to be replayed.
3018 	 * Remember that we may not have fully recovered the superblock yet,
3019 	 * so we can't do the usual trick of looking at the SB quota bits.
3020 	 *
3021 	 * The other possibility, of course, is that the quota subsystem was
3022 	 * removed since the last mount - ENOSYS.
3023 	 */
3024 	dq_f = item->ri_buf[0].i_addr;
3025 	ASSERT(dq_f);
3026 	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3027 			   "xlog_recover_dquot_pass2 (log copy)");
3028 	if (error)
3029 		return -EIO;
3030 	ASSERT(dq_f->qlf_len == 1);
3031 
3032 	/*
3033 	 * At this point we are assuming that the dquots have been allocated
3034 	 * and hence the buffer has valid dquots stamped in it. It should,
3035 	 * therefore, pass verifier validation. If the dquot is bad, then
3036 	 * we'll return an error here, so we don't need to specifically check
3037 	 * the dquot in the buffer after the verifier has run.
3038 	 */
3039 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3040 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3041 				   &xfs_dquot_buf_ops);
3042 	if (error)
3043 		return error;
3044 
3045 	ASSERT(bp);
3046 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
3047 
3048 	/*
3049 	 * If the dquot has an LSN in it, recover the dquot only if it's less
3050 	 * than the lsn of the transaction we are replaying.
3051 	 */
3052 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3053 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3054 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3055 
3056 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3057 			goto out_release;
3058 		}
3059 	}
3060 
3061 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3062 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3063 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3064 				 XFS_DQUOT_CRC_OFF);
3065 	}
3066 
3067 	ASSERT(dq_f->qlf_size == 2);
3068 	ASSERT(bp->b_target->bt_mount == mp);
3069 	bp->b_iodone = xlog_recover_iodone;
3070 	xfs_buf_delwri_queue(bp, buffer_list);
3071 
3072 out_release:
3073 	xfs_buf_relse(bp);
3074 	return 0;
3075 }
3076 
3077 /*
3078  * This routine is called to create an in-core extent free intent
3079  * item from the efi format structure which was logged on disk.
3080  * It allocates an in-core efi, copies the extents from the format
3081  * structure into it, and adds the efi to the AIL with the given
3082  * LSN.
3083  */
3084 STATIC int
3085 xlog_recover_efi_pass2(
3086 	struct xlog			*log,
3087 	struct xlog_recover_item	*item,
3088 	xfs_lsn_t			lsn)
3089 {
3090 	int			error;
3091 	xfs_mount_t		*mp = log->l_mp;
3092 	xfs_efi_log_item_t	*efip;
3093 	xfs_efi_log_format_t	*efi_formatp;
3094 
3095 	efi_formatp = item->ri_buf[0].i_addr;
3096 
3097 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3098 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
3099 					 &(efip->efi_format)))) {
3100 		xfs_efi_item_free(efip);
3101 		return error;
3102 	}
3103 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3104 
3105 	spin_lock(&log->l_ailp->xa_lock);
3106 	/*
3107 	 * xfs_trans_ail_update() drops the AIL lock.
3108 	 */
3109 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3110 	return 0;
3111 }
3112 
3114 /*
3115  * This routine is called when an efd format structure is found in
3116  * a committed transaction in the log.  Its purpose is to cancel
3117  * the corresponding efi if it was still in the log.  To do this
3118  * it searches the AIL for the efi with an id equal to that in the
3119  * efd format structure.  If we find it, we remove the efi from the
3120  * AIL and free it.
3121  */
3122 STATIC int
3123 xlog_recover_efd_pass2(
3124 	struct xlog			*log,
3125 	struct xlog_recover_item	*item)
3126 {
3127 	xfs_efd_log_format_t	*efd_formatp;
3128 	xfs_efi_log_item_t	*efip = NULL;
3129 	xfs_log_item_t		*lip;
3130 	__uint64_t		efi_id;
3131 	struct xfs_ail_cursor	cur;
3132 	struct xfs_ail		*ailp = log->l_ailp;
3133 
3134 	efd_formatp = item->ri_buf[0].i_addr;
3135 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3136 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3137 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3138 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3139 	efi_id = efd_formatp->efd_efi_id;
3140 
3141 	/*
3142 	 * Search for the efi with the id in the efd format structure
3143 	 * in the AIL.
3144 	 */
3145 	spin_lock(&ailp->xa_lock);
3146 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3147 	while (lip != NULL) {
3148 		if (lip->li_type == XFS_LI_EFI) {
3149 			efip = (xfs_efi_log_item_t *)lip;
3150 			if (efip->efi_format.efi_id == efi_id) {
3151 				/*
3152 				 * xfs_trans_ail_delete() drops the
3153 				 * AIL lock.
3154 				 */
3155 				xfs_trans_ail_delete(ailp, lip,
3156 						     SHUTDOWN_CORRUPT_INCORE);
3157 				xfs_efi_item_free(efip);
3158 				spin_lock(&ailp->xa_lock);
3159 				break;
3160 			}
3161 		}
3162 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3163 	}
3164 	xfs_trans_ail_cursor_done(&cur);
3165 	spin_unlock(&ailp->xa_lock);
3166 
3167 	return 0;
3168 }
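
/*
 * Example with hypothetical LSNs: an EFI committed at LSN 100 whose EFD
 * is found at LSN 120 within the recovered region is cancelled here - the
 * EFI is looked up in the AIL by id and freed, so no extent free is
 * re-run for it at the end of recovery.  An EFI without a matching EFD
 * stays in the AIL and is processed once recovery completes.
 */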
3169 
3170 /*
3171  * This routine is called when an inode create format structure is found in a
3172  * committed transaction in the log.  Its purpose is to initialise the inodes
3173  * being allocated on disk. This requires us to get inode cluster buffers that
3174  * match the range to be initialised, stamped with inode templates and written
3175  * by delayed write so that subsequent modifications will hit the cached buffer
3176  * and only need writing out at the end of recovery.
3177  */
3178 STATIC int
3179 xlog_recover_do_icreate_pass2(
3180 	struct xlog		*log,
3181 	struct list_head	*buffer_list,
3182 	xlog_recover_item_t	*item)
3183 {
3184 	struct xfs_mount	*mp = log->l_mp;
3185 	struct xfs_icreate_log	*icl;
3186 	xfs_agnumber_t		agno;
3187 	xfs_agblock_t		agbno;
3188 	unsigned int		count;
3189 	unsigned int		isize;
3190 	xfs_agblock_t		length;
3191 
3192 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3193 	if (icl->icl_type != XFS_LI_ICREATE) {
3194 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3195 		return -EINVAL;
3196 	}
3197 
3198 	if (icl->icl_size != 1) {
3199 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3200 		return -EINVAL;
3201 	}
3202 
3203 	agno = be32_to_cpu(icl->icl_ag);
3204 	if (agno >= mp->m_sb.sb_agcount) {
3205 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3206 		return -EINVAL;
3207 	}
3208 	agbno = be32_to_cpu(icl->icl_agbno);
3209 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3210 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3211 		return -EINVAL;
3212 	}
3213 	isize = be32_to_cpu(icl->icl_isize);
3214 	if (isize != mp->m_sb.sb_inodesize) {
3215 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3216 		return -EINVAL;
3217 	}
3218 	count = be32_to_cpu(icl->icl_count);
3219 	if (!count) {
3220 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3221 		return -EINVAL;
3222 	}
3223 	length = be32_to_cpu(icl->icl_length);
3224 	if (!length || length >= mp->m_sb.sb_agblocks) {
3225 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3226 		return -EINVAL;
3227 	}
3228 
3229 	/* existing allocation is a fixed value */
3230 	ASSERT(count == mp->m_ialloc_inos);
3231 	ASSERT(length == mp->m_ialloc_blks);
3232 	if (count != mp->m_ialloc_inos ||
3233 	     length != mp->m_ialloc_blks) {
3234 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3235 		return -EINVAL;
3236 	}
3237 
3238 	/*
3239 	 * Inode buffers can be freed. Do not replay the inode initialisation as
3240 	 * we could be overwriting something written after this inode buffer was
3241 	 * cancelled.
3242 	 *
3243 	 * XXX: we need to iterate all buffers and only init those that are not
3244 	 * cancelled. I think that a more fine grained factoring of
3245 	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
3246 	 * done easily.
3247 	 */
3248 	if (xlog_check_buffer_cancelled(log,
3249 			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3250 		return 0;
3251 
3252 	xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3253 					be32_to_cpu(icl->icl_gen));
3254 	return 0;
3255 }
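
/*
 * Example: for a hypothetical icreate record with agno == 2 and
 * agbno == 100, the cancellation check above keys on
 * XFS_AGB_TO_DADDR(mp, 2, 100) - the same daddr a cancelling XFS_LI_BUF
 * item for the freed cluster would carry - so a freed and cancelled
 * inode cluster is never re-initialised over newer data.
 */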
3256 
3257 /*
3258  * Free up any resources allocated by the transaction
3259  *
3260  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3261  */
3262 STATIC void
3263 xlog_recover_free_trans(
3264 	struct xlog_recover	*trans)
3265 {
3266 	xlog_recover_item_t	*item, *n;
3267 	int			i;
3268 
3269 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3270 		/* Free the regions in the item. */
3271 		list_del(&item->ri_list);
3272 		for (i = 0; i < item->ri_cnt; i++)
3273 			kmem_free(item->ri_buf[i].i_addr);
3274 		/* Free the item itself */
3275 		kmem_free(item->ri_buf);
3276 		kmem_free(item);
3277 	}
3278 	/* Free the transaction recover structure */
3279 	kmem_free(trans);
3280 }
3281 
3282 STATIC void
3283 xlog_recover_buffer_ra_pass2(
3284 	struct xlog                     *log,
3285 	struct xlog_recover_item        *item)
3286 {
3287 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3288 	struct xfs_mount		*mp = log->l_mp;
3289 
3290 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3291 			buf_f->blf_len, buf_f->blf_flags)) {
3292 		return;
3293 	}
3294 
3295 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3296 				buf_f->blf_len, NULL);
3297 }
3298 
3299 STATIC void
3300 xlog_recover_inode_ra_pass2(
3301 	struct xlog                     *log,
3302 	struct xlog_recover_item        *item)
3303 {
3304 	struct xfs_inode_log_format	ilf_buf;
3305 	struct xfs_inode_log_format	*ilfp;
3306 	struct xfs_mount		*mp = log->l_mp;
3307 	int			error;
3308 
3309 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3310 		ilfp = item->ri_buf[0].i_addr;
3311 	} else {
3312 		ilfp = &ilf_buf;
3313 		memset(ilfp, 0, sizeof(*ilfp));
3314 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3315 		if (error)
3316 			return;
3317 	}
3318 
3319 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3320 		return;
3321 
3322 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3323 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3324 }
3325 
3326 STATIC void
3327 xlog_recover_dquot_ra_pass2(
3328 	struct xlog			*log,
3329 	struct xlog_recover_item	*item)
3330 {
3331 	struct xfs_mount	*mp = log->l_mp;
3332 	struct xfs_disk_dquot	*recddq;
3333 	struct xfs_dq_logformat	*dq_f;
3334 	uint			type;
3335 
3337 	if (mp->m_qflags == 0)
3338 		return;
3339 
3340 	recddq = item->ri_buf[1].i_addr;
3341 	if (recddq == NULL)
3342 		return;
3343 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3344 		return;
3345 
3346 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3347 	ASSERT(type);
3348 	if (log->l_quotaoffs_flag & type)
3349 		return;
3350 
3351 	dq_f = item->ri_buf[0].i_addr;
3352 	ASSERT(dq_f);
3353 	ASSERT(dq_f->qlf_len == 1);
3354 
3355 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
3356 			  XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
3357 }
3358 
3359 STATIC void
3360 xlog_recover_ra_pass2(
3361 	struct xlog			*log,
3362 	struct xlog_recover_item	*item)
3363 {
3364 	switch (ITEM_TYPE(item)) {
3365 	case XFS_LI_BUF:
3366 		xlog_recover_buffer_ra_pass2(log, item);
3367 		break;
3368 	case XFS_LI_INODE:
3369 		xlog_recover_inode_ra_pass2(log, item);
3370 		break;
3371 	case XFS_LI_DQUOT:
3372 		xlog_recover_dquot_ra_pass2(log, item);
3373 		break;
3374 	case XFS_LI_EFI:
3375 	case XFS_LI_EFD:
3376 	case XFS_LI_QUOTAOFF:
3377 	default:
3378 		break;
3379 	}
3380 }
3381 
3382 STATIC int
3383 xlog_recover_commit_pass1(
3384 	struct xlog			*log,
3385 	struct xlog_recover		*trans,
3386 	struct xlog_recover_item	*item)
3387 {
3388 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3389 
3390 	switch (ITEM_TYPE(item)) {
3391 	case XFS_LI_BUF:
3392 		return xlog_recover_buffer_pass1(log, item);
3393 	case XFS_LI_QUOTAOFF:
3394 		return xlog_recover_quotaoff_pass1(log, item);
3395 	case XFS_LI_INODE:
3396 	case XFS_LI_EFI:
3397 	case XFS_LI_EFD:
3398 	case XFS_LI_DQUOT:
3399 	case XFS_LI_ICREATE:
3400 		/* nothing to do in pass 1 */
3401 		return 0;
3402 	default:
3403 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3404 			__func__, ITEM_TYPE(item));
3405 		ASSERT(0);
3406 		return -EIO;
3407 	}
3408 }
3409 
3410 STATIC int
3411 xlog_recover_commit_pass2(
3412 	struct xlog			*log,
3413 	struct xlog_recover		*trans,
3414 	struct list_head		*buffer_list,
3415 	struct xlog_recover_item	*item)
3416 {
3417 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3418 
3419 	switch (ITEM_TYPE(item)) {
3420 	case XFS_LI_BUF:
3421 		return xlog_recover_buffer_pass2(log, buffer_list, item,
3422 						 trans->r_lsn);
3423 	case XFS_LI_INODE:
3424 		return xlog_recover_inode_pass2(log, buffer_list, item,
3425 						 trans->r_lsn);
3426 	case XFS_LI_EFI:
3427 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3428 	case XFS_LI_EFD:
3429 		return xlog_recover_efd_pass2(log, item);
3430 	case XFS_LI_DQUOT:
3431 		return xlog_recover_dquot_pass2(log, buffer_list, item,
3432 						trans->r_lsn);
3433 	case XFS_LI_ICREATE:
3434 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3435 	case XFS_LI_QUOTAOFF:
3436 		/* nothing to do in pass2 */
3437 		return 0;
3438 	default:
3439 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3440 			__func__, ITEM_TYPE(item));
3441 		ASSERT(0);
3442 		return -EIO;
3443 	}
3444 }
3445 
3446 STATIC int
3447 xlog_recover_items_pass2(
3448 	struct xlog			*log,
3449 	struct xlog_recover		*trans,
3450 	struct list_head		*buffer_list,
3451 	struct list_head		*item_list)
3452 {
3453 	struct xlog_recover_item	*item;
3454 	int				error = 0;
3455 
3456 	list_for_each_entry(item, item_list, ri_list) {
3457 		error = xlog_recover_commit_pass2(log, trans,
3458 					  buffer_list, item);
3459 		if (error)
3460 			return error;
3461 	}
3462 
3463 	return error;
3464 }
3465 
3466 /*
3467  * Perform the transaction.
3468  *
3469  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
3470  * EFIs and EFDs get queued up by adding entries into the AIL for them.
3471  */
3472 STATIC int
3473 xlog_recover_commit_trans(
3474 	struct xlog		*log,
3475 	struct xlog_recover	*trans,
3476 	int			pass)
3477 {
3478 	int				error = 0;
3479 	int				error2;
3480 	int				items_queued = 0;
3481 	struct xlog_recover_item	*item;
3482 	struct xlog_recover_item	*next;
3483 	LIST_HEAD			(buffer_list);
3484 	LIST_HEAD			(ra_list);
3485 	LIST_HEAD			(done_list);
3486 
3487 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3488 
3489 	hlist_del(&trans->r_list);
3490 
3491 	error = xlog_recover_reorder_trans(log, trans, pass);
3492 	if (error)
3493 		return error;
3494 
3495 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3496 		switch (pass) {
3497 		case XLOG_RECOVER_PASS1:
3498 			error = xlog_recover_commit_pass1(log, trans, item);
3499 			break;
3500 		case XLOG_RECOVER_PASS2:
3501 			xlog_recover_ra_pass2(log, item);
3502 			list_move_tail(&item->ri_list, &ra_list);
3503 			items_queued++;
3504 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3505 				error = xlog_recover_items_pass2(log, trans,
3506 						&buffer_list, &ra_list);
3507 				list_splice_tail_init(&ra_list, &done_list);
3508 				items_queued = 0;
3509 			}
3510 
3511 			break;
3512 		default:
3513 			ASSERT(0);
3514 		}
3515 
3516 		if (error)
3517 			goto out;
3518 	}
3519 
3520 out:
3521 	if (!list_empty(&ra_list)) {
3522 		if (!error)
3523 			error = xlog_recover_items_pass2(log, trans,
3524 					&buffer_list, &ra_list);
3525 		list_splice_tail_init(&ra_list, &done_list);
3526 	}
3527 
3528 	if (!list_empty(&done_list))
3529 		list_splice_init(&done_list, &trans->r_itemq);
3530 
3531 	xlog_recover_free_trans(trans);
3532 
3533 	error2 = xfs_buf_delwri_submit(&buffer_list);
3534 	return error ? error : error2;
3535 }
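
/*
 * Editorial note: the pass-2 half of the loop above implements a simple
 * prefetch window -- readahead is issued for up to
 * XLOG_RECOVER_COMMIT_QUEUE_MAX items before the batch is replayed, so
 * the readahead I/O can complete while earlier items are being
 * processed.  A minimal standalone sketch of that pattern (demo_* names
 * are hypothetical, not part of XFS):
 */
#if 0
#define DEMO_QUEUE_MAX	100

static int demo_replay_all(int *items, int nitems,
			   void (*prefetch)(int), int (*replay)(int))
{
	int i, start = 0, error = 0;

	for (i = 0; i < nitems && !error; i++) {
		prefetch(items[i]);	/* start readahead early */
		if (i - start + 1 >= DEMO_QUEUE_MAX) {
			while (start <= i && !error)
				error = replay(items[start++]);
		}
	}
	while (start < nitems && !error)	/* drain the remainder */
		error = replay(items[start++]);
	return error;
}
#endif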
3536 
3537 STATIC int
3538 xlog_recover_unmount_trans(
3539 	struct xlog		*log)
3540 {
3541 	/* Do nothing now */
3542 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3543 	return 0;
3544 }
3545 
3546 /*
3547  * There are two valid states of the r_state field.  0 indicates that the
3548  * transaction structure is in a normal state: either we have seen the
3549  * start of the transaction, or the last operation we added was not a partial
3550  * operation.  If the last operation we added to the transaction was a
3551  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3552  *
3553  * NOTE: skip LRs with 0 data length.
3554  */
3555 STATIC int
3556 xlog_recover_process_data(
3557 	struct xlog		*log,
3558 	struct hlist_head	rhash[],
3559 	struct xlog_rec_header	*rhead,
3560 	xfs_caddr_t		dp,
3561 	int			pass)
3562 {
3563 	xfs_caddr_t		lp;
3564 	int			num_logops;
3565 	xlog_op_header_t	*ohead;
3566 	xlog_recover_t		*trans;
3567 	xlog_tid_t		tid;
3568 	int			error;
3569 	unsigned long		hash;
3570 	uint			flags;
3571 
3572 	lp = dp + be32_to_cpu(rhead->h_len);
3573 	num_logops = be32_to_cpu(rhead->h_num_logops);
3574 
3575 	/* check the log format matches our own - else we can't recover */
3576 	if (xlog_header_check_recover(log->l_mp, rhead))
3577 		return -EIO;
3578 
3579 	while ((dp < lp) && num_logops) {
3580 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3581 		ohead = (xlog_op_header_t *)dp;
3582 		dp += sizeof(xlog_op_header_t);
3583 		if (ohead->oh_clientid != XFS_TRANSACTION &&
3584 		    ohead->oh_clientid != XFS_LOG) {
3585 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3586 					__func__, ohead->oh_clientid);
3587 			ASSERT(0);
3588 			return -EIO;
3589 		}
3590 		tid = be32_to_cpu(ohead->oh_tid);
3591 		hash = XLOG_RHASH(tid);
3592 		trans = xlog_recover_find_tid(&rhash[hash], tid);
3593 		if (trans == NULL) {		   /* not found; add new tid */
3594 			if (ohead->oh_flags & XLOG_START_TRANS)
3595 				xlog_recover_new_tid(&rhash[hash], tid,
3596 					be64_to_cpu(rhead->h_lsn));
3597 		} else {
3598 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3599 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
3600 					__func__, be32_to_cpu(ohead->oh_len));
3601 				WARN_ON(1);
3602 				return -EIO;
3603 			}
3604 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
3605 			if (flags & XLOG_WAS_CONT_TRANS)
3606 				flags &= ~XLOG_CONTINUE_TRANS;
3607 			switch (flags) {
3608 			case XLOG_COMMIT_TRANS:
3609 				error = xlog_recover_commit_trans(log,
3610 								trans, pass);
3611 				break;
3612 			case XLOG_UNMOUNT_TRANS:
3613 				error = xlog_recover_unmount_trans(log);
3614 				break;
3615 			case XLOG_WAS_CONT_TRANS:
3616 				error = xlog_recover_add_to_cont_trans(log,
3617 						trans, dp,
3618 						be32_to_cpu(ohead->oh_len));
3619 				break;
3620 			case XLOG_START_TRANS:
3621 				xfs_warn(log->l_mp, "%s: bad transaction",
3622 					__func__);
3623 				ASSERT(0);
3624 				error = -EIO;
3625 				break;
3626 			case 0:
3627 			case XLOG_CONTINUE_TRANS:
3628 				error = xlog_recover_add_to_trans(log, trans,
3629 						dp, be32_to_cpu(ohead->oh_len));
3630 				break;
3631 			default:
3632 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3633 					__func__, flags);
3634 				ASSERT(0);
3635 				error = -EIO;
3636 				break;
3637 			}
3638 			if (error) {
3639 				xlog_recover_free_trans(trans);
3640 				return error;
3641 			}
3642 		}
3643 		dp += be32_to_cpu(ohead->oh_len);
3644 		num_logops--;
3645 	}
3646 	return 0;
3647 }
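
/*
 * Editorial note: operations from many transactions are interleaved in
 * the log, so the loop above keys each op header's tid into the rhash[]
 * table to find (or create) the in-progress transaction it belongs to.
 * A minimal sketch of that lookup, with hypothetical demo_* names in
 * place of XLOG_RHASH()/xlog_recover_find_tid():
 */
#if 0
#define DEMO_RHASH_SIZE	16	/* must be a power of two */

struct demo_trans {
	unsigned int		tid;
	struct demo_trans	*next;	/* hash chain */
};

static struct demo_trans *
demo_find_tid(struct demo_trans *rhash[], unsigned int tid)
{
	struct demo_trans *t;

	for (t = rhash[tid & (DEMO_RHASH_SIZE - 1)]; t; t = t->next)
		if (t->tid == tid)
			return t;
	return NULL;	/* caller adds a new entry on XLOG_START_TRANS */
}
#endif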
3648 
3649 /*
3650  * Process an extent free intent item that was recovered from
3651  * the log.  We need to free the extents that it describes.
3652  */
3653 STATIC int
3654 xlog_recover_process_efi(
3655 	xfs_mount_t		*mp,
3656 	xfs_efi_log_item_t	*efip)
3657 {
3658 	xfs_efd_log_item_t	*efdp;
3659 	xfs_trans_t		*tp;
3660 	int			i;
3661 	int			error = 0;
3662 	xfs_extent_t		*extp;
3663 	xfs_fsblock_t		startblock_fsb;
3664 
3665 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3666 
3667 	/*
3668 	 * First check the validity of the extents described by the
3669 	 * EFI.  If any are bad, then assume that all are bad and
3670 	 * just toss the EFI.
3671 	 */
3672 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3673 		extp = &(efip->efi_format.efi_extents[i]);
3674 		startblock_fsb = XFS_BB_TO_FSB(mp,
3675 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
3676 		if ((startblock_fsb == 0) ||
3677 		    (extp->ext_len == 0) ||
3678 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3679 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3680 			/*
3681 			 * This will pull the EFI from the AIL and
3682 			 * free the memory associated with it.
3683 			 */
3684 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3685 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
3686 			return -EIO;
3687 		}
3688 	}
3689 
3690 	tp = xfs_trans_alloc(mp, 0);
3691 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
3692 	if (error)
3693 		goto abort_error;
3694 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3695 
3696 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3697 		extp = &(efip->efi_format.efi_extents[i]);
3698 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3699 		if (error)
3700 			goto abort_error;
3701 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3702 					 extp->ext_len);
3703 	}
3704 
3705 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3706 	error = xfs_trans_commit(tp, 0);
3707 	return error;
3708 
3709 abort_error:
3710 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3711 	return error;
3712 }
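
/*
 * Editorial note: the screening loop above is "validate every extent
 * before freeing any" -- one bad extent discards the whole EFI.  A
 * simplified standalone version of the per-extent test (demo_* names
 * hypothetical; the real code round-trips the start block through
 * XFS_FSB_TO_DADDR()/XFS_BB_TO_FSB() so a corrupt value collapses to 0
 * or falls outside the filesystem):
 */
#if 0
static int demo_extent_sane(unsigned long long start_fsb, unsigned int len,
			    unsigned long long fs_blocks,
			    unsigned int ag_blocks)
{
	return start_fsb != 0 && len != 0 &&
	       start_fsb < fs_blocks && len < ag_blocks;
}
#endif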
3713 
3714 /*
3715  * When this is called, all of the EFIs which did not have
3716  * corresponding EFDs should be in the AIL.  What we do now
3717  * is free the extents associated with each one.
3718  *
3719  * Since we process the EFIs in normal transactions, they
3720  * will be removed at some point after the commit.  This prevents
3721  * us from just walking down the list processing each one.
3722  * We'll use a flag in the EFI to skip those that we've already
3723  * processed and use the AIL iteration mechanism's generation
3724  * count to try to speed this up at least a bit.
3725  *
3726  * When we start, we know that the EFIs are the only things in
3727  * the AIL.  As we process them, however, other items are added
3728  * to the AIL.  Since everything added to the AIL must come after
3729  * everything already in the AIL, we stop processing as soon as
3730  * we see something other than an EFI in the AIL.
3731  */
3732 STATIC int
3733 xlog_recover_process_efis(
3734 	struct xlog	*log)
3735 {
3736 	xfs_log_item_t		*lip;
3737 	xfs_efi_log_item_t	*efip;
3738 	int			error = 0;
3739 	struct xfs_ail_cursor	cur;
3740 	struct xfs_ail		*ailp;
3741 
3742 	ailp = log->l_ailp;
3743 	spin_lock(&ailp->xa_lock);
3744 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3745 	while (lip != NULL) {
3746 		/*
3747 		 * We're done when we see something other than an EFI.
3748 		 * There should be no EFIs left in the AIL now.
3749 		 */
3750 		if (lip->li_type != XFS_LI_EFI) {
3751 #ifdef DEBUG
3752 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3753 				ASSERT(lip->li_type != XFS_LI_EFI);
3754 #endif
3755 			break;
3756 		}
3757 
3758 		/*
3759 		 * Skip EFIs that we've already processed.
3760 		 */
3761 		efip = (xfs_efi_log_item_t *)lip;
3762 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3763 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3764 			continue;
3765 		}
3766 
3767 		spin_unlock(&ailp->xa_lock);
3768 		error = xlog_recover_process_efi(log->l_mp, efip);
3769 		spin_lock(&ailp->xa_lock);
3770 		if (error)
3771 			goto out;
3772 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3773 	}
3774 out:
3775 	xfs_trans_ail_cursor_done(&cur);
3776 	spin_unlock(&ailp->xa_lock);
3777 	return error;
3778 }
3779 
3780 /*
3781  * This routine performs a transaction to null out a bad inode pointer
3782  * in an agi unlinked inode hash bucket.
3783  */
3784 STATIC void
3785 xlog_recover_clear_agi_bucket(
3786 	xfs_mount_t	*mp,
3787 	xfs_agnumber_t	agno,
3788 	int		bucket)
3789 {
3790 	xfs_trans_t	*tp;
3791 	xfs_agi_t	*agi;
3792 	xfs_buf_t	*agibp;
3793 	int		offset;
3794 	int		error;
3795 
3796 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3797 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
3798 	if (error)
3799 		goto out_abort;
3800 
3801 	error = xfs_read_agi(mp, tp, agno, &agibp);
3802 	if (error)
3803 		goto out_abort;
3804 
3805 	agi = XFS_BUF_TO_AGI(agibp);
3806 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3807 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3808 		 (sizeof(xfs_agino_t) * bucket);
3809 	xfs_trans_log_buf(tp, agibp, offset,
3810 			  (offset + sizeof(xfs_agino_t) - 1));
3811 
3812 	error = xfs_trans_commit(tp, 0);
3813 	if (error)
3814 		goto out_error;
3815 	return;
3816 
3817 out_abort:
3818 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3819 out_error:
3820 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3821 	return;
3822 }
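
/*
 * Editorial note: the logged byte range above is just the location of a
 * single __be32 bucket slot within the on-disk AGI.  A standalone
 * sketch of the same arithmetic against a mock structure (demo_agi is
 * hypothetical, standing in for xfs_agi_t):
 */
#if 0
#include <stddef.h>

struct demo_agi {
	unsigned int	agi_magicnum;
	/* ... other AGI header fields ... */
	unsigned int	agi_unlinked[64];	/* XFS_AGI_UNLINKED_BUCKETS */
};

static void demo_bucket_range(int bucket, size_t *first, size_t *last)
{
	*first = offsetof(struct demo_agi, agi_unlinked) +
		 sizeof(unsigned int) * bucket;
	*last = *first + sizeof(unsigned int) - 1;	/* inclusive */
}
#endif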
3823 
3824 STATIC xfs_agino_t
3825 xlog_recover_process_one_iunlink(
3826 	struct xfs_mount		*mp,
3827 	xfs_agnumber_t			agno,
3828 	xfs_agino_t			agino,
3829 	int				bucket)
3830 {
3831 	struct xfs_buf			*ibp;
3832 	struct xfs_dinode		*dip;
3833 	struct xfs_inode		*ip;
3834 	xfs_ino_t			ino;
3835 	int				error;
3836 
3837 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3838 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3839 	if (error)
3840 		goto fail;
3841 
3842 	/*
3843 	 * Get the on-disk inode to find the next inode in the bucket.
3844 	 */
3845 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3846 	if (error)
3847 		goto fail_iput;
3848 
3849 	ASSERT(ip->i_d.di_nlink == 0);
3850 	ASSERT(ip->i_d.di_mode != 0);
3851 
3852 	/* setup for the next pass */
3853 	agino = be32_to_cpu(dip->di_next_unlinked);
3854 	xfs_buf_relse(ibp);
3855 
3856 	/*
3857 	 * Prevent any DMAPI event from being sent when the reference on
3858 	 * the inode is dropped.
3859 	 */
3860 	ip->i_d.di_dmevmask = 0;
3861 
3862 	IRELE(ip);
3863 	return agino;
3864 
3865  fail_iput:
3866 	IRELE(ip);
3867  fail:
3868 	/*
3869 	 * We can't read in the inode this bucket points to, or this inode
3870 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3871 	 * some inodes and space, but at least we won't hang.
3872 	 *
3873 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3874 	 * clear the inode pointer in the bucket.
3875 	 */
3876 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3877 	return NULLAGINO;
3878 }
3879 
3880 /*
3881  * xlog_iunlink_recover
3882  *
3883  * This is called during recovery to process any inodes which
3884  * were unlinked but not freed when the system crashed.  These
3885  * inodes will be on the lists in the AGI blocks.  What we do
3886  * here is scan all the AGIs and fully truncate and free any
3887  * inodes found on the lists.  Each inode is removed from the
3888  * lists when it has been fully truncated and is freed.  The
3889  * freeing of the inode and its removal from the list must be
3890  * atomic.
3891  */
3892 STATIC void
3893 xlog_recover_process_iunlinks(
3894 	struct xlog	*log)
3895 {
3896 	xfs_mount_t	*mp;
3897 	xfs_agnumber_t	agno;
3898 	xfs_agi_t	*agi;
3899 	xfs_buf_t	*agibp;
3900 	xfs_agino_t	agino;
3901 	int		bucket;
3902 	int		error;
3903 	uint		mp_dmevmask;
3904 
3905 	mp = log->l_mp;
3906 
3907 	/*
3908 	 * Prevent any DMAPI event from being sent while in this function.
3909 	 */
3910 	mp_dmevmask = mp->m_dmevmask;
3911 	mp->m_dmevmask = 0;
3912 
3913 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3914 		/*
3915 		 * Find the agi for this ag.
3916 		 */
3917 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3918 		if (error) {
3919 			/*
3920 			 * AGI is b0rked. Don't process it.
3921 			 *
3922 			 * We should probably mark the filesystem as corrupt
3923 			 * after we've recovered all the ag's we can....
3924 			 */
3925 			continue;
3926 		}
3927 		/*
3928 		 * Unlock the buffer so that it can be acquired in the normal
3929 		 * course of the transaction to truncate and free each inode.
3930 		 * Because we are not racing with anyone else here for the AGI
3931 		 * buffer, we don't even need to hold it locked to read the
3932 		 * initial unlinked bucket entries out of the buffer. We keep
3933 		 * buffer reference though, so that it stays pinned in memory
3934 		 * while we need the buffer.
3935 		 */
3936 		agi = XFS_BUF_TO_AGI(agibp);
3937 		xfs_buf_unlock(agibp);
3938 
3939 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3940 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3941 			while (agino != NULLAGINO) {
3942 				agino = xlog_recover_process_one_iunlink(mp,
3943 							agno, agino, bucket);
3944 			}
3945 		}
3946 		xfs_buf_rele(agibp);
3947 	}
3948 
3949 	mp->m_dmevmask = mp_dmevmask;
3950 }
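
/*
 * Editorial note: each AGI bucket above heads a singly linked list
 * threaded through the on-disk di_next_unlinked fields and terminated
 * by NULLAGINO.  Modeling the chain with a plain array (demo_* names
 * hypothetical; dispose() stands in for the iget/truncate/free work
 * done by xlog_recover_process_one_iunlink()):
 */
#if 0
#define DEMO_NULLAGINO	((unsigned int)-1)

static void demo_drain_bucket(unsigned int head,
			      const unsigned int *next_unlinked,
			      void (*dispose)(unsigned int))
{
	unsigned int agino = head;

	while (agino != DEMO_NULLAGINO) {
		unsigned int next = next_unlinked[agino];

		dispose(agino);
		agino = next;
	}
}
#endif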
3951 
3952 /*
3953  * Unpack the log buffer data and CRC check it. If the check fails, issue a
3954  * warning if and only if the CRC in the header is non-zero. This makes the
3955  * check an advisory warning, and the zero CRC check will prevent failure
3956  * warnings from being emitted when upgrading the kernel from one that does not
3957  * add CRCs by default.
3958  *
3959  * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3960  * corruption failure.
3961  */
3962 STATIC int
3963 xlog_unpack_data_crc(
3964 	struct xlog_rec_header	*rhead,
3965 	xfs_caddr_t		dp,
3966 	struct xlog		*log)
3967 {
3968 	__le32			crc;
3969 
3970 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3971 	if (crc != rhead->h_crc) {
3972 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3973 			xfs_alert(log->l_mp,
3974 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
3975 					le32_to_cpu(rhead->h_crc),
3976 					le32_to_cpu(crc));
3977 			xfs_hex_dump(dp, 32);
3978 		}
3979 
3980 		/*
3981 		 * If we've detected a log record corruption, then we can't
3982 		 * recover past this point. Abort recovery if we are enforcing
3983 		 * CRC protection by punting an error back up the stack.
3984 		 */
3985 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3986 			return -EFSCORRUPTED;
3987 	}
3988 
3989 	return 0;
3990 }
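
/*
 * Editorial note: the policy above reduces to a small decision table --
 * a mismatch is fatal only on a CRC-enabled (v5) filesystem, warns if
 * the stored CRC is non-zero or CRCs are enforced, and stays silent for
 * a zero CRC written by an older kernel.  A standalone sketch (demo_*
 * names hypothetical):
 */
#if 0
static int demo_crc_check(unsigned int stored, unsigned int computed,
			  int fs_has_crc, void (*warn)(void))
{
	if (stored == computed)
		return 0;
	if (stored != 0 || fs_has_crc)
		warn();			/* advisory when !fs_has_crc */
	return fs_has_crc ? -1 : 0;	/* -1 models -EFSCORRUPTED */
}
#endif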
3991 
3992 STATIC int
3993 xlog_unpack_data(
3994 	struct xlog_rec_header	*rhead,
3995 	xfs_caddr_t		dp,
3996 	struct xlog		*log)
3997 {
3998 	int			i, j, k;
3999 	int			error;
4000 
4001 	error = xlog_unpack_data_crc(rhead, dp, log);
4002 	if (error)
4003 		return error;
4004 
4005 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4006 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4007 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4008 		dp += BBSIZE;
4009 	}
4010 
4011 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4012 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4013 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4014 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4015 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4016 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4017 			dp += BBSIZE;
4018 		}
4019 	}
4020 
4021 	return 0;
4022 }
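
/*
 * Editorial note: at write time the first __be32 of every basic block
 * in the record body is overwritten with the cycle number; the
 * displaced words are stashed in h_cycle_data[] (and, for v2 logs, in
 * extended headers).  With BBSIZE = 512 and XLOG_HEADER_CYCLE_SIZE =
 * 32768, each header holds 32768/512 = 64 saved words, which is exactly
 * the j/k arithmetic above.  A standalone sketch (demo name
 * hypothetical):
 */
#if 0
static void demo_saved_word_index(int i, int *hdr, int *slot)
{
	const int words_per_hdr = 32768 / 512;	/* 64 */

	*hdr = i / words_per_hdr;	/* 0 is the main record header */
	*slot = i % words_per_hdr;
}
#endif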
4023 
4024 STATIC int
4025 xlog_valid_rec_header(
4026 	struct xlog		*log,
4027 	struct xlog_rec_header	*rhead,
4028 	xfs_daddr_t		blkno)
4029 {
4030 	int			hlen;
4031 
4032 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4033 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4034 				XFS_ERRLEVEL_LOW, log->l_mp);
4035 		return -EFSCORRUPTED;
4036 	}
4037 	if (unlikely(
4038 	    (!rhead->h_version ||
4039 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4040 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4041 			__func__, be32_to_cpu(rhead->h_version));
4042 		return -EIO;
4043 	}
4044 
4045 	/* LR body must have data or it wouldn't have been written */
4046 	hlen = be32_to_cpu(rhead->h_len);
4047 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4048 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4049 				XFS_ERRLEVEL_LOW, log->l_mp);
4050 		return -EFSCORRUPTED;
4051 	}
4052 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4053 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4054 				XFS_ERRLEVEL_LOW, log->l_mp);
4055 		return -EFSCORRUPTED;
4056 	}
4057 	return 0;
4058 }
4059 
4060 /*
4061  * Read the log from tail to head and process the log records found.
4062  * Handle separately the two cases where the tail and head are in the
4063  * same cycle and where the active portion of the log wraps around
4064  * the end of the physical log.  The pass parameter is passed through
4065  * to the routines called to process the data and is not looked at
4066  * here.
4067  */
4068 STATIC int
4069 xlog_do_recovery_pass(
4070 	struct xlog		*log,
4071 	xfs_daddr_t		head_blk,
4072 	xfs_daddr_t		tail_blk,
4073 	int			pass)
4074 {
4075 	xlog_rec_header_t	*rhead;
4076 	xfs_daddr_t		blk_no;
4077 	xfs_caddr_t		offset;
4078 	xfs_buf_t		*hbp, *dbp;
4079 	int			error = 0, h_size;
4080 	int			bblks, split_bblks;
4081 	int			hblks, split_hblks, wrapped_hblks;
4082 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
4083 
4084 	ASSERT(head_blk != tail_blk);
4085 
4086 	/*
4087 	 * Read the header of the tail block and get the iclog buffer size from
4088 	 * h_size.  Use this to tell how many sectors make up the log header.
4089 	 */
4090 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4091 		/*
4092 		 * When using variable length iclogs, read first sector of
4093 		 * iclog header and extract the header size from it.  Get a
4094 		 * new hbp that is the correct size.
4095 		 */
4096 		hbp = xlog_get_bp(log, 1);
4097 		if (!hbp)
4098 			return -ENOMEM;
4099 
4100 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4101 		if (error)
4102 			goto bread_err1;
4103 
4104 		rhead = (xlog_rec_header_t *)offset;
4105 		error = xlog_valid_rec_header(log, rhead, tail_blk);
4106 		if (error)
4107 			goto bread_err1;
4108 		h_size = be32_to_cpu(rhead->h_size);
4109 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4110 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4111 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4112 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
4113 				hblks++;
4114 			xlog_put_bp(hbp);
4115 			hbp = xlog_get_bp(log, hblks);
4116 		} else {
4117 			hblks = 1;
4118 		}
4119 	} else {
4120 		ASSERT(log->l_sectBBsize == 1);
4121 		hblks = 1;
4122 		hbp = xlog_get_bp(log, 1);
4123 		h_size = XLOG_BIG_RECORD_BSIZE;
4124 	}
4125 
4126 	if (!hbp)
4127 		return -ENOMEM;
4128 	dbp = xlog_get_bp(log, BTOBB(h_size));
4129 	if (!dbp) {
4130 		xlog_put_bp(hbp);
4131 		return -ENOMEM;
4132 	}
4133 
4134 	memset(rhash, 0, sizeof(rhash));
4135 	if (tail_blk <= head_blk) {
4136 		for (blk_no = tail_blk; blk_no < head_blk; ) {
4137 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4138 			if (error)
4139 				goto bread_err2;
4140 
4141 			rhead = (xlog_rec_header_t *)offset;
4142 			error = xlog_valid_rec_header(log, rhead, blk_no);
4143 			if (error)
4144 				goto bread_err2;
4145 
4146 			/* blocks in data section */
4147 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4148 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4149 					   &offset);
4150 			if (error)
4151 				goto bread_err2;
4152 
4153 			error = xlog_unpack_data(rhead, offset, log);
4154 			if (error)
4155 				goto bread_err2;
4156 
4157 			error = xlog_recover_process_data(log,
4158 						rhash, rhead, offset, pass);
4159 			if (error)
4160 				goto bread_err2;
4161 			blk_no += bblks + hblks;
4162 		}
4163 	} else {
4164 		/*
4165 		 * Perform recovery around the end of the physical log.
4166 		 * When the head is not on the same cycle number as the tail,
4167 		 * we can't do a sequential recovery as above.
4168 		 */
4169 		blk_no = tail_blk;
4170 		while (blk_no < log->l_logBBsize) {
4171 			/*
4172 			 * Check for header wrapping around physical end-of-log
4173 			 */
4174 			offset = hbp->b_addr;
4175 			split_hblks = 0;
4176 			wrapped_hblks = 0;
4177 			if (blk_no + hblks <= log->l_logBBsize) {
4178 				/* Read header in one read */
4179 				error = xlog_bread(log, blk_no, hblks, hbp,
4180 						   &offset);
4181 				if (error)
4182 					goto bread_err2;
4183 			} else {
4184 				/* This LR is split across physical log end */
4185 				if (blk_no != log->l_logBBsize) {
4186 					/* some data before physical log end */
4187 					ASSERT(blk_no <= INT_MAX);
4188 					split_hblks = log->l_logBBsize - (int)blk_no;
4189 					ASSERT(split_hblks > 0);
4190 					error = xlog_bread(log, blk_no,
4191 							   split_hblks, hbp,
4192 							   &offset);
4193 					if (error)
4194 						goto bread_err2;
4195 				}
4196 
4197 				/*
4198 				 * Note: this black magic still works with
4199 				 * large sector sizes (non-512) only because:
4200 				 * - we increased the buffer size originally
4201 				 *   by 1 sector giving us enough extra space
4202 				 *   for the second read;
4203 				 * - the log start is guaranteed to be sector
4204 				 *   aligned;
4205 				 * - we read the log end (LR header start)
4206 				 *   _first_, then the log start (LR header end)
4207 				 *   - order is important.
4208 				 */
4209 				wrapped_hblks = hblks - split_hblks;
4210 				error = xlog_bread_offset(log, 0,
4211 						wrapped_hblks, hbp,
4212 						offset + BBTOB(split_hblks));
4213 				if (error)
4214 					goto bread_err2;
4215 			}
4216 			rhead = (xlog_rec_header_t *)offset;
4217 			error = xlog_valid_rec_header(log, rhead,
4218 						split_hblks ? blk_no : 0);
4219 			if (error)
4220 				goto bread_err2;
4221 
4222 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4223 			blk_no += hblks;
4224 
4225 			/* Read in data for log record */
4226 			if (blk_no + bblks <= log->l_logBBsize) {
4227 				error = xlog_bread(log, blk_no, bblks, dbp,
4228 						   &offset);
4229 				if (error)
4230 					goto bread_err2;
4231 			} else {
4232 				/* This log record is split across the
4233 				 * physical end of log */
4234 				offset = dbp->b_addr;
4235 				split_bblks = 0;
4236 				if (blk_no != log->l_logBBsize) {
4237 					/* some data is before the physical
4238 					 * end of log */
4239 					ASSERT(!wrapped_hblks);
4240 					ASSERT(blk_no <= INT_MAX);
4241 					split_bblks =
4242 						log->l_logBBsize - (int)blk_no;
4243 					ASSERT(split_bblks > 0);
4244 					error = xlog_bread(log, blk_no,
4245 							split_bblks, dbp,
4246 							&offset);
4247 					if (error)
4248 						goto bread_err2;
4249 				}
4250 
4251 				/*
4252 				 * Note: this black magic still works with
4253 				 * large sector sizes (non-512) only because:
4254 				 * - we increased the buffer size originally
4255 				 *   by 1 sector giving us enough extra space
4256 				 *   for the second read;
4257 				 * - the log start is guaranteed to be sector
4258 				 *   aligned;
4259 				 * - we read the log end (LR header start)
4260 				 *   _first_, then the log start (LR header end)
4261 				 *   - order is important.
4262 				 */
4263 				error = xlog_bread_offset(log, 0,
4264 						bblks - split_bblks, dbp,
4265 						offset + BBTOB(split_bblks));
4266 				if (error)
4267 					goto bread_err2;
4268 			}
4269 
4270 			error = xlog_unpack_data(rhead, offset, log);
4271 			if (error)
4272 				goto bread_err2;
4273 
4274 			error = xlog_recover_process_data(log, rhash,
4275 							rhead, offset, pass);
4276 			if (error)
4277 				goto bread_err2;
4278 			blk_no += bblks;
4279 		}
4280 
4281 		ASSERT(blk_no >= log->l_logBBsize);
4282 		blk_no -= log->l_logBBsize;
4283 
4284 		/* read first part of physical log */
4285 		while (blk_no < head_blk) {
4286 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4287 			if (error)
4288 				goto bread_err2;
4289 
4290 			rhead = (xlog_rec_header_t *)offset;
4291 			error = xlog_valid_rec_header(log, rhead, blk_no);
4292 			if (error)
4293 				goto bread_err2;
4294 
4295 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4296 			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4297 					   &offset);
4298 			if (error)
4299 				goto bread_err2;
4300 
4301 			error = xlog_unpack_data(rhead, offset, log);
4302 			if (error)
4303 				goto bread_err2;
4304 
4305 			error = xlog_recover_process_data(log, rhash,
4306 							rhead, offset, pass);
4307 			if (error)
4308 				goto bread_err2;
4309 			blk_no += bblks + hblks;
4310 		}
4311 	}
4312 
4313  bread_err2:
4314 	xlog_put_bp(dbp);
4315  bread_err1:
4316 	xlog_put_bp(hbp);
4317 	return error;
4318 }
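
/*
 * Editorial note: the "black magic" reads above assemble a record that
 * straddles the physical end of the log by reading the piece before the
 * end into the start of the buffer, then the piece that wrapped to
 * block 0 directly behind it.  Modeling blocks as bytes (demo name
 * hypothetical):
 */
#if 0
#include <string.h>

static void demo_read_wrapped(const char *disk, int log_size,
			      int blk_no, int nblks, char *buf)
{
	int split = log_size - blk_no;	/* blocks before the physical end */

	if (nblks <= split) {
		memcpy(buf, disk + blk_no, nblks);		/* no wrap */
	} else {
		memcpy(buf, disk + blk_no, split);		/* tail piece */
		memcpy(buf + split, disk, nblks - split);	/* wrapped piece */
	}
}
#endif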
4319 
4320 /*
4321  * Do the recovery of the log.  We actually do this in two phases.
4322  * The two passes are necessary in order to implement the function
4323  * of cancelling a record written into the log.  The first pass
4324  * determines those things which have been cancelled, and the
4325  * second pass replays log items normally except for those which
4326  * have been cancelled.  The handling of the replay and cancellations
4327  * takes place in the log item type specific routines.
4328  *
4329  * The table of items which have cancel records in the log is allocated
4330  * and freed at this level, since only here do we know when all of
4331  * the log recovery has been completed.
4332  */
4333 STATIC int
4334 xlog_do_log_recovery(
4335 	struct xlog	*log,
4336 	xfs_daddr_t	head_blk,
4337 	xfs_daddr_t	tail_blk)
4338 {
4339 	int		error, i;
4340 
4341 	ASSERT(head_blk != tail_blk);
4342 
4343 	/*
4344 	 * First do a pass to find all of the cancelled buf log items.
4345 	 * Store them in the buf_cancel_table for use in the second pass.
4346 	 */
4347 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4348 						 sizeof(struct list_head),
4349 						 KM_SLEEP);
4350 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4351 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4352 
4353 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4354 				      XLOG_RECOVER_PASS1);
4355 	if (error != 0) {
4356 		kmem_free(log->l_buf_cancel_table);
4357 		log->l_buf_cancel_table = NULL;
4358 		return error;
4359 	}
4360 	/*
4361 	 * Then do a second pass to actually recover the items in the log.
4362 	 * When it is complete free the table of buf cancel items.
4363 	 */
4364 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4365 				      XLOG_RECOVER_PASS2);
4366 #ifdef DEBUG
4367 	if (!error) {
4368 		int	i;
4369 
4370 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4371 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4372 	}
4373 #endif	/* DEBUG */
4374 
4375 	kmem_free(log->l_buf_cancel_table);
4376 	log->l_buf_cancel_table = NULL;
4377 
4378 	return error;
4379 }
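
/*
 * Editorial note: the two passes above split recovery into "learn what
 * was cancelled" and "replay everything that wasn't".  Reduced to its
 * skeleton with a flat array instead of the blkno-keyed hash of
 * struct xfs_buf_cancel (demo_* names hypothetical):
 */
#if 0
static int demo_is_cancelled(const long long *cancelled, int ncancelled,
			     long long blkno)
{
	int i;

	/* pass 1 appended the block number of every cancel record */
	for (i = 0; i < ncancelled; i++)
		if (cancelled[i] == blkno)
			return 1;
	return 0;	/* pass 2 replays this buffer normally */
}
#endif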
4380 
4381 /*
4382  * Do the actual recovery
4383  */
4384 STATIC int
4385 xlog_do_recover(
4386 	struct xlog	*log,
4387 	xfs_daddr_t	head_blk,
4388 	xfs_daddr_t	tail_blk)
4389 {
4390 	int		error;
4391 	xfs_buf_t	*bp;
4392 	xfs_sb_t	*sbp;
4393 
4394 	/*
4395 	 * First replay the images in the log.
4396 	 */
4397 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
4398 	if (error)
4399 		return error;
4400 
4401 	/*
4402 	 * If IO errors happened during recovery, bail out.
4403 	 */
4404 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4405 		return -EIO;
4406 	}
4407 
4408 	/*
4409 	 * We now update the tail_lsn since much of the recovery has completed
4410  * and there may be space available to use.  If there were no extent frees
4411 	 * or iunlinks, we can free up the entire log and set the tail_lsn to
4412 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
4413 	 * lsn of the last known good LR on disk.  If there are extent frees
4414 	 * or iunlinks they will have some entries in the AIL; so we look at
4415 	 * the AIL to determine how to set the tail_lsn.
4416 	 */
4417 	xlog_assign_tail_lsn(log->l_mp);
4418 
4419 	/*
4420 	 * Now that we've finished replaying all buffer and inode
4421 	 * updates, re-read in the superblock and reverify it.
4422 	 */
4423 	bp = xfs_getsb(log->l_mp, 0);
4424 	XFS_BUF_UNDONE(bp);
4425 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
4426 	XFS_BUF_READ(bp);
4427 	XFS_BUF_UNASYNC(bp);
4428 	bp->b_ops = &xfs_sb_buf_ops;
4429 
4430 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4431 		xfs_buf_relse(bp);
4432 		return -EIO;
4433 	}
4434 
4435 	xfs_buf_iorequest(bp);
4436 	error = xfs_buf_iowait(bp);
4437 	if (error) {
4438 		xfs_buf_ioerror_alert(bp, __func__);
4439 		ASSERT(0);
4440 		xfs_buf_relse(bp);
4441 		return error;
4442 	}
4443 
4444 	/* Convert superblock from on-disk format */
4445 	sbp = &log->l_mp->m_sb;
4446 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4447 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4448 	ASSERT(xfs_sb_good_version(sbp));
4449 	xfs_buf_relse(bp);
4450 
4451 	/* We've re-read the superblock so re-initialize per-cpu counters */
4452 	xfs_icsb_reinit_counters(log->l_mp);
4453 
4454 	xlog_recover_check_summary(log);
4455 
4456 	/* Normal transactions can now occur */
4457 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4458 	return 0;
4459 }
4460 
4461 /*
4462  * Perform recovery and re-initialize some log variables in xlog_find_tail.
4463  *
4464  * Return error or zero.
4465  */
4466 int
4467 xlog_recover(
4468 	struct xlog	*log)
4469 {
4470 	xfs_daddr_t	head_blk, tail_blk;
4471 	int		error;
4472 
4473 	/* find the tail of the log */
4474 	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4475 		return error;
4476 
4477 	if (tail_blk != head_blk) {
4478 		/* There used to be a comment here:
4479 		 *
4480 		 * disallow recovery on read-only mounts.  note -- mount
4481 		 * checks for ENOSPC and turns it into an intelligent
4482 		 * error message.
4483 		 * ...but this is no longer true.  Now, unless you specify
4484 		 * NORECOVERY (in which case this function would never be
4485 		 * called), we just go ahead and recover.  We do this all
4486 		 * under the vfs layer, so we can get away with it unless
4487 		 * the device itself is read-only, in which case we fail.
4488 		 */
4489 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4490 			return error;
4491 		}
4492 
4493 		/*
4494 		 * Version 5 superblock log feature mask validation. We know the
4495 		 * log is dirty so check if there are any unknown log features
4496 		 * in what we need to recover. If there are unknown features
4497  * (e.g. unsupported transactions), then simply reject the
4498 		 * attempt at recovery before touching anything.
4499 		 */
4500 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4501 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4502 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4503 			xfs_warn(log->l_mp,
4504 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4505 "The log can not be fully and/or safely recovered by this kernel.\n"
4506 "Please recover the log on a kernel that supports the unknown features.",
4507 				(log->l_mp->m_sb.sb_features_log_incompat &
4508 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4509 			return -EINVAL;
4510 		}
4511 
4512 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4513 				log->l_mp->m_logname ? log->l_mp->m_logname
4514 						     : "internal");
4515 
4516 		error = xlog_do_recover(log, head_blk, tail_blk);
4517 		log->l_flags |= XLOG_RECOVERY_NEEDED;
4518 	}
4519 	return error;
4520 }
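
/*
 * Editorial note: the v5 feature gate above is a plain mask test -- if
 * any incompat-log feature bit set on disk is outside the set this
 * kernel understands, recovery is refused before anything is touched.
 * A standalone sketch (demo names and bit values hypothetical; in XFS
 * the "unknown" mask is the complement of the supported bits):
 */
#if 0
#define DEMO_INCOMPAT_LOG_ALL		0x1u	/* bits this kernel knows */
#define DEMO_INCOMPAT_LOG_UNKNOWN	(~DEMO_INCOMPAT_LOG_ALL)

static int demo_log_recoverable(unsigned int sb_features_log_incompat)
{
	return (sb_features_log_incompat & DEMO_INCOMPAT_LOG_UNKNOWN) == 0;
}
#endif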
4521 
4522 /*
4523  * In the first part of recovery we replay inodes and buffers and build
4524  * up the list of extent free items which need to be processed.  Here
4525  * we process the extent free items and clean up the on-disk unlinked
4526  * inode lists.  This is separated from the first part of recovery so
4527  * that the root and real-time bitmap inodes can be read in from disk in
4528  * between the two stages.  This is necessary so that we can free space
4529  * in the real-time portion of the file system.
4530  */
4531 int
4532 xlog_recover_finish(
4533 	struct xlog	*log)
4534 {
4535 	/*
4536 	 * Now we're ready to do the transactions needed for the
4537 	 * rest of recovery.  Start with completing all the extent
4538 	 * free intent records and then process the unlinked inode
4539 	 * lists.  At this point, we essentially run in normal mode
4540 	 * except that we're still performing recovery actions
4541 	 * rather than accepting new requests.
4542 	 */
4543 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4544 		int	error;
4545 		error = xlog_recover_process_efis(log);
4546 		if (error) {
4547 			xfs_alert(log->l_mp, "Failed to recover EFIs");
4548 			return error;
4549 		}
4550 		/*
4551 		 * Sync the log to get all the EFIs out of the AIL.
4552 		 * This isn't absolutely necessary, but it helps in
4553 		 * case the unlink transactions would have problems
4554 		 * pushing the EFIs out of the way.
4555 		 */
4556 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4557 
4558 		xlog_recover_process_iunlinks(log);
4559 
4560 		xlog_recover_check_summary(log);
4561 
4562 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4563 				log->l_mp->m_logname ? log->l_mp->m_logname
4564 						     : "internal");
4565 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4566 	} else {
4567 		xfs_info(log->l_mp, "Ending clean mount");
4568 	}
4569 	return 0;
4570 }
4571 
4572 
4573 #if defined(DEBUG)
4574 /*
4575  * Read all of the agf and agi counters and check that they
4576  * are consistent with the superblock counters.
4577  */
4578 void
4579 xlog_recover_check_summary(
4580 	struct xlog	*log)
4581 {
4582 	xfs_mount_t	*mp;
4583 	xfs_agf_t	*agfp;
4584 	xfs_buf_t	*agfbp;
4585 	xfs_buf_t	*agibp;
4586 	xfs_agnumber_t	agno;
4587 	__uint64_t	freeblks;
4588 	__uint64_t	itotal;
4589 	__uint64_t	ifree;
4590 	int		error;
4591 
4592 	mp = log->l_mp;
4593 
4594 	freeblks = 0LL;
4595 	itotal = 0LL;
4596 	ifree = 0LL;
4597 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4598 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4599 		if (error) {
4600 			xfs_alert(mp, "%s agf read failed agno %d error %d",
4601 						__func__, agno, error);
4602 		} else {
4603 			agfp = XFS_BUF_TO_AGF(agfbp);
4604 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
4605 				    be32_to_cpu(agfp->agf_flcount);
4606 			xfs_buf_relse(agfbp);
4607 		}
4608 
4609 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4610 		if (error) {
4611 			xfs_alert(mp, "%s agi read failed agno %d error %d",
4612 						__func__, agno, error);
4613 		} else {
4614 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
4615 
4616 			itotal += be32_to_cpu(agi->agi_count);
4617 			ifree += be32_to_cpu(agi->agi_freecount);
4618 			xfs_buf_relse(agibp);
4619 		}
4620 	}
4621 }
4622 #endif /* DEBUG */
4623