xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision ee89bd6b)
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/* Need all the magic numbers and buffer ops structures from these headers */
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */
/*
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
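	/*
	 * Worked example (illustrative numbers, not taken from a real
	 * trace): with 4k sectors, l_sectBBsize is 8 basic blocks.  A
	 * request for 10 blocks first grows to 18 to cover a misaligned
	 * start, then rounds up to 24, i.e. three whole log sectors.
	 */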
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
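
/*
 * Worked example for xlog_align() (illustrative numbers): with
 * l_sectBBsize = 8 and blk_no = 13, the offset within the sector-aligned
 * buffer is 13 & 7 = 5 basic blocks, so the caller's data starts
 * BBTOB(5) = 2560 bytes into bp->b_addr.
 */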


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * check a log record header against the uuid of this mount
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
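/*
 * Illustrative trace (hypothetical numbers): searching an 8 block log
 * stamped with cycles { 4, 4, 4, 3, 3, 3, 3, 3 } for cycle 3, starting
 * with first_blk = 0 and *last_blk = 7: probes at blocks 3, 1 and 2
 * narrow the interval until first_blk = 2 and end_blk = 3 are adjacent,
 * so *last_blk comes back as 3, the first cycle-3 block.
 */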
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
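	/*
	 * Reader's note: 1 << ffs(nbblks) need not be >= nbblks (for
	 * example, ffs(24) = 4 gives bufblks = 16).  That is harmless
	 * because the loop below reads in bufblks-sized chunks whatever
	 * size the buffer ends up being.
	 */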
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially back up over a partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
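	/*
	 * A v2 log record can span several 32k "cycle windows", each of
	 * which carries its own header block on disk.  For example
	 * (illustrative), h_size = 64k yields xhdrs = 2, so a record of
	 * h_len bytes occupies BTOBB(h_len) + 2 blocks.
	 */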
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number - 1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		ASSERT(0);
		error = XFS_ERROR(EIO);
		goto done;
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
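	/*
	 * found == 2 means the only record header lies physically after
	 * the head, i.e. the search wrapped, so the next write at the
	 * head starts a new cycle.
	 */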
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out through bp_err so
		 * the buffer is freed rather than leaked.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = XFS_ERROR(EINVAL);
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.  XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially back up over a partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
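	/*
	 * Worked example (illustrative): start_block = 10 with 8-block
	 * sectors gives balign = 8 and j = 2; blocks 8 and 9 share a
	 * sector with block 10, so they are read in first and preserved
	 * by the sector-aligned write below.
	 */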
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
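		/*
		 * Example (illustrative): end_block = 26 with 8-block
		 * sectors gives ealign = 24; a write covering blocks
		 * 16..25 only partially fills sector 24..31, so that
		 * sector is read back into the buffer first.
		 */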
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}
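	/*
	 * Worked example (illustrative): in a 1000 block log with
	 * head_block = 900 and tail_block = 100 on the same cycle, the
	 * head must wrap to reach the tail, so tail_distance =
	 * 100 + (1000 - 900) = 200 blocks may safely be stomped on.
	 */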

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid   = tid;
	trans->r_lsn	   = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
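/*
 * Illustrative layout (hypothetical item): an inode log item with
 * ilf_size = 3 ends up as ri_buf[0] = the xfs_inode_log_format_t
 * description followed by two data regions in ri_buf[1] and ri_buf[2];
 * ri_total is sized from ilf_size when the first region arrives and
 * ri_cnt counts the regions copied in so far.
 */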
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 */
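/*
 * Example (illustrative): a transaction logged as { inode item I,
 * cancelled buffer C, inode unlink buffer U, ordinary buffer B } is
 * requeued for replay in the order B, I, U, C.
 */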
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
	}
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return 0;
}
1699 
1700 /*
1701  * Build up the table of buf cancel records so that we don't replay
1702  * cancelled data in the second pass.  For buffer records that are
1703  * not cancel records, there is nothing to do here so we just return.
1704  *
1705  * If we get a cancel record which is already in the table, this indicates
1706  * that the buffer was cancelled multiple times.  In order to ensure
1707  * that during pass 2 we keep the record in the table until we reach its
1708  * last occurrence in the log, we keep a reference count in the cancel
1709  * record in the table to tell us how many times we expect to see this
1710  * record during the second pass.
1711  */
1712 STATIC int
1713 xlog_recover_buffer_pass1(
1714 	struct xlog			*log,
1715 	struct xlog_recover_item	*item)
1716 {
1717 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1718 	struct list_head	*bucket;
1719 	struct xfs_buf_cancel	*bcp;
1720 
1721 	/*
1722 	 * If this isn't a cancel buffer item, then just return.
1723 	 */
1724 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1725 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1726 		return 0;
1727 	}
1728 
1729 	/*
1730 	 * Insert an xfs_buf_cancel record into the hash table of cancel records.
1731 	 * If there is already an identical record, bump its reference count.
1732 	 */
1733 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1734 	list_for_each_entry(bcp, bucket, bc_list) {
1735 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1736 		    bcp->bc_len == buf_f->blf_len) {
1737 			bcp->bc_refcount++;
1738 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1739 			return 0;
1740 		}
1741 	}
1742 
1743 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1744 	bcp->bc_blkno = buf_f->blf_blkno;
1745 	bcp->bc_len = buf_f->blf_len;
1746 	bcp->bc_refcount = 1;
1747 	list_add_tail(&bcp->bc_list, bucket);
1748 
1749 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1750 	return 0;
1751 }
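
/*
 * Illustrative sketch (assumption: XLOG_BUF_CANCEL_BUCKET, defined in
 * xfs_log_priv.h, indexes the table by block number modulo a fixed
 * bucket count).  The table built here is a simple chained hash:
 *
 *	bucket = &log->l_buf_cancel_table[blkno % XLOG_BC_TABLE_SIZE];
 *
 * so the pass 2 lookups in xlog_check_buffer_cancelled() below stay
 * proportional to the chain length rather than scanning every cancel
 * record found in the log.
 */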
1752 
1753 /*
1754  * Check to see whether the buffer being recovered has a corresponding
1755  * entry in the buffer cancel record table.  If it does then return 1
1756  * so that it will be cancelled, otherwise return 0.  If the buffer is
1757  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1758  * the refcount on the entry in the table and remove it from the table
1759  * if this is the last reference.
1760  *
1761  * We remove the cancel record from the table when we encounter its
1762  * last occurrence in the log so that if the same buffer is re-used
1763  * again after its last cancellation we actually replay the changes
1764  * made at that point.
1765  */
1766 STATIC int
1767 xlog_check_buffer_cancelled(
1768 	struct xlog		*log,
1769 	xfs_daddr_t		blkno,
1770 	uint			len,
1771 	ushort			flags)
1772 {
1773 	struct list_head	*bucket;
1774 	struct xfs_buf_cancel	*bcp;
1775 
1776 	if (log->l_buf_cancel_table == NULL) {
1777 		/*
1778 		 * There is nothing in the table built in pass one,
1779 		 * so this buffer must not be cancelled.
1780 		 */
1781 		ASSERT(!(flags & XFS_BLF_CANCEL));
1782 		return 0;
1783 	}
1784 
1785 	/*
1786 	 * Search for an entry in the cancel table that matches our buffer.
1787 	 */
1788 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1789 	list_for_each_entry(bcp, bucket, bc_list) {
1790 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1791 			goto found;
1792 	}
1793 
1794 	/*
1795 	 * We didn't find a corresponding entry in the table, so return 0 so
1796 	 * that the buffer is NOT cancelled.
1797 	 */
1798 	ASSERT(!(flags & XFS_BLF_CANCEL));
1799 	return 0;
1800 
1801 found:
1802 	/*
1803 	 * We've got a match, so return 1 so that the recovery of this buffer
1804 	 * is cancelled.  If this buffer is actually a buffer cancel log
1805 	 * item, then decrement the refcount on the one in the table and
1806 	 * remove it if this is the last reference.
1807 	 */
1808 	if (flags & XFS_BLF_CANCEL) {
1809 		if (--bcp->bc_refcount == 0) {
1810 			list_del(&bcp->bc_list);
1811 			kmem_free(bcp);
1812 		}
1813 	}
1814 	return 1;
1815 }
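
/*
 * Worked example (hypothetical block number): a buffer at block 100 that
 * is cancelled three times in the log yields one table entry with
 * bc_refcount == 3 after pass 1.  In pass 2 each XFS_BLF_CANCEL item for
 * that buffer decrements the count: 3 -> 2 -> 1 -> entry freed, so any
 * later reuse of block 100 in the log is then replayed normally.
 */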
1816 
1817 /*
1818  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1819  * data which should be recovered is that which corresponds to the
1820  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1821  * data for the inodes is always logged through the inodes themselves rather
1822  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1823  *
1824  * The only time when buffers full of inodes are fully recovered is when the
1825  * buffer is full of newly allocated inodes.  In this case the buffer will
1826  * not be marked as an inode buffer and so will be sent to
1827  * xlog_recover_do_reg_buffer() below during recovery.
1828  */
1829 STATIC int
1830 xlog_recover_do_inode_buffer(
1831 	struct xfs_mount	*mp,
1832 	xlog_recover_item_t	*item,
1833 	struct xfs_buf		*bp,
1834 	xfs_buf_log_format_t	*buf_f)
1835 {
1836 	int			i;
1837 	int			item_index = 0;
1838 	int			bit = 0;
1839 	int			nbits = 0;
1840 	int			reg_buf_offset = 0;
1841 	int			reg_buf_bytes = 0;
1842 	int			next_unlinked_offset;
1843 	int			inodes_per_buf;
1844 	xfs_agino_t		*logged_nextp;
1845 	xfs_agino_t		*buffer_nextp;
1846 
1847 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1848 	bp->b_ops = &xfs_inode_buf_ops;
1849 
1850 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1851 	for (i = 0; i < inodes_per_buf; i++) {
1852 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1853 			offsetof(xfs_dinode_t, di_next_unlinked);
1854 
1855 		while (next_unlinked_offset >=
1856 		       (reg_buf_offset + reg_buf_bytes)) {
1857 			/*
1858 			 * The next di_next_unlinked field is beyond
1859 			 * the current logged region.  Find the next
1860 			 * logged region that contains or is beyond
1861 			 * the current di_next_unlinked field.
1862 			 */
1863 			bit += nbits;
1864 			bit = xfs_next_bit(buf_f->blf_data_map,
1865 					   buf_f->blf_map_size, bit);
1866 
1867 			/*
1868 			 * If there are no more logged regions in the
1869 			 * buffer, then we're done.
1870 			 */
1871 			if (bit == -1)
1872 				return 0;
1873 
1874 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1875 						buf_f->blf_map_size, bit);
1876 			ASSERT(nbits > 0);
1877 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1878 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1879 			item_index++;
1880 		}
1881 
1882 		/*
1883 		 * If the current logged region starts after the current
1884 		 * di_next_unlinked field, then move on to the next
1885 		 * di_next_unlinked field.
1886 		 */
1887 		if (next_unlinked_offset < reg_buf_offset)
1888 			continue;
1889 
1890 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1891 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1892 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1893 							BBTOB(bp->b_io_length));
1894 
1895 		/*
1896 		 * The current logged region contains a copy of the
1897 		 * current di_next_unlinked field.  Extract its value
1898 		 * and copy it to the buffer copy.
1899 		 */
1900 		logged_nextp = item->ri_buf[item_index].i_addr +
1901 				next_unlinked_offset - reg_buf_offset;
1902 		if (unlikely(*logged_nextp == 0)) {
1903 			xfs_alert(mp,
1904 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1905 		"Trying to replay bad (0) inode di_next_unlinked field.",
1906 				item, bp);
1907 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1908 					 XFS_ERRLEVEL_LOW, mp);
1909 			return XFS_ERROR(EFSCORRUPTED);
1910 		}
1911 
1912 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1913 					      next_unlinked_offset);
1914 		*buffer_nextp = *logged_nextp;
1915 
1916 		/*
1917 		 * If necessary, recalculate the CRC in the on-disk inode. We
1918 		 * have to leave the inode in a consistent state for whoever
1919 		 * reads it next....
1920 		 */
1921 		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1922 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1923 
1924 	}
1925 
1926 	return 0;
1927 }
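
/*
 * Illustrative sketch (hypothetical map): the logged-region walk above is
 * driven by the dirty bitmap in the buf log format, one bit per
 * XFS_BLF_CHUNK bytes of buffer.  For a map with bits {2, 3, 4, 6} set:
 *
 *	bit   = xfs_next_bit(map, map_size, 0);		returns 2
 *	nbits = xfs_contig_bits(map, map_size, bit);	returns 3
 *		region covers bytes [2 << XFS_BLF_SHIFT, 5 << XFS_BLF_SHIFT)
 *	bit   = xfs_next_bit(map, map_size, 2 + 3);	returns 6
 *
 * and a di_next_unlinked offset is only copied if it falls inside such a
 * logged region.
 */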
1928 
1929 /*
1930  * Validate the recovered buffer is of the correct type and attach the
1931  * appropriate buffer operations to it for writeback. Magic numbers are in a
1932  * few places:
1933  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
1934  *	the first 32 bits of the buffer (most blocks),
1935  *	inside a struct xfs_da_blkinfo at the start of the buffer.
1936  */
1937 static void
1938 xlog_recovery_validate_buf_type(
1939 	struct xfs_mount	*mp,
1940 	struct xfs_buf		*bp,
1941 	xfs_buf_log_format_t	*buf_f)
1942 {
1943 	struct xfs_da_blkinfo	*info = bp->b_addr;
1944 	__uint32_t		magic32;
1945 	__uint16_t		magic16;
1946 	__uint16_t		magicda;
1947 
1948 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
1949 	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
1950 	magicda = be16_to_cpu(info->magic);
1951 	switch (xfs_blft_from_flags(buf_f)) {
1952 	case XFS_BLFT_BTREE_BUF:
1953 		switch (magic32) {
1954 		case XFS_ABTB_CRC_MAGIC:
1955 		case XFS_ABTC_CRC_MAGIC:
1956 		case XFS_ABTB_MAGIC:
1957 		case XFS_ABTC_MAGIC:
1958 			bp->b_ops = &xfs_allocbt_buf_ops;
1959 			break;
1960 		case XFS_IBT_CRC_MAGIC:
1961 		case XFS_IBT_MAGIC:
1962 			bp->b_ops = &xfs_inobt_buf_ops;
1963 			break;
1964 		case XFS_BMAP_CRC_MAGIC:
1965 		case XFS_BMAP_MAGIC:
1966 			bp->b_ops = &xfs_bmbt_buf_ops;
1967 			break;
1968 		default:
1969 			xfs_warn(mp, "Bad btree block magic!");
1970 			ASSERT(0);
1971 			break;
1972 		}
1973 		break;
1974 	case XFS_BLFT_AGF_BUF:
1975 		if (magic32 != XFS_AGF_MAGIC) {
1976 			xfs_warn(mp, "Bad AGF block magic!");
1977 			ASSERT(0);
1978 			break;
1979 		}
1980 		bp->b_ops = &xfs_agf_buf_ops;
1981 		break;
1982 	case XFS_BLFT_AGFL_BUF:
1983 		if (!xfs_sb_version_hascrc(&mp->m_sb))
1984 			break;
1985 		if (magic32 != XFS_AGFL_MAGIC) {
1986 			xfs_warn(mp, "Bad AGFL block magic!");
1987 			ASSERT(0);
1988 			break;
1989 		}
1990 		bp->b_ops = &xfs_agfl_buf_ops;
1991 		break;
1992 	case XFS_BLFT_AGI_BUF:
1993 		if (magic32 != XFS_AGI_MAGIC) {
1994 			xfs_warn(mp, "Bad AGI block magic!");
1995 			ASSERT(0);
1996 			break;
1997 		}
1998 		bp->b_ops = &xfs_agi_buf_ops;
1999 		break;
2000 	case XFS_BLFT_UDQUOT_BUF:
2001 	case XFS_BLFT_PDQUOT_BUF:
2002 	case XFS_BLFT_GDQUOT_BUF:
2003 #ifdef CONFIG_XFS_QUOTA
2004 		if (magic16 != XFS_DQUOT_MAGIC) {
2005 			xfs_warn(mp, "Bad DQUOT block magic!");
2006 			ASSERT(0);
2007 			break;
2008 		}
2009 		bp->b_ops = &xfs_dquot_buf_ops;
2010 #else
2011 		xfs_alert(mp,
2012 	"Trying to recover dquots without QUOTA support built in!");
2013 		ASSERT(0);
2014 #endif
2015 		break;
2016 	case XFS_BLFT_DINO_BUF:
2017 		/*
2018 		 * we get here with inode allocation buffers, not buffers that
2019 		 * track unlinked list changes.
2020 		 */
2021 		if (magic16 != XFS_DINODE_MAGIC) {
2022 			xfs_warn(mp, "Bad INODE block magic!");
2023 			ASSERT(0);
2024 			break;
2025 		}
2026 		bp->b_ops = &xfs_inode_buf_ops;
2027 		break;
2028 	case XFS_BLFT_SYMLINK_BUF:
2029 		if (magic32 != XFS_SYMLINK_MAGIC) {
2030 			xfs_warn(mp, "Bad symlink block magic!");
2031 			ASSERT(0);
2032 			break;
2033 		}
2034 		bp->b_ops = &xfs_symlink_buf_ops;
2035 		break;
2036 	case XFS_BLFT_DIR_BLOCK_BUF:
2037 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2038 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2039 			xfs_warn(mp, "Bad dir block magic!");
2040 			ASSERT(0);
2041 			break;
2042 		}
2043 		bp->b_ops = &xfs_dir3_block_buf_ops;
2044 		break;
2045 	case XFS_BLFT_DIR_DATA_BUF:
2046 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2047 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2048 			xfs_warn(mp, "Bad dir data magic!");
2049 			ASSERT(0);
2050 			break;
2051 		}
2052 		bp->b_ops = &xfs_dir3_data_buf_ops;
2053 		break;
2054 	case XFS_BLFT_DIR_FREE_BUF:
2055 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2056 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2057 			xfs_warn(mp, "Bad dir3 free magic!");
2058 			ASSERT(0);
2059 			break;
2060 		}
2061 		bp->b_ops = &xfs_dir3_free_buf_ops;
2062 		break;
2063 	case XFS_BLFT_DIR_LEAF1_BUF:
2064 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2065 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2066 			xfs_warn(mp, "Bad dir leaf1 magic!");
2067 			ASSERT(0);
2068 			break;
2069 		}
2070 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2071 		break;
2072 	case XFS_BLFT_DIR_LEAFN_BUF:
2073 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2074 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2075 			xfs_warn(mp, "Bad dir leafn magic!");
2076 			ASSERT(0);
2077 			break;
2078 		}
2079 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2080 		break;
2081 	case XFS_BLFT_DA_NODE_BUF:
2082 		if (magicda != XFS_DA_NODE_MAGIC &&
2083 		    magicda != XFS_DA3_NODE_MAGIC) {
2084 			xfs_warn(mp, "Bad da node magic!");
2085 			ASSERT(0);
2086 			break;
2087 		}
2088 		bp->b_ops = &xfs_da3_node_buf_ops;
2089 		break;
2090 	case XFS_BLFT_ATTR_LEAF_BUF:
2091 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2092 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2093 			xfs_warn(mp, "Bad attr leaf magic!");
2094 			ASSERT(0);
2095 			break;
2096 		}
2097 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2098 		break;
2099 	case XFS_BLFT_ATTR_RMT_BUF:
2100 		if (!xfs_sb_version_hascrc(&mp->m_sb))
2101 			break;
2102 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2103 			xfs_warn(mp, "Bad attr remote magic!");
2104 			ASSERT(0);
2105 			break;
2106 		}
2107 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2108 		break;
2109 	case XFS_BLFT_SB_BUF:
2110 		if (magic32 != XFS_SB_MAGIC) {
2111 			xfs_warn(mp, "Bad SB block magic!");
2112 			ASSERT(0);
2113 			break;
2114 		}
2115 		bp->b_ops = &xfs_sb_buf_ops;
2116 		break;
2117 	default:
2118 		xfs_warn(mp, "Unknown buffer type %d!",
2119 			 xfs_blft_from_flags(buf_f));
2120 		break;
2121 	}
2122 }
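
/*
 * Illustrative note: magic16 above is simply the high half of magic32
 * read from the same offset; e.g. an AGF buffer beginning with "XAGF"
 * gives magic32 == 0x58414746 and magic16 == 0x5841, while magicda comes
 * from the xfs_da_blkinfo header at the start of da-format blocks.
 */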
2123 
2124 /*
2125  * Perform a 'normal' buffer recovery.  Each logged region of the
2126  * buffer should be copied over the corresponding region in the
2127  * given buffer.  The bitmap in the buf log format structure indicates
2128  * where to place the logged data.
2129  */
2130 STATIC void
2131 xlog_recover_do_reg_buffer(
2132 	struct xfs_mount	*mp,
2133 	xlog_recover_item_t	*item,
2134 	struct xfs_buf		*bp,
2135 	xfs_buf_log_format_t	*buf_f)
2136 {
2137 	int			i;
2138 	int			bit;
2139 	int			nbits;
2140 	int                     error;
2141 
2142 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2143 
2144 	bit = 0;
2145 	i = 1;  /* 0 is the buf format structure */
2146 	while (1) {
2147 		bit = xfs_next_bit(buf_f->blf_data_map,
2148 				   buf_f->blf_map_size, bit);
2149 		if (bit == -1)
2150 			break;
2151 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2152 					buf_f->blf_map_size, bit);
2153 		ASSERT(nbits > 0);
2154 		ASSERT(item->ri_buf[i].i_addr != NULL);
2155 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2156 		ASSERT(BBTOB(bp->b_io_length) >=
2157 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2158 
2159 		/*
2160 		 * The dirty regions logged in the buffer, even though
2161 		 * contiguous, may span multiple chunks. This is because the
2162 		 * dirty region may span a physical page boundary in a buffer
2163 		 * and hence be split into two separate vectors for writing into
2164 		 * the log. Hence we need to trim nbits back to the length of
2165 		 * the current region being copied out of the log.
2166 		 */
2167 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2168 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2169 
2170 		/*
2171 		 * Do a sanity check if this is a dquot buffer. Just checking
2172 		 * the first dquot in the buffer should do. XXX This is
2173 		 * probably a good thing to do for other buf types also.
2174 		 */
2175 		error = 0;
2176 		if (buf_f->blf_flags &
2177 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2178 			if (item->ri_buf[i].i_addr == NULL) {
2179 				xfs_alert(mp,
2180 					"XFS: NULL dquot in %s.", __func__);
2181 				goto next;
2182 			}
2183 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2184 				xfs_alert(mp,
2185 					"XFS: dquot too small (%d) in %s.",
2186 					item->ri_buf[i].i_len, __func__);
2187 				goto next;
2188 			}
2189 			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
2190 					       -1, 0, XFS_QMOPT_DOWARN,
2191 					       "dquot_buf_recover");
2192 			if (error)
2193 				goto next;
2194 		}
2195 
2196 		memcpy(xfs_buf_offset(bp,
2197 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2198 			item->ri_buf[i].i_addr,		/* source */
2199 			nbits<<XFS_BLF_SHIFT);		/* length */
2200  next:
2201 		i++;
2202 		bit += nbits;
2203 	}
2204 
2205 	/* Shouldn't be any more regions */
2206 	ASSERT(i == item->ri_total);
2207 
2208 	xlog_recovery_validate_buf_type(mp, bp, buf_f);
2209 }
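
/*
 * Worked example (hypothetical sizes, assuming 128 byte XFS_BLF_CHUNKs):
 * a contiguous 640 byte dirty region that straddles a page boundary may
 * be logged as two vectors of 512 and 128 bytes.  The bitmap still shows
 * one run of nbits == 5, but ri_buf[i].i_len of the first vector is 512,
 * so nbits is trimmed to 512 >> XFS_BLF_SHIFT == 4 and the remaining
 * chunk is copied from the next vector on the following loop iteration.
 */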
2210 
2211 /*
2212  * Do some primitive error checking on ondisk dquot data structures.
2213  * Do some primitive error checking on on-disk dquot data structures.
2214 int
2215 xfs_qm_dqcheck(
2216 	struct xfs_mount *mp,
2217 	xfs_disk_dquot_t *ddq,
2218 	xfs_dqid_t	 id,
2219 	uint		 type,	  /* used only when IO_dorepair is true */
2220 	uint		 flags,
2221 	char		 *str)
2222 {
2223 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
2224 	int		errs = 0;
2225 
2226 	/*
2227 	 * We can encounter an uninitialized dquot buffer for 2 reasons:
2228 	 * 1. If we crash while deleting the quotainode(s), and those blks got
2229 	 *    used for user data. This is because we take the path of regular
2230 	 *    file deletion; however, the size field of quotainodes is never
2231 	 *    updated, so all the tricks that we play in itruncate_finish
2232 	 *    don't quite matter.
2233 	 *
2234 	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
2235 	 *    But the allocation will be replayed so we'll end up with an
2236 	 *    uninitialized quota block.
2237 	 *
2238 	 * This is all fine; things are still consistent, and we haven't lost
2239 	 * any quota information. Just don't complain about bad dquot blks.
2240 	 */
2241 	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2242 		if (flags & XFS_QMOPT_DOWARN)
2243 			xfs_alert(mp,
2244 			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2245 			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2246 		errs++;
2247 	}
2248 	if (ddq->d_version != XFS_DQUOT_VERSION) {
2249 		if (flags & XFS_QMOPT_DOWARN)
2250 			xfs_alert(mp,
2251 			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2252 			str, id, ddq->d_version, XFS_DQUOT_VERSION);
2253 		errs++;
2254 	}
2255 
2256 	if (ddq->d_flags != XFS_DQ_USER &&
2257 	    ddq->d_flags != XFS_DQ_PROJ &&
2258 	    ddq->d_flags != XFS_DQ_GROUP) {
2259 		if (flags & XFS_QMOPT_DOWARN)
2260 			xfs_alert(mp,
2261 			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2262 			str, id, ddq->d_flags);
2263 		errs++;
2264 	}
2265 
2266 	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2267 		if (flags & XFS_QMOPT_DOWARN)
2268 			xfs_alert(mp,
2269 			"%s : ondisk-dquot 0x%p, ID mismatch: "
2270 			"0x%x expected, found id 0x%x",
2271 			str, ddq, id, be32_to_cpu(ddq->d_id));
2272 		errs++;
2273 	}
2274 
2275 	if (!errs && ddq->d_id) {
2276 		if (ddq->d_blk_softlimit &&
2277 		    be64_to_cpu(ddq->d_bcount) >
2278 				be64_to_cpu(ddq->d_blk_softlimit)) {
2279 			if (!ddq->d_btimer) {
2280 				if (flags & XFS_QMOPT_DOWARN)
2281 					xfs_alert(mp,
2282 			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2283 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2284 				errs++;
2285 			}
2286 		}
2287 		if (ddq->d_ino_softlimit &&
2288 		    be64_to_cpu(ddq->d_icount) >
2289 				be64_to_cpu(ddq->d_ino_softlimit)) {
2290 			if (!ddq->d_itimer) {
2291 				if (flags & XFS_QMOPT_DOWARN)
2292 					xfs_alert(mp,
2293 			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2294 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2295 				errs++;
2296 			}
2297 		}
2298 		if (ddq->d_rtb_softlimit &&
2299 		    be64_to_cpu(ddq->d_rtbcount) >
2300 				be64_to_cpu(ddq->d_rtb_softlimit)) {
2301 			if (!ddq->d_rtbtimer) {
2302 				if (flags & XFS_QMOPT_DOWARN)
2303 					xfs_alert(mp,
2304 			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2305 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2306 				errs++;
2307 			}
2308 		}
2309 	}
2310 
2311 	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2312 		return errs;
2313 
2314 	if (flags & XFS_QMOPT_DOWARN)
2315 		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2316 
2317 	/*
2318 	 * Typically, a repair is only requested by quotacheck.
2319 	 */
2320 	ASSERT(id != -1);
2321 	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2322 	memset(d, 0, sizeof(xfs_dqblk_t));
2323 
2324 	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2325 	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2326 	d->dd_diskdq.d_flags = type;
2327 	d->dd_diskdq.d_id = cpu_to_be32(id);
2328 
2329 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2330 		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2331 		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2332 				 XFS_DQUOT_CRC_OFF);
2333 	}
2334 
2335 	return errs;
2336 }
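
/*
 * Hypothetical usage sketch: quotacheck is the typical caller that asks
 * for repair, e.g.
 *
 *	errs = xfs_qm_dqcheck(mp, ddq, id, XFS_DQ_USER,
 *			      XFS_QMOPT_DOWARN | XFS_QMOPT_DQREPAIR,
 *			      "quotacheck");
 *
 * On a failed check the dquot block is zeroed and re-stamped with the
 * magic, version, type and id (plus uuid and CRC on v5 superblocks), and
 * the error count found before the repair is still returned.
 */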
2337 
2338 /*
2339  * Perform a dquot buffer recovery.
2340  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2341  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2342  * Else, treat it as a regular buffer and do recovery.
2343  */
2344 STATIC void
2345 xlog_recover_do_dquot_buffer(
2346 	struct xfs_mount		*mp,
2347 	struct xlog			*log,
2348 	struct xlog_recover_item	*item,
2349 	struct xfs_buf			*bp,
2350 	struct xfs_buf_log_format	*buf_f)
2351 {
2352 	uint			type;
2353 
2354 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2355 
2356 	/*
2357 	 * Filesystems are required to send in quota flags at mount time.
2358 	 */
2359 	if (mp->m_qflags == 0) {
2360 		return;
2361 	}
2362 
2363 	type = 0;
2364 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2365 		type |= XFS_DQ_USER;
2366 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2367 		type |= XFS_DQ_PROJ;
2368 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2369 		type |= XFS_DQ_GROUP;
2370 	/*
2371 	 * This type of quota was turned off, so ignore this buffer
2372 	 */
2373 	if (log->l_quotaoffs_flag & type)
2374 		return;
2375 
2376 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2377 }
2378 
2379 /*
2380  * This routine replays a modification made to a buffer at runtime.
2381  * There are actually two types of buffer, regular and inode, which
2382  * are handled differently.  Inode buffers are special in that we
2383  * only recover a specific set of data from them, namely
2384  * the inode di_next_unlinked fields.  This is because all other inode
2385  * data is actually logged via inode records and any data we replay
2386  * here which overlaps that may be stale.
2387  *
2388  * When meta-data buffers are freed at run time we log a buffer item
2389  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2390  * of the buffer in the log should not be replayed at recovery time.
2391  * This is so that if the blocks covered by the buffer are reused for
2392  * file data before we crash we don't end up replaying old, freed
2393  * meta-data into a user's file.
2394  *
2395  * To handle the cancellation of buffer log items, we make two passes
2396  * over the log during recovery.  During the first we build a table of
2397  * those buffers which have been cancelled, and during the second we
2398  * only replay those buffers which do not have corresponding cancel
2399  * records in the table.  See xlog_recover_buffer_pass1() and
2400  * xlog_check_buffer_cancelled() above for details on the cancel record table.
2401  */
2402 STATIC int
2403 xlog_recover_buffer_pass2(
2404 	struct xlog			*log,
2405 	struct list_head		*buffer_list,
2406 	struct xlog_recover_item	*item)
2407 {
2408 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2409 	xfs_mount_t		*mp = log->l_mp;
2410 	xfs_buf_t		*bp;
2411 	int			error;
2412 	uint			buf_flags;
2413 
2414 	/*
2415 	 * In this pass we only want to recover the buffers which have
2416 	 * not been cancelled and are not cancellation buffers themselves.
2417 	 */
2418 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2419 			buf_f->blf_len, buf_f->blf_flags)) {
2420 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2421 		return 0;
2422 	}
2423 
2424 	trace_xfs_log_recover_buf_recover(log, buf_f);
2425 
2426 	buf_flags = 0;
2427 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2428 		buf_flags |= XBF_UNMAPPED;
2429 
2430 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2431 			  buf_flags, NULL);
2432 	if (!bp)
2433 		return XFS_ERROR(ENOMEM);
2434 	error = bp->b_error;
2435 	if (error) {
2436 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2437 		xfs_buf_relse(bp);
2438 		return error;
2439 	}
2440 
2441 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2442 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2443 	} else if (buf_f->blf_flags &
2444 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2445 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2446 	} else {
2447 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2448 	}
2449 	if (error) {
2450 		xfs_buf_relse(bp);
		return XFS_ERROR(error);
	}
2451 
2452 	/*
2453 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2454 	 * slower when taking into account all the buffers to be flushed.
2455 	 *
2456 	 * Also make sure that only inode buffers with good sizes stay in
2457 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2458 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2459 	 * buffers in the log can be a different size if the log was generated
2460 	 * by an older kernel using unclustered inode buffers or a newer kernel
2461 	 * running with a different inode cluster size.  Regardless, if
2462 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2463 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2464 	 * the buffer out of the buffer cache so that the buffer won't
2465 	 * overlap with future reads of those inodes.
2466 	 */
2467 	if (XFS_DINODE_MAGIC ==
2468 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2469 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2470 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2471 		xfs_buf_stale(bp);
2472 		error = xfs_bwrite(bp);
2473 	} else {
2474 		ASSERT(bp->b_target->bt_mount == mp);
2475 		bp->b_iodone = xlog_recover_iodone;
2476 		xfs_buf_delwri_queue(bp, buffer_list);
2477 	}
2478 
2479 	xfs_buf_relse(bp);
2480 	return error;
2481 }
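
/*
 * Worked example (hypothetical geometry): with a 4k block size and an 8k
 * inode cluster, MAX() above is 8k.  A 4k inode buffer recovered from an
 * old unclustered log is therefore written synchronously and marked
 * stale, so a later 8k read of those inodes cannot collide with a
 * smaller cached buffer.
 */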
2482 
2483 STATIC int
2484 xlog_recover_inode_pass2(
2485 	struct xlog			*log,
2486 	struct list_head		*buffer_list,
2487 	struct xlog_recover_item	*item)
2488 {
2489 	xfs_inode_log_format_t	*in_f;
2490 	xfs_mount_t		*mp = log->l_mp;
2491 	xfs_buf_t		*bp;
2492 	xfs_dinode_t		*dip;
2493 	int			len;
2494 	xfs_caddr_t		src;
2495 	xfs_caddr_t		dest;
2496 	int			error;
2497 	int			attr_index;
2498 	uint			fields;
2499 	xfs_icdinode_t		*dicp;
2500 	uint			isize;
2501 	int			need_free = 0;
2502 
2503 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2504 		in_f = item->ri_buf[0].i_addr;
2505 	} else {
2506 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2507 		need_free = 1;
2508 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2509 		if (error)
2510 			goto error;
2511 	}
2512 
2513 	/*
2514 	 * Inode buffers can be freed; look out for that case
2515 	 * and do not replay the inode.
2516 	 */
2517 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2518 					in_f->ilf_len, 0)) {
2519 		error = 0;
2520 		trace_xfs_log_recover_inode_cancel(log, in_f);
2521 		goto error;
2522 	}
2523 	trace_xfs_log_recover_inode_recover(log, in_f);
2524 
2525 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2526 			  &xfs_inode_buf_ops);
2527 	if (!bp) {
2528 		error = ENOMEM;
2529 		goto error;
2530 	}
2531 	error = bp->b_error;
2532 	if (error) {
2533 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2534 		xfs_buf_relse(bp);
2535 		goto error;
2536 	}
2537 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2538 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2539 
2540 	/*
2541 	 * Make sure the place we're flushing out to really looks
2542 	 * like an inode!
2543 	 */
2544 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2545 		xfs_buf_relse(bp);
2546 		xfs_alert(mp,
2547 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2548 			__func__, dip, bp, in_f->ilf_ino);
2549 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2550 				 XFS_ERRLEVEL_LOW, mp);
2551 		error = EFSCORRUPTED;
2552 		goto error;
2553 	}
2554 	dicp = item->ri_buf[1].i_addr;
2555 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2556 		xfs_buf_relse(bp);
2557 		xfs_alert(mp,
2558 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2559 			__func__, item, in_f->ilf_ino);
2560 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2561 				 XFS_ERRLEVEL_LOW, mp);
2562 		error = EFSCORRUPTED;
2563 		goto error;
2564 	}
2565 
2566 	/* Skip replay when the on disk inode is newer than the log one */
2567 	if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2568 		/*
2569 		 * Deal with the wrap case: a wrapped on-disk count of
2570 		 * DI_MAX_FLUSH is treated as older than small values
2571 		 */
2572 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2573 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2574 			/* do nothing */
2575 		} else {
2576 			xfs_buf_relse(bp);
2577 			trace_xfs_log_recover_inode_skip(log, in_f);
2578 			error = 0;
2579 			goto error;
2580 		}
2581 	}
2582 	/* Take the opportunity to reset the flush iteration count */
2583 	dicp->di_flushiter = 0;
2584 
2585 	if (unlikely(S_ISREG(dicp->di_mode))) {
2586 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2587 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2588 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2589 					 XFS_ERRLEVEL_LOW, mp, dicp);
2590 			xfs_buf_relse(bp);
2591 			xfs_alert(mp,
2592 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2593 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2594 				__func__, item, dip, bp, in_f->ilf_ino);
2595 			error = EFSCORRUPTED;
2596 			goto error;
2597 		}
2598 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2599 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2600 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2601 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2602 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2603 					     XFS_ERRLEVEL_LOW, mp, dicp);
2604 			xfs_buf_relse(bp);
2605 			xfs_alert(mp,
2606 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2607 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2608 				__func__, item, dip, bp, in_f->ilf_ino);
2609 			error = EFSCORRUPTED;
2610 			goto error;
2611 		}
2612 	}
2613 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2614 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2615 				     XFS_ERRLEVEL_LOW, mp, dicp);
2616 		xfs_buf_relse(bp);
2617 		xfs_alert(mp,
2618 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2619 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2620 			__func__, item, dip, bp, in_f->ilf_ino,
2621 			dicp->di_nextents + dicp->di_anextents,
2622 			dicp->di_nblocks);
2623 		error = EFSCORRUPTED;
2624 		goto error;
2625 	}
2626 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2627 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2628 				     XFS_ERRLEVEL_LOW, mp, dicp);
2629 		xfs_buf_relse(bp);
2630 		xfs_alert(mp,
2631 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2632 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2633 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2634 		error = EFSCORRUPTED;
2635 		goto error;
2636 	}
2637 	isize = xfs_icdinode_size(dicp->di_version);
2638 	if (unlikely(item->ri_buf[1].i_len > isize)) {
2639 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2640 				     XFS_ERRLEVEL_LOW, mp, dicp);
2641 		xfs_buf_relse(bp);
2642 		xfs_alert(mp,
2643 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2644 			__func__, item->ri_buf[1].i_len, item);
2645 		error = EFSCORRUPTED;
2646 		goto error;
2647 	}
2648 
2649 	/* The core is in in-core format */
2650 	xfs_dinode_to_disk(dip, dicp);
2651 
2652 	/* the rest is in on-disk format */
2653 	if (item->ri_buf[1].i_len > isize) {
2654 		memcpy((char *)dip + isize,
2655 			item->ri_buf[1].i_addr + isize,
2656 			item->ri_buf[1].i_len - isize);
2657 	}
2658 
2659 	fields = in_f->ilf_fields;
2660 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2661 	case XFS_ILOG_DEV:
2662 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2663 		break;
2664 	case XFS_ILOG_UUID:
2665 		memcpy(XFS_DFORK_DPTR(dip),
2666 		       &in_f->ilf_u.ilfu_uuid,
2667 		       sizeof(uuid_t));
2668 		break;
2669 	}
2670 
2671 	if (in_f->ilf_size == 2)
2672 		goto write_inode_buffer;
2673 	len = item->ri_buf[2].i_len;
2674 	src = item->ri_buf[2].i_addr;
2675 	ASSERT(in_f->ilf_size <= 4);
2676 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2677 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2678 	       (len == in_f->ilf_dsize));
2679 
2680 	switch (fields & XFS_ILOG_DFORK) {
2681 	case XFS_ILOG_DDATA:
2682 	case XFS_ILOG_DEXT:
2683 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2684 		break;
2685 
2686 	case XFS_ILOG_DBROOT:
2687 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2688 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2689 				 XFS_DFORK_DSIZE(dip, mp));
2690 		break;
2691 
2692 	default:
2693 		/*
2694 		 * There are no data fork flags set.
2695 		 */
2696 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2697 		break;
2698 	}
2699 
2700 	/*
2701 	 * If we logged any attribute data, recover it.  There may or
2702 	 * may not have been any other non-core data logged in this
2703 	 * transaction.
2704 	 */
2705 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2706 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2707 			attr_index = 3;
2708 		} else {
2709 			attr_index = 2;
2710 		}
2711 		len = item->ri_buf[attr_index].i_len;
2712 		src = item->ri_buf[attr_index].i_addr;
2713 		ASSERT(len == in_f->ilf_asize);
2714 
2715 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2716 		case XFS_ILOG_ADATA:
2717 		case XFS_ILOG_AEXT:
2718 			dest = XFS_DFORK_APTR(dip);
2719 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2720 			memcpy(dest, src, len);
2721 			break;
2722 
2723 		case XFS_ILOG_ABROOT:
2724 			dest = XFS_DFORK_APTR(dip);
2725 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2726 					 len, (xfs_bmdr_block_t*)dest,
2727 					 XFS_DFORK_ASIZE(dip, mp));
2728 			break;
2729 
2730 		default:
2731 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2732 			ASSERT(0);
2733 			xfs_buf_relse(bp);
2734 			error = EIO;
2735 			goto error;
2736 		}
2737 	}
2738 
2739 write_inode_buffer:
2740 	/* re-generate the checksum. */
2741 	xfs_dinode_calc_crc(log->l_mp, dip);
2742 
2743 	ASSERT(bp->b_target->bt_mount == mp);
2744 	bp->b_iodone = xlog_recover_iodone;
2745 	xfs_buf_delwri_queue(bp, buffer_list);
2746 	xfs_buf_relse(bp);
2747 error:
2748 	if (need_free)
2749 		kmem_free(in_f);
2750 	return XFS_ERROR(error);
2751 }
2752 
2753 /*
2754  * Recover QUOTAOFF records. We simply make a note of it in the xlog
2755  * structure, so that we know not to do any dquot item or dquot buffer recovery
2756  * of that type.
2757  */
2758 STATIC int
2759 xlog_recover_quotaoff_pass1(
2760 	struct xlog			*log,
2761 	struct xlog_recover_item	*item)
2762 {
2763 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2764 	ASSERT(qoff_f);
2765 
2766 	/*
2767 	 * The logitem format's flag tells us if this was user quotaoff,
2768 	 * group/project quotaoff or both.
2769 	 */
2770 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2771 		log->l_quotaoffs_flag |= XFS_DQ_USER;
2772 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2773 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2774 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2775 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2776 
2777 	return (0);
2778 }
2779 
2780 /*
2781  * Recover a dquot record
2782  */
2783 STATIC int
2784 xlog_recover_dquot_pass2(
2785 	struct xlog			*log,
2786 	struct list_head		*buffer_list,
2787 	struct xlog_recover_item	*item)
2788 {
2789 	xfs_mount_t		*mp = log->l_mp;
2790 	xfs_buf_t		*bp;
2791 	struct xfs_disk_dquot	*ddq, *recddq;
2792 	int			error;
2793 	xfs_dq_logformat_t	*dq_f;
2794 	uint			type;
2795 
2796 
2797 	/*
2798 	 * Filesystems are required to send in quota flags at mount time.
2799 	 */
2800 	if (mp->m_qflags == 0)
2801 		return (0);
2802 
2803 	recddq = item->ri_buf[1].i_addr;
2804 	if (recddq == NULL) {
2805 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2806 		return XFS_ERROR(EIO);
2807 	}
2808 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2809 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2810 			item->ri_buf[1].i_len, __func__);
2811 		return XFS_ERROR(EIO);
2812 	}
2813 
2814 	/*
2815 	 * This type of quota was turned off, so ignore this record.
2816 	 */
2817 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2818 	ASSERT(type);
2819 	if (log->l_quotaoffs_flag & type)
2820 		return (0);
2821 
2822 	/*
2823 	 * At this point we know that quota was _not_ turned off.
2824 	 * Since the mount flags are not indicating to us otherwise, this
2825 	 * must mean that quota is on, and the dquot needs to be replayed.
2826 	 * Remember that we may not have fully recovered the superblock yet,
2827 	 * so we can't do the usual trick of looking at the SB quota bits.
2828 	 *
2829 	 * The other possibility, of course, is that the quota subsystem was
2830 	 * removed since the last mount - ENOSYS.
2831 	 */
2832 	dq_f = item->ri_buf[0].i_addr;
2833 	ASSERT(dq_f);
2834 	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2835 			   "xlog_recover_dquot_pass2 (log copy)");
2836 	if (error)
2837 		return XFS_ERROR(EIO);
2838 	ASSERT(dq_f->qlf_len == 1);
2839 
2840 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2841 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
2842 				   NULL);
2843 	if (error)
2844 		return error;
2845 
2846 	ASSERT(bp);
2847 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2848 
2849 	/*
2850 	 * At least the magic num portion should be on disk because this
2851 	 * was among a chunk of dquots created earlier, and we did some
2852 	 * minimal initialization then.
2853 	 */
2854 	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2855 			   "xlog_recover_dquot_pass2");
2856 	if (error) {
2857 		xfs_buf_relse(bp);
2858 		return XFS_ERROR(EIO);
2859 	}
2860 
2861 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
2862 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2863 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
2864 				 XFS_DQUOT_CRC_OFF);
2865 	}
2866 
2867 	ASSERT(dq_f->qlf_size == 2);
2868 	ASSERT(bp->b_target->bt_mount == mp);
2869 	bp->b_iodone = xlog_recover_iodone;
2870 	xfs_buf_delwri_queue(bp, buffer_list);
2871 	xfs_buf_relse(bp);
2872 
2873 	return (0);
2874 }
2875 
2876 /*
2877  * This routine is called to create an in-core extent free intent
2878  * item from the efi format structure which was logged on disk.
2879  * It allocates an in-core efi, copies the extents from the format
2880  * structure into it, and adds the efi to the AIL with the given
2881  * LSN.
2882  */
2883 STATIC int
2884 xlog_recover_efi_pass2(
2885 	struct xlog			*log,
2886 	struct xlog_recover_item	*item,
2887 	xfs_lsn_t			lsn)
2888 {
2889 	int			error;
2890 	xfs_mount_t		*mp = log->l_mp;
2891 	xfs_efi_log_item_t	*efip;
2892 	xfs_efi_log_format_t	*efi_formatp;
2893 
2894 	efi_formatp = item->ri_buf[0].i_addr;
2895 
2896 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2897 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2898 					 &(efip->efi_format)))) {
2899 		xfs_efi_item_free(efip);
2900 		return error;
2901 	}
2902 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2903 
2904 	spin_lock(&log->l_ailp->xa_lock);
2905 	/*
2906 	 * xfs_trans_ail_update() drops the AIL lock.
2907 	 */
2908 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2909 	return 0;
2910 }
2911 
2912 
2913 /*
2914  * This routine is called when an efd format structure is found in
2915  * a committed transaction in the log.  Its purpose is to cancel
2916  * the corresponding efi if it was still in the log.  To do this
2917  * it searches the AIL for the efi with an id equal to that in the
2918  * efd format structure.  If we find it, we remove the efi from the
2919  * AIL and free it.
2920  */
2921 STATIC int
2922 xlog_recover_efd_pass2(
2923 	struct xlog			*log,
2924 	struct xlog_recover_item	*item)
2925 {
2926 	xfs_efd_log_format_t	*efd_formatp;
2927 	xfs_efi_log_item_t	*efip = NULL;
2928 	xfs_log_item_t		*lip;
2929 	__uint64_t		efi_id;
2930 	struct xfs_ail_cursor	cur;
2931 	struct xfs_ail		*ailp = log->l_ailp;
2932 
2933 	efd_formatp = item->ri_buf[0].i_addr;
2934 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2935 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2936 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2937 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2938 	efi_id = efd_formatp->efd_efi_id;
2939 
2940 	/*
2941 	 * Search for the efi with the id in the efd format structure
2942 	 * in the AIL.
2943 	 */
2944 	spin_lock(&ailp->xa_lock);
2945 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2946 	while (lip != NULL) {
2947 		if (lip->li_type == XFS_LI_EFI) {
2948 			efip = (xfs_efi_log_item_t *)lip;
2949 			if (efip->efi_format.efi_id == efi_id) {
2950 				/*
2951 				 * xfs_trans_ail_delete() drops the
2952 				 * AIL lock.
2953 				 */
2954 				xfs_trans_ail_delete(ailp, lip,
2955 						     SHUTDOWN_CORRUPT_INCORE);
2956 				xfs_efi_item_free(efip);
2957 				spin_lock(&ailp->xa_lock);
2958 				break;
2959 			}
2960 		}
2961 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2962 	}
2963 	xfs_trans_ail_cursor_done(ailp, &cur);
2964 	spin_unlock(&ailp->xa_lock);
2965 
2966 	return 0;
2967 }
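
/*
 * Illustrative trace (hypothetical id): recovering the sequence
 *
 *	EFI id 42 (extents A, B)   -> efi item added to the AIL at its lsn
 *	EFD id 42                  -> AIL searched, efi 42 deleted and freed
 *
 * leaves nothing behind, while an EFI with no matching EFD survives in
 * the AIL and has its extents freed later by xlog_recover_process_efis().
 */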
2968 
2969 /*
2970  * Free up any resources allocated by the transaction
2971  *
2972  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2973  */
2974 STATIC void
2975 xlog_recover_free_trans(
2976 	struct xlog_recover	*trans)
2977 {
2978 	xlog_recover_item_t	*item, *n;
2979 	int			i;
2980 
2981 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2982 		/* Free the regions in the item. */
2983 		list_del(&item->ri_list);
2984 		for (i = 0; i < item->ri_cnt; i++)
2985 			kmem_free(item->ri_buf[i].i_addr);
2986 		/* Free the item itself */
2987 		kmem_free(item->ri_buf);
2988 		kmem_free(item);
2989 	}
2990 	/* Free the transaction recover structure */
2991 	kmem_free(trans);
2992 }
2993 
2994 STATIC int
2995 xlog_recover_commit_pass1(
2996 	struct xlog			*log,
2997 	struct xlog_recover		*trans,
2998 	struct xlog_recover_item	*item)
2999 {
3000 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3001 
3002 	switch (ITEM_TYPE(item)) {
3003 	case XFS_LI_BUF:
3004 		return xlog_recover_buffer_pass1(log, item);
3005 	case XFS_LI_QUOTAOFF:
3006 		return xlog_recover_quotaoff_pass1(log, item);
3007 	case XFS_LI_INODE:
3008 	case XFS_LI_EFI:
3009 	case XFS_LI_EFD:
3010 	case XFS_LI_DQUOT:
3011 		/* nothing to do in pass 1 */
3012 		return 0;
3013 	default:
3014 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3015 			__func__, ITEM_TYPE(item));
3016 		ASSERT(0);
3017 		return XFS_ERROR(EIO);
3018 	}
3019 }
3020 
3021 STATIC int
3022 xlog_recover_commit_pass2(
3023 	struct xlog			*log,
3024 	struct xlog_recover		*trans,
3025 	struct list_head		*buffer_list,
3026 	struct xlog_recover_item	*item)
3027 {
3028 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3029 
3030 	switch (ITEM_TYPE(item)) {
3031 	case XFS_LI_BUF:
3032 		return xlog_recover_buffer_pass2(log, buffer_list, item);
3033 	case XFS_LI_INODE:
3034 		return xlog_recover_inode_pass2(log, buffer_list, item);
3035 	case XFS_LI_EFI:
3036 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3037 	case XFS_LI_EFD:
3038 		return xlog_recover_efd_pass2(log, item);
3039 	case XFS_LI_DQUOT:
3040 		return xlog_recover_dquot_pass2(log, buffer_list, item);
3041 	case XFS_LI_QUOTAOFF:
3042 		/* nothing to do in pass2 */
3043 		return 0;
3044 	default:
3045 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3046 			__func__, ITEM_TYPE(item));
3047 		ASSERT(0);
3048 		return XFS_ERROR(EIO);
3049 	}
3050 }
3051 
3052 /*
3053  * Perform the transaction.
3054  *
3055  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
3056  * EFIs and EFDs get queued up by adding entries into the AIL for them.
3057  */
3058 STATIC int
3059 xlog_recover_commit_trans(
3060 	struct xlog		*log,
3061 	struct xlog_recover	*trans,
3062 	int			pass)
3063 {
3064 	int			error = 0, error2;
3065 	xlog_recover_item_t	*item;
3066 	LIST_HEAD		(buffer_list);
3067 
3068 	hlist_del(&trans->r_list);
3069 
3070 	error = xlog_recover_reorder_trans(log, trans, pass);
3071 	if (error)
3072 		return error;
3073 
3074 	list_for_each_entry(item, &trans->r_itemq, ri_list) {
3075 		switch (pass) {
3076 		case XLOG_RECOVER_PASS1:
3077 			error = xlog_recover_commit_pass1(log, trans, item);
3078 			break;
3079 		case XLOG_RECOVER_PASS2:
3080 			error = xlog_recover_commit_pass2(log, trans,
3081 							  &buffer_list, item);
3082 			break;
3083 		default:
3084 			ASSERT(0);
3085 		}
3086 
3087 		if (error)
3088 			goto out;
3089 	}
3090 
3091 	xlog_recover_free_trans(trans);
3092 
3093 out:
3094 	error2 = xfs_buf_delwri_submit(&buffer_list);
3095 	return error ? error : error2;
3096 }
3097 
3098 STATIC int
3099 xlog_recover_unmount_trans(
3100 	struct xlog		*log,
3101 	struct xlog_recover	*trans)
3102 {
3103 	/* Do nothing now */
3104 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3105 	return 0;
3106 }
3107 
3108 /*
3109  * There are two valid states of the r_state field.  0 indicates that the
3110  * transaction structure is in a normal state.  We have either seen the
3111  * start of the transaction or the last operation we added was not a partial
3112  * operation.  If the last operation we added to the transaction was a
3113  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3114  *
3115  * NOTE: skip LRs with 0 data length.
3116  */
3117 STATIC int
3118 xlog_recover_process_data(
3119 	struct xlog		*log,
3120 	struct hlist_head	rhash[],
3121 	struct xlog_rec_header	*rhead,
3122 	xfs_caddr_t		dp,
3123 	int			pass)
3124 {
3125 	xfs_caddr_t		lp;
3126 	int			num_logops;
3127 	xlog_op_header_t	*ohead;
3128 	xlog_recover_t		*trans;
3129 	xlog_tid_t		tid;
3130 	int			error;
3131 	unsigned long		hash;
3132 	uint			flags;
3133 
3134 	lp = dp + be32_to_cpu(rhead->h_len);
3135 	num_logops = be32_to_cpu(rhead->h_num_logops);
3136 
3137 	/* check the log format matches our own - else we can't recover */
3138 	if (xlog_header_check_recover(log->l_mp, rhead))
3139 		return (XFS_ERROR(EIO));
3140 
3141 	while ((dp < lp) && num_logops) {
3142 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3143 		ohead = (xlog_op_header_t *)dp;
3144 		dp += sizeof(xlog_op_header_t);
3145 		if (ohead->oh_clientid != XFS_TRANSACTION &&
3146 		    ohead->oh_clientid != XFS_LOG) {
3147 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3148 					__func__, ohead->oh_clientid);
3149 			ASSERT(0);
3150 			return (XFS_ERROR(EIO));
3151 		}
3152 		tid = be32_to_cpu(ohead->oh_tid);
3153 		hash = XLOG_RHASH(tid);
3154 		trans = xlog_recover_find_tid(&rhash[hash], tid);
3155 		if (trans == NULL) {		   /* not found; add new tid */
3156 			if (ohead->oh_flags & XLOG_START_TRANS)
3157 				xlog_recover_new_tid(&rhash[hash], tid,
3158 					be64_to_cpu(rhead->h_lsn));
3159 		} else {
3160 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3161 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
3162 					__func__, be32_to_cpu(ohead->oh_len));
3163 				WARN_ON(1);
3164 				return (XFS_ERROR(EIO));
3165 			}
3166 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
3167 			if (flags & XLOG_WAS_CONT_TRANS)
3168 				flags &= ~XLOG_CONTINUE_TRANS;
3169 			switch (flags) {
3170 			case XLOG_COMMIT_TRANS:
3171 				error = xlog_recover_commit_trans(log,
3172 								trans, pass);
3173 				break;
3174 			case XLOG_UNMOUNT_TRANS:
3175 				error = xlog_recover_unmount_trans(log, trans);
3176 				break;
3177 			case XLOG_WAS_CONT_TRANS:
3178 				error = xlog_recover_add_to_cont_trans(log,
3179 						trans, dp,
3180 						be32_to_cpu(ohead->oh_len));
3181 				break;
3182 			case XLOG_START_TRANS:
3183 				xfs_warn(log->l_mp, "%s: bad transaction",
3184 					__func__);
3185 				ASSERT(0);
3186 				error = XFS_ERROR(EIO);
3187 				break;
3188 			case 0:
3189 			case XLOG_CONTINUE_TRANS:
3190 				error = xlog_recover_add_to_trans(log, trans,
3191 						dp, be32_to_cpu(ohead->oh_len));
3192 				break;
3193 			default:
3194 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3195 					__func__, flags);
3196 				ASSERT(0);
3197 				error = XFS_ERROR(EIO);
3198 				break;
3199 			}
3200 			if (error)
3201 				return error;
3202 		}
3203 		dp += be32_to_cpu(ohead->oh_len);
3204 		num_logops--;
3205 	}
3206 	return 0;
3207 }
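
/*
 * Illustrative layout (hypothetical lengths): the record body walked
 * above is a sequence of (xlog_op_header_t, payload) pairs packed back
 * to back, all sharing the tid of their transaction:
 *
 *	[ohead tid=7 len=16  XLOG_START_TRANS ][16 bytes]
 *	[ohead tid=7 len=128 0                ][128 bytes]
 *	[ohead tid=7 len=0   XLOG_COMMIT_TRANS]
 *
 * Each iteration advances dp by sizeof(xlog_op_header_t) + oh_len until
 * lp (= dp + h_len) is reached or num_logops is exhausted.
 */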
3208 
3209 /*
3210  * Process an extent free intent item that was recovered from
3211  * the log.  We need to free the extents that it describes.
3212  */
3213 STATIC int
3214 xlog_recover_process_efi(
3215 	xfs_mount_t		*mp,
3216 	xfs_efi_log_item_t	*efip)
3217 {
3218 	xfs_efd_log_item_t	*efdp;
3219 	xfs_trans_t		*tp;
3220 	int			i;
3221 	int			error = 0;
3222 	xfs_extent_t		*extp;
3223 	xfs_fsblock_t		startblock_fsb;
3224 
3225 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3226 
3227 	/*
3228 	 * First check the validity of the extents described by the
3229 	 * EFI.  If any are bad, then assume that all are bad and
3230 	 * just toss the EFI.
3231 	 */
3232 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3233 		extp = &(efip->efi_format.efi_extents[i]);
3234 		startblock_fsb = XFS_BB_TO_FSB(mp,
3235 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
3236 		if ((startblock_fsb == 0) ||
3237 		    (extp->ext_len == 0) ||
3238 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3239 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3240 			/*
3241 			 * This will pull the EFI from the AIL and
3242 			 * free the memory associated with it.
3243 			 */
3244 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3245 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
3246 			return XFS_ERROR(EIO);
3247 		}
3248 	}
3249 
3250 	tp = xfs_trans_alloc(mp, 0);
3251 	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3252 	if (error)
3253 		goto abort_error;
3254 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3255 
3256 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3257 		extp = &(efip->efi_format.efi_extents[i]);
3258 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3259 		if (error)
3260 			goto abort_error;
3261 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3262 					 extp->ext_len);
3263 	}
3264 
3265 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3266 	error = xfs_trans_commit(tp, 0);
3267 	return error;
3268 
3269 abort_error:
3270 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3271 	return error;
3272 }
3273 
3274 /*
3275  * When this is called, all of the EFIs which did not have
3276  * corresponding EFDs should be in the AIL.  What we do now
3277  * is free the extents associated with each one.
3278  *
3279  * Since we process the EFIs in normal transactions, they
3280  * will be removed at some point after the commit.  This prevents
3281  * us from just walking down the list processing each one.
3282  * We'll use a flag in the EFI to skip those that we've already
3283  * processed and use the AIL iteration mechanism's generation
3284  * count to try to speed this up at least a bit.
3285  *
3286  * When we start, we know that the EFIs are the only things in
3287  * the AIL.  As we process them, however, other items are added
3288  * to the AIL.  Since everything added to the AIL must come after
3289  * everything already in the AIL, we stop processing as soon as
3290  * we see something other than an EFI in the AIL.
3291  */
3292 STATIC int
3293 xlog_recover_process_efis(
3294 	struct xlog	*log)
3295 {
3296 	xfs_log_item_t		*lip;
3297 	xfs_efi_log_item_t	*efip;
3298 	int			error = 0;
3299 	struct xfs_ail_cursor	cur;
3300 	struct xfs_ail		*ailp;
3301 
3302 	ailp = log->l_ailp;
3303 	spin_lock(&ailp->xa_lock);
3304 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3305 	while (lip != NULL) {
3306 		/*
3307 		 * We're done when we see something other than an EFI.
3308 		 * There should be no EFIs left in the AIL now.
3309 		 */
3310 		if (lip->li_type != XFS_LI_EFI) {
3311 #ifdef DEBUG
3312 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3313 				ASSERT(lip->li_type != XFS_LI_EFI);
3314 #endif
3315 			break;
3316 		}
3317 
3318 		/*
3319 		 * Skip EFIs that we've already processed.
3320 		 */
3321 		efip = (xfs_efi_log_item_t *)lip;
3322 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3323 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3324 			continue;
3325 		}
3326 
3327 		spin_unlock(&ailp->xa_lock);
3328 		error = xlog_recover_process_efi(log->l_mp, efip);
3329 		spin_lock(&ailp->xa_lock);
3330 		if (error)
3331 			goto out;
3332 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3333 	}
3334 out:
3335 	xfs_trans_ail_cursor_done(ailp, &cur);
3336 	spin_unlock(&ailp->xa_lock);
3337 	return error;
3338 }
3339 
3340 /*
3341  * This routine performs a transaction to null out a bad inode pointer
3342  * in an agi unlinked inode hash bucket.
3343  */
3344 STATIC void
3345 xlog_recover_clear_agi_bucket(
3346 	xfs_mount_t	*mp,
3347 	xfs_agnumber_t	agno,
3348 	int		bucket)
3349 {
3350 	xfs_trans_t	*tp;
3351 	xfs_agi_t	*agi;
3352 	xfs_buf_t	*agibp;
3353 	int		offset;
3354 	int		error;
3355 
3356 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3357 	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3358 				  0, 0, 0);
3359 	if (error)
3360 		goto out_abort;
3361 
3362 	error = xfs_read_agi(mp, tp, agno, &agibp);
3363 	if (error)
3364 		goto out_abort;
3365 
3366 	agi = XFS_BUF_TO_AGI(agibp);
3367 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3368 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3369 		 (sizeof(xfs_agino_t) * bucket);
3370 	xfs_trans_log_buf(tp, agibp, offset,
3371 			  (offset + sizeof(xfs_agino_t) - 1));
3372 
3373 	error = xfs_trans_commit(tp, 0);
3374 	if (error)
3375 		goto out_error;
3376 	return;
3377 
3378 out_abort:
3379 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3380 out_error:
3381 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3382 	return;
3383 }
3384 
3385 STATIC xfs_agino_t
3386 xlog_recover_process_one_iunlink(
3387 	struct xfs_mount		*mp,
3388 	xfs_agnumber_t			agno,
3389 	xfs_agino_t			agino,
3390 	int				bucket)
3391 {
3392 	struct xfs_buf			*ibp;
3393 	struct xfs_dinode		*dip;
3394 	struct xfs_inode		*ip;
3395 	xfs_ino_t			ino;
3396 	int				error;
3397 
3398 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3399 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3400 	if (error)
3401 		goto fail;
3402 
3403 	/*
3404 	 * Get the on disk inode to find the next inode in the bucket.
3405 	 */
3406 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3407 	if (error)
3408 		goto fail_iput;
3409 
3410 	ASSERT(ip->i_d.di_nlink == 0);
3411 	ASSERT(ip->i_d.di_mode != 0);
3412 
3413 	/* setup for the next pass */
3414 	agino = be32_to_cpu(dip->di_next_unlinked);
3415 	xfs_buf_relse(ibp);
3416 
3417 	/*
3418 	 * Prevent any DMAPI event from being sent when the reference on
3419 	 * the inode is dropped.
3420 	 */
3421 	ip->i_d.di_dmevmask = 0;
3422 
3423 	IRELE(ip);
3424 	return agino;
3425 
3426  fail_iput:
3427 	IRELE(ip);
3428  fail:
3429 	/*
3430 	 * We can't read in the inode this bucket points to, or this inode
3431 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3432 	 * some inodes and space, but at least we won't hang.
3433 	 *
3434 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3435 	 * clear the inode pointer in the bucket.
3436 	 */
3437 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3438 	return NULLAGINO;
3439 }
3440 
3441 /*
3442  * xlog_recover_process_iunlinks()
3443  *
3444  * This is called during recovery to process any inodes which
3445  * we unlinked but not freed when the system crashed.  These
3446  * inodes will be on the lists in the AGI blocks.  What we do
3447  * here is scan all the AGIs and fully truncate and free any
3448  * inodes found on the lists.  Each inode is removed from the
3449  * lists when it has been fully truncated and is freed.  The
3450  * freeing of the inode and its removal from the list must be
3451  * atomic.
3452  */
3453 STATIC void
3454 xlog_recover_process_iunlinks(
3455 	struct xlog	*log)
3456 {
3457 	xfs_mount_t	*mp;
3458 	xfs_agnumber_t	agno;
3459 	xfs_agi_t	*agi;
3460 	xfs_buf_t	*agibp;
3461 	xfs_agino_t	agino;
3462 	int		bucket;
3463 	int		error;
3464 	uint		mp_dmevmask;
3465 
3466 	mp = log->l_mp;
3467 
3468 	/*
3469 	 * Prevent any DMAPI event from being sent while in this function.
3470 	 */
3471 	mp_dmevmask = mp->m_dmevmask;
3472 	mp->m_dmevmask = 0;
3473 
3474 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3475 		/*
3476 		 * Find the agi for this ag.
3477 		 */
3478 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3479 		if (error) {
3480 			/*
3481 			 * The AGI is corrupted - don't process it.
3482 			 *
3483 			 * We should probably mark the filesystem as corrupt
3484 			 * after we've recovered all the AGs we can....
3485 			 */
3486 			continue;
3487 		}
3488 		/*
3489 		 * Unlock the buffer so that it can be acquired in the normal
3490 		 * course of the transaction to truncate and free each inode.
3491 		 * Because we are not racing with anyone else here for the AGI
3492 		 * buffer, we don't even need to hold it locked to read the
3493 		 * initial unlinked bucket entries out of the buffer. We keep
3494 		 * the buffer reference, though, so that it stays pinned in memory
3495 		 * while we need the buffer.
3496 		 */
3497 		agi = XFS_BUF_TO_AGI(agibp);
3498 		xfs_buf_unlock(agibp);
3499 
3500 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3501 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3502 			while (agino != NULLAGINO) {
3503 				agino = xlog_recover_process_one_iunlink(mp,
3504 							agno, agino, bucket);
3505 			}
3506 		}
3507 		xfs_buf_rele(agibp);
3508 	}
3509 
3510 	mp->m_dmevmask = mp_dmevmask;
3511 }
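
/*
 * Illustrative note: with XFS_AGI_UNLINKED_BUCKETS == 64, each AGI holds 64
 * singly linked chains of unlinked inodes (hashed by inode number), and the
 * walk above drains every chain in every AG until each bucket head reads
 * NULLAGINO.
 */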
3512 
3513 /*
3514  * Unpack the log buffer data and CRC check it. If the check fails, issue a
3515  * warning only if the CRC in the header is non-zero or the filesystem has
3516  * CRCs enabled. This makes the check an advisory warning, and the zero CRC
3517  * check prevents failure warnings from being emitted when upgrading the
3518  * kernel from one that does not add CRCs by default.
3519  *
3520  * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3521  * corruption failure.
3522  */
3523 STATIC int
3524 xlog_unpack_data_crc(
3525 	struct xlog_rec_header	*rhead,
3526 	xfs_caddr_t		dp,
3527 	struct xlog		*log)
3528 {
3529 	__le32			crc;
3530 
3531 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3532 	if (crc != rhead->h_crc) {
3533 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3534 			xfs_alert(log->l_mp,
3535 		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3536 					le32_to_cpu(rhead->h_crc),
3537 					le32_to_cpu(crc));
3538 			xfs_hex_dump(dp, 32);
3539 		}
3540 
3541 		/*
3542 		 * If we've detected a log record corruption, then we can't
3543 		 * recover past this point. Abort recovery if we are enforcing
3544 		 * CRC protection by punting an error back up the stack.
3545 		 */
3546 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3547 			return EFSCORRUPTED;
3548 	}
3549 
3550 	return 0;
3551 }
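
/*
 * Summary of the outcomes above (illustrative):
 *
 *	CRC matches				-> 0
 *	mismatch, h_crc == 0, non-CRC fs	-> silent success (pre-CRC log)
 *	mismatch, h_crc != 0, non-CRC fs	-> advisory warning, still 0
 *	mismatch, CRC-enabled fs		-> warning and EFSCORRUPTED
 */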
3552 
3553 STATIC int
3554 xlog_unpack_data(
3555 	struct xlog_rec_header	*rhead,
3556 	xfs_caddr_t		dp,
3557 	struct xlog		*log)
3558 {
3559 	int			i, j, k;
3560 	int			error;
3561 
3562 	error = xlog_unpack_data_crc(rhead, dp, log);
3563 	if (error)
3564 		return error;
3565 
3566 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3567 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3568 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3569 		dp += BBSIZE;
3570 	}
3571 
3572 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3573 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3574 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3575 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3576 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3577 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3578 			dp += BBSIZE;
3579 		}
3580 	}
3581 
3582 	return 0;
3583 }
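
/*
 * Worked example (illustrative): at write time the first four bytes of every
 * basic block in the record payload were overwritten with the cycle number
 * and the originals stashed in the header(s); the loops above restore them.
 * With XLOG_HEADER_CYCLE_SIZE = 32k and BBSIZE = 512 there are 64 slots per
 * header, so block 70 of a large v2 record comes back from
 * xhdr[70 / 64].hic_xheader.xh_cycle_data[70 % 64], i.e. j = 1, k = 6.
 */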
3584 
3585 STATIC int
3586 xlog_valid_rec_header(
3587 	struct xlog		*log,
3588 	struct xlog_rec_header	*rhead,
3589 	xfs_daddr_t		blkno)
3590 {
3591 	int			hlen;
3592 
3593 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3594 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3595 				XFS_ERRLEVEL_LOW, log->l_mp);
3596 		return XFS_ERROR(EFSCORRUPTED);
3597 	}
3598 	if (unlikely(
3599 	    (!rhead->h_version ||
3600 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3601 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3602 			__func__, be32_to_cpu(rhead->h_version));
3603 		return XFS_ERROR(EIO);
3604 	}
3605 
3606 	/* LR body must have data or it wouldn't have been written */
3607 	hlen = be32_to_cpu(rhead->h_len);
3608 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3609 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3610 				XFS_ERRLEVEL_LOW, log->l_mp);
3611 		return XFS_ERROR(EFSCORRUPTED);
3612 	}
3613 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3614 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3615 				XFS_ERRLEVEL_LOW, log->l_mp);
3616 		return XFS_ERROR(EFSCORRUPTED);
3617 	}
3618 	return 0;
3619 }
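
/*
 * Illustrative note: XLOG_VERSION_OKBITS covers the two known log versions,
 * so the h_version test above rejects a zero version and any header with
 * bits set outside those, while the h_len and blkno tests bound the record
 * within the physical log.
 */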
3620 
3621 /*
3622  * Read the log from tail to head and process the log records found.
3623  * Handle the two cases where the tail and head are in the same cycle
3624  * and where the active portion of the log wraps around the end of
3625  * the physical log separately.  The pass parameter is passed through
3626  * to the routines called to process the data and is not looked at
3627  * here.
3628  */
3629 STATIC int
3630 xlog_do_recovery_pass(
3631 	struct xlog		*log,
3632 	xfs_daddr_t		head_blk,
3633 	xfs_daddr_t		tail_blk,
3634 	int			pass)
3635 {
3636 	xlog_rec_header_t	*rhead;
3637 	xfs_daddr_t		blk_no;
3638 	xfs_caddr_t		offset;
3639 	xfs_buf_t		*hbp, *dbp;
3640 	int			error = 0, h_size;
3641 	int			bblks, split_bblks;
3642 	int			hblks, split_hblks, wrapped_hblks;
3643 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3644 
3645 	ASSERT(head_blk != tail_blk);
3646 
3647 	/*
3648 	 * Read the header of the tail block and get the iclog buffer size from
3649 	 * h_size.  Use this to tell how many sectors make up the log header.
3650 	 */
3651 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3652 		/*
3653 		 * When using variable length iclogs, read first sector of
3654 		 * iclog header and extract the header size from it.  Get a
3655 		 * new hbp that is the correct size.
3656 		 */
3657 		hbp = xlog_get_bp(log, 1);
3658 		if (!hbp)
3659 			return ENOMEM;
3660 
3661 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3662 		if (error)
3663 			goto bread_err1;
3664 
3665 		rhead = (xlog_rec_header_t *)offset;
3666 		error = xlog_valid_rec_header(log, rhead, tail_blk);
3667 		if (error)
3668 			goto bread_err1;
3669 		h_size = be32_to_cpu(rhead->h_size);
3670 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3671 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3672 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3673 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3674 				hblks++;
3675 			xlog_put_bp(hbp);
3676 			hbp = xlog_get_bp(log, hblks);
3677 		} else {
3678 			hblks = 1;
3679 		}
3680 	} else {
3681 		ASSERT(log->l_sectBBsize == 1);
3682 		hblks = 1;
3683 		hbp = xlog_get_bp(log, 1);
3684 		h_size = XLOG_BIG_RECORD_BSIZE;
3685 	}
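
	/*
	 * Illustrative note: in the v1 branch above the record header is
	 * always a single basic block and h_size is pinned at
	 * XLOG_BIG_RECORD_BSIZE (32k), so the data buffer allocated below
	 * is sized for the largest record a v1 log can contain.
	 */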
3686 
3687 	if (!hbp)
3688 		return ENOMEM;
3689 	dbp = xlog_get_bp(log, BTOBB(h_size));
3690 	if (!dbp) {
3691 		xlog_put_bp(hbp);
3692 		return ENOMEM;
3693 	}
3694 
3695 	memset(rhash, 0, sizeof(rhash));
3696 	if (tail_blk <= head_blk) {
3697 		for (blk_no = tail_blk; blk_no < head_blk; ) {
3698 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3699 			if (error)
3700 				goto bread_err2;
3701 
3702 			rhead = (xlog_rec_header_t *)offset;
3703 			error = xlog_valid_rec_header(log, rhead, blk_no);
3704 			if (error)
3705 				goto bread_err2;
3706 
3707 			/* blocks in data section */
3708 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3709 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3710 					   &offset);
3711 			if (error)
3712 				goto bread_err2;
3713 
3714 			error = xlog_unpack_data(rhead, offset, log);
3715 			if (error)
3716 				goto bread_err2;
3717 
3718 			error = xlog_recover_process_data(log,
3719 						rhash, rhead, offset, pass);
3720 			if (error)
3721 				goto bread_err2;
3722 			blk_no += bblks + hblks;
3723 		}
3724 	} else {
3725 		/*
3726 		 * Perform recovery around the end of the physical log.
3727 		 * When the head is not on the same cycle number as the tail,
3728 		 * we can't do a sequential recovery as above.
3729 		 */
3730 		blk_no = tail_blk;
3731 		while (blk_no < log->l_logBBsize) {
3732 			/*
3733 			 * Check for header wrapping around physical end-of-log
3734 			 */
3735 			offset = hbp->b_addr;
3736 			split_hblks = 0;
3737 			wrapped_hblks = 0;
3738 			if (blk_no + hblks <= log->l_logBBsize) {
3739 				/* Read header in one read */
3740 				error = xlog_bread(log, blk_no, hblks, hbp,
3741 						   &offset);
3742 				if (error)
3743 					goto bread_err2;
3744 			} else {
3745 				/* This LR is split across physical log end */
3746 				if (blk_no != log->l_logBBsize) {
3747 					/* some data before physical log end */
3748 					ASSERT(blk_no <= INT_MAX);
3749 					split_hblks = log->l_logBBsize - (int)blk_no;
3750 					ASSERT(split_hblks > 0);
3751 					error = xlog_bread(log, blk_no,
3752 							   split_hblks, hbp,
3753 							   &offset);
3754 					if (error)
3755 						goto bread_err2;
3756 				}
3757 
3758 				/*
3759 				 * Note: this black magic still works with
3760 				 * large sector sizes (non-512) only because:
3761 				 * - we increased the buffer size originally
3762 				 *   by 1 sector giving us enough extra space
3763 				 *   for the second read;
3764 				 * - the log start is guaranteed to be sector
3765 				 *   aligned;
3766 				 * - we read the log end (LR header start)
3767 				 *   _first_, then the log start (LR header end)
3768 				 *   - order is important.
3769 				 */
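				/*
				 * Illustrative example: with l_logBBsize =
				 * 1000, blk_no = 998 and hblks = 4, the read
				 * above pulled split_hblks = 2 blocks
				 * (998-999) into the start of hbp, and the
				 * read below pulls wrapped_hblks = 2 blocks
				 * (0-1) in at byte offset BBTOB(2) behind
				 * them, reassembling one contiguous header.
				 */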
3770 				wrapped_hblks = hblks - split_hblks;
3771 				error = xlog_bread_offset(log, 0,
3772 						wrapped_hblks, hbp,
3773 						offset + BBTOB(split_hblks));
3774 				if (error)
3775 					goto bread_err2;
3776 			}
3777 			rhead = (xlog_rec_header_t *)offset;
3778 			error = xlog_valid_rec_header(log, rhead,
3779 						split_hblks ? blk_no : 0);
3780 			if (error)
3781 				goto bread_err2;
3782 
3783 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3784 			blk_no += hblks;
3785 
3786 			/* Read in data for log record */
3787 			if (blk_no + bblks <= log->l_logBBsize) {
3788 				error = xlog_bread(log, blk_no, bblks, dbp,
3789 						   &offset);
3790 				if (error)
3791 					goto bread_err2;
3792 			} else {
3793 				/* This log record is split across the
3794 				 * physical end of log */
3795 				offset = dbp->b_addr;
3796 				split_bblks = 0;
3797 				if (blk_no != log->l_logBBsize) {
3798 					/* some data is before the physical
3799 					 * end of log */
3800 					ASSERT(!wrapped_hblks);
3801 					ASSERT(blk_no <= INT_MAX);
3802 					split_bblks =
3803 						log->l_logBBsize - (int)blk_no;
3804 					ASSERT(split_bblks > 0);
3805 					error = xlog_bread(log, blk_no,
3806 							split_bblks, dbp,
3807 							&offset);
3808 					if (error)
3809 						goto bread_err2;
3810 				}
3811 
3812 				/*
3813 				 * Note: this black magic still works with
3814 				 * large sector sizes (non-512) only because:
3815 				 * - we increased the buffer size originally
3816 				 *   by 1 sector giving us enough extra space
3817 				 *   for the second read;
3818 				 * - the log start is guaranteed to be sector
3819 				 *   aligned;
3820 				 * - we read the log end (LR header start)
3821 				 *   _first_, then the log start (LR header end)
3822 				 *   - order is important.
3823 				 */
3824 				error = xlog_bread_offset(log, 0,
3825 						bblks - split_bblks, dbp,
3826 						offset + BBTOB(split_bblks));
3827 				if (error)
3828 					goto bread_err2;
3829 			}
3830 
3831 			error = xlog_unpack_data(rhead, offset, log);
3832 			if (error)
3833 				goto bread_err2;
3834 
3835 			error = xlog_recover_process_data(log, rhash,
3836 							rhead, offset, pass);
3837 			if (error)
3838 				goto bread_err2;
3839 			blk_no += bblks;
3840 		}
3841 
3842 		ASSERT(blk_no >= log->l_logBBsize);
3843 		blk_no -= log->l_logBBsize;
3844 
3845 		/* read first part of physical log */
3846 		while (blk_no < head_blk) {
3847 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3848 			if (error)
3849 				goto bread_err2;
3850 
3851 			rhead = (xlog_rec_header_t *)offset;
3852 			error = xlog_valid_rec_header(log, rhead, blk_no);
3853 			if (error)
3854 				goto bread_err2;
3855 
3856 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3857 			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3858 					   &offset);
3859 			if (error)
3860 				goto bread_err2;
3861 
3862 			error = xlog_unpack_data(rhead, offset, log);
3863 			if (error)
3864 				goto bread_err2;
3865 
3866 			error = xlog_recover_process_data(log, rhash,
3867 							rhead, offset, pass);
3868 			if (error)
3869 				goto bread_err2;
3870 			blk_no += bblks + hblks;
3871 		}
3872 	}
3873 
3874  bread_err2:
3875 	xlog_put_bp(dbp);
3876  bread_err1:
3877 	xlog_put_bp(hbp);
3878 	return error;
3879 }
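
/*
 * Illustrative walk-through of the wrapped case above: with l_logBBsize =
 * 1000, tail_blk = 900 and head_blk = 100, the first while loop replays
 * records from block 900 up to the physical end of the log (stitching back
 * together any header or body that straddles block 1000), blk_no then wraps
 * to the front, and the second loop replays from block 0 up to the head at
 * block 100.
 */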
3880 
3881 /*
3882  * Do the recovery of the log.  We actually do this in two phases.
3883  * The two passes are necessary in order to implement the function
3884  * of cancelling a record written into the log.  The first pass
3885  * determines those things which have been cancelled, and the
3886  * second pass replays log items normally except for those which
3887  * have been cancelled.  The handling of the replay and cancellations
3888  * takes place in the log item type specific routines.
3889  *
3890  * The table of items which have cancel records in the log is allocated
3891  * and freed at this level, since only here do we know when all of
3892  * the log recovery has been completed.
3893  */
3894 STATIC int
3895 xlog_do_log_recovery(
3896 	struct xlog	*log,
3897 	xfs_daddr_t	head_blk,
3898 	xfs_daddr_t	tail_blk)
3899 {
3900 	int		error, i;
3901 
3902 	ASSERT(head_blk != tail_blk);
3903 
3904 	/*
3905 	 * First do a pass to find all of the cancelled buf log items.
3906 	 * Store them in the buf_cancel_table for use in the second pass.
3907 	 */
3908 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3909 						 sizeof(struct list_head),
3910 						 KM_SLEEP);
3911 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3912 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3913 
3914 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3915 				      XLOG_RECOVER_PASS1);
3916 	if (error != 0) {
3917 		kmem_free(log->l_buf_cancel_table);
3918 		log->l_buf_cancel_table = NULL;
3919 		return error;
3920 	}
3921 	/*
3922 	 * Then do a second pass to actually recover the items in the log.
3923 	 * When it is complete free the table of buf cancel items.
3924 	 */
3925 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3926 				      XLOG_RECOVER_PASS2);
3927 #ifdef DEBUG
3928 	if (!error) {
3929 		int	i;
3930 
3931 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3932 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3933 	}
3934 #endif	/* DEBUG */
3935 
3936 	kmem_free(log->l_buf_cancel_table);
3937 	log->l_buf_cancel_table = NULL;
3938 
3939 	return error;
3940 }
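
/*
 * Illustrative note: a buffer that was logged and later cancelled within the
 * range being recovered ends up in l_buf_cancel_table after pass 1, so pass
 * 2 skips replaying the stale image rather than clobbering whatever now
 * lives at that disk address.
 */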
3941 
3942 /*
3943  * Do the actual recovery
3944  */
3945 STATIC int
3946 xlog_do_recover(
3947 	struct xlog	*log,
3948 	xfs_daddr_t	head_blk,
3949 	xfs_daddr_t	tail_blk)
3950 {
3951 	int		error;
3952 	xfs_buf_t	*bp;
3953 	xfs_sb_t	*sbp;
3954 
3955 	/*
3956 	 * First replay the images in the log.
3957 	 */
3958 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3959 	if (error)
3960 		return error;
3961 
3962 	/*
3963 	 * If IO errors happened during recovery, bail out.
3964 	 */
3965 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3966 		return EIO;
3967 	}
3968 
3969 	/*
3970 	 * We now update the tail_lsn since much of the recovery has completed
3971 	 * and there may be space available to use.  If there were no extent
3972 	 * or iunlinks, we can free up the entire log and set the tail_lsn to
3973 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3974 	 * lsn of the last known good LR on disk.  If there are extent frees
3975 	 * or iunlinks they will have some entries in the AIL; so we look at
3976 	 * the AIL to determine how to set the tail_lsn.
3977 	 */
3978 	xlog_assign_tail_lsn(log->l_mp);
3979 
3980 	/*
3981 	 * Now that we've finished replaying all buffer and inode
3982 	 * updates, re-read in the superblock and reverify it.
3983 	 */
3984 	bp = xfs_getsb(log->l_mp, 0);
3985 	XFS_BUF_UNDONE(bp);
3986 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
3987 	XFS_BUF_READ(bp);
3988 	XFS_BUF_UNASYNC(bp);
3989 	bp->b_ops = &xfs_sb_buf_ops;
3990 	xfsbdstrat(log->l_mp, bp);
3991 	error = xfs_buf_iowait(bp);
3992 	if (error) {
3993 		xfs_buf_ioerror_alert(bp, __func__);
3994 		ASSERT(0);
3995 		xfs_buf_relse(bp);
3996 		return error;
3997 	}
3998 
3999 	/* Convert superblock from on-disk format */
4000 	sbp = &log->l_mp->m_sb;
4001 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4002 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4003 	ASSERT(xfs_sb_good_version(sbp));
4004 	xfs_buf_relse(bp);
4005 
4006 	/* We've re-read the superblock so re-initialize per-cpu counters */
4007 	xfs_icsb_reinit_counters(log->l_mp);
4008 
4009 	xlog_recover_check_summary(log);
4010 
4011 	/* Normal transactions can now occur */
4012 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4013 	return 0;
4014 }
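
/*
 * Illustrative note: attaching xfs_sb_buf_ops before the re-read above sends
 * the superblock back through its normal read verifier, so a superblock
 * mangled by replay (or failing its CRC on a v5 filesystem) is caught here
 * rather than feeding bad geometry to the rest of the mount.
 */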
4015 
4016 /*
4017  * Perform recovery and re-initialize some log variables in xlog_find_tail.
4018  *
4019  * Return error or zero.
4020  */
4021 int
4022 xlog_recover(
4023 	struct xlog	*log)
4024 {
4025 	xfs_daddr_t	head_blk, tail_blk;
4026 	int		error;
4027 
4028 	/* find the tail of the log */
4029 	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4030 		return error;
4031 
4032 	if (tail_blk != head_blk) {
4033 		/* There used to be a comment here:
4034 		 *
4035 		 * disallow recovery on read-only mounts.  note -- mount
4036 		 * checks for ENOSPC and turns it into an intelligent
4037 		 * error message.
4038 		 * ...but this is no longer true.  Now, unless you specify
4039 		 * NORECOVERY (in which case this function would never be
4040 		 * called), we just go ahead and recover.  We do this all
4041 		 * under the vfs layer, so we can get away with it unless
4042 		 * the device itself is read-only, in which case we fail.
4043 		 */
4044 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4045 			return error;
4046 		}
4047 
4048 		/*
4049 		 * Version 5 superblock log feature mask validation. We know the
4050 		 * log is dirty so check if there are any unknown log features
4051 		 * in what we need to recover. If there are unknown features
4052 		 * (e.g. unsupported transactions), then simply reject the
4053 		 * attempt at recovery before touching anything.
4054 		 */
4055 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4056 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4057 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4058 			xfs_warn(log->l_mp,
4059 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4060 "The log can not be fully and/or safely recovered by this kernel.\n"
4061 "Please recover the log on a kernel that supports the unknown features.",
4062 				(log->l_mp->m_sb.sb_features_log_incompat &
4063 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4064 			return EINVAL;
4065 		}
4066 
4067 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4068 				log->l_mp->m_logname ? log->l_mp->m_logname
4069 						     : "internal");
4070 
4071 		error = xlog_do_recover(log, head_blk, tail_blk);
4072 		log->l_flags |= XLOG_RECOVERY_NEEDED;
4073 	}
4074 	return error;
4075 }
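
/*
 * Illustrative note: XLOG_RECOVERY_NEEDED is left set above on success so
 * that xlog_recover_finish() below knows it still has EFIs and unlinked
 * inode lists to process once the mount has read in the root and realtime
 * inodes.
 */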
4076 
4077 /*
4078  * In the first part of recovery we replay inodes and buffers and build
4079  * up the list of extent free items which need to be processed.  Here
4080  * we process the extent free items and clean up the on disk unlinked
4081  * inode lists.  This is separated from the first part of recovery so
4082  * that the root and real-time bitmap inodes can be read in from disk in
4083  * between the two stages.  This is necessary so that we can free space
4084  * in the real-time portion of the file system.
4085  */
4086 int
4087 xlog_recover_finish(
4088 	struct xlog	*log)
4089 {
4090 	/*
4091 	 * Now we're ready to do the transactions needed for the
4092 	 * rest of recovery.  Start with completing all the extent
4093 	 * free intent records and then process the unlinked inode
4094 	 * lists.  At this point, we essentially run in normal mode
4095 	 * except that we're still performing recovery actions
4096 	 * rather than accepting new requests.
4097 	 */
4098 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4099 		int	error;
4100 		error = xlog_recover_process_efis(log);
4101 		if (error) {
4102 			xfs_alert(log->l_mp, "Failed to recover EFIs");
4103 			return error;
4104 		}
4105 		/*
4106 		 * Sync the log to get all the EFIs out of the AIL.
4107 		 * This isn't absolutely necessary, but it helps in
4108 		 * case the unlink transactions would have problems
4109 		 * pushing the EFIs out of the way.
4110 		 */
4111 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4112 
4113 		xlog_recover_process_iunlinks(log);
4114 
4115 		xlog_recover_check_summary(log);
4116 
4117 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4118 				log->l_mp->m_logname ? log->l_mp->m_logname
4119 						     : "internal");
4120 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4121 	} else {
4122 		xfs_info(log->l_mp, "Ending clean mount");
4123 	}
4124 	return 0;
4125 }
4126 
4127 
4128 #if defined(DEBUG)
4129 /*
4130  * Read and sum all of the agf and agi counters.  Note that the loop
4131  * below no longer compares the totals against the superblock counters.
4132  */
4133 void
4134 xlog_recover_check_summary(
4135 	struct xlog	*log)
4136 {
4137 	xfs_mount_t	*mp;
4138 	xfs_agf_t	*agfp;
4139 	xfs_buf_t	*agfbp;
4140 	xfs_buf_t	*agibp;
4141 	xfs_agnumber_t	agno;
4142 	__uint64_t	freeblks;
4143 	__uint64_t	itotal;
4144 	__uint64_t	ifree;
4145 	int		error;
4146 
4147 	mp = log->l_mp;
4148 
4149 	freeblks = 0LL;
4150 	itotal = 0LL;
4151 	ifree = 0LL;
4152 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4153 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4154 		if (error) {
4155 			xfs_alert(mp, "%s agf read failed agno %d error %d",
4156 						__func__, agno, error);
4157 		} else {
4158 			agfp = XFS_BUF_TO_AGF(agfbp);
4159 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
4160 				    be32_to_cpu(agfp->agf_flcount);
4161 			xfs_buf_relse(agfbp);
4162 		}
4163 
4164 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4165 		if (error) {
4166 			xfs_alert(mp, "%s agi read failed agno %d error %d",
4167 						__func__, agno, error);
4168 		} else {
4169 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
4170 
4171 			itotal += be32_to_cpu(agi->agi_count);
4172 			ifree += be32_to_cpu(agi->agi_freecount);
4173 			xfs_buf_relse(agibp);
4174 		}
4175 	}
4176 }
4177 #endif /* DEBUG */
4178