xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision d0b73b48)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_error.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_alloc.h"
36 #include "xfs_ialloc.h"
37 #include "xfs_log_priv.h"
38 #include "xfs_buf_item.h"
39 #include "xfs_log_recover.h"
40 #include "xfs_extfree_item.h"
41 #include "xfs_trans_priv.h"
42 #include "xfs_quota.h"
43 #include "xfs_utils.h"
44 #include "xfs_cksum.h"
45 #include "xfs_trace.h"
46 #include "xfs_icache.h"
47 
48 STATIC int
49 xlog_find_zeroed(
50 	struct xlog	*,
51 	xfs_daddr_t	*);
52 STATIC int
53 xlog_clear_stale_blocks(
54 	struct xlog	*,
55 	xfs_lsn_t);
56 #if defined(DEBUG)
57 STATIC void
58 xlog_recover_check_summary(
59 	struct xlog *);
60 #else
61 #define	xlog_recover_check_summary(log)
62 #endif
63 
64 /*
65  * This structure is used during recovery to record the buf log items which
66  * have been canceled and should not be replayed.
67  */
68 struct xfs_buf_cancel {
69 	xfs_daddr_t		bc_blkno;
70 	uint			bc_len;
71 	int			bc_refcount;
72 	struct list_head	bc_list;
73 };
74 
75 /*
76  * Sector aligned buffer routines for buffer create/read/write/access
77  */
78 
79 /*
80  * Verify that the given count of basic blocks is a valid number of blocks
81  * to specify for an operation involving the given XFS log buffer.
82  * Returns nonzero if the count is valid, 0 otherwise.
83  */
84 
85 static inline int
86 xlog_buf_bbcount_valid(
87 	struct xlog	*log,
88 	int		bbcount)
89 {
90 	return bbcount > 0 && bbcount <= log->l_logBBsize;
91 }
92 
93 /*
94  * Allocate a buffer to hold log data.  The buffer needs to be able
95  * to map to a range of nbblks basic blocks at any valid (basic
96  * block) offset within the log.
97  */
98 STATIC xfs_buf_t *
99 xlog_get_bp(
100 	struct xlog	*log,
101 	int		nbblks)
102 {
103 	struct xfs_buf	*bp;
104 
105 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
106 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
107 			nbblks);
108 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
109 		return NULL;
110 	}
111 
112 	/*
113 	 * We do log I/O in units of log sectors (a power-of-2
114 	 * multiple of the basic block size), so we round up the
115 	 * requested size to accommodate the basic blocks required
116 	 * for complete log sectors.
117 	 *
118 	 * In addition, the buffer may be used for a non-sector-
119 	 * aligned block offset, in which case an I/O of the
120 	 * requested size could extend beyond the end of the
121 	 * buffer.  If the requested size is only 1 basic block it
122 	 * will never straddle a sector boundary, so this won't be
123 	 * an issue.  Nor will this be a problem if the log I/O is
124 	 * done in basic blocks (sector size 1).  But otherwise we
125 	 * extend the buffer by one extra log sector to ensure
126 	 * there's space to accommodate this possibility.
127 	 */
128 	if (nbblks > 1 && log->l_sectBBsize > 1)
129 		nbblks += log->l_sectBBsize;
130 	nbblks = round_up(nbblks, log->l_sectBBsize);
131 
132 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
133 	if (bp)
134 		xfs_buf_unlock(bp);
135 	return bp;
136 }
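
/*
 * Worked example of the sizing above (illustrative values, not tied to
 * any particular filesystem): with l_sectBBsize == 8 and a request for
 * nbblks == 10, we first add one extra sector (10 + 8 == 18) to cover a
 * non-sector-aligned starting offset, then round up to whole sectors:
 * round_up(18, 8) == 24 basic blocks.  The resulting buffer can map any
 * 10-block range starting at an arbitrary basic block in the log.
 */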
137 
138 STATIC void
139 xlog_put_bp(
140 	xfs_buf_t	*bp)
141 {
142 	xfs_buf_free(bp);
143 }
144 
145 /*
146  * Return the address of the start of the given block number's data
147  * in a log buffer.  The buffer covers a log sector-aligned region.
148  */
149 STATIC xfs_caddr_t
150 xlog_align(
151 	struct xlog	*log,
152 	xfs_daddr_t	blk_no,
153 	int		nbblks,
154 	struct xfs_buf	*bp)
155 {
156 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
157 
158 	ASSERT(offset + nbblks <= bp->b_length);
159 	return bp->b_addr + BBTOB(offset);
160 }
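
/*
 * For example (illustrative values): with l_sectBBsize == 8, a read of
 * block 13 is rounded down by xlog_bread_noalign() to start at block 8,
 * so offset == 13 & 7 == 5 and the caller's data begins BBTOB(5) == 2560
 * bytes into bp->b_addr.
 */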
161 
162 
163 /*
164  * nbblks should be uint, but oh well.  Keeping it int lets the validity check catch a negative 32-bit length.
165  */
166 STATIC int
167 xlog_bread_noalign(
168 	struct xlog	*log,
169 	xfs_daddr_t	blk_no,
170 	int		nbblks,
171 	struct xfs_buf	*bp)
172 {
173 	int		error;
174 
175 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
176 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
177 			nbblks);
178 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
179 		return EFSCORRUPTED;
180 	}
181 
182 	blk_no = round_down(blk_no, log->l_sectBBsize);
183 	nbblks = round_up(nbblks, log->l_sectBBsize);
184 
185 	ASSERT(nbblks > 0);
186 	ASSERT(nbblks <= bp->b_length);
187 
188 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
189 	XFS_BUF_READ(bp);
190 	bp->b_io_length = nbblks;
191 	bp->b_error = 0;
192 
193 	xfsbdstrat(log->l_mp, bp);
194 	error = xfs_buf_iowait(bp);
195 	if (error)
196 		xfs_buf_ioerror_alert(bp, __func__);
197 	return error;
198 }
199 
200 STATIC int
201 xlog_bread(
202 	struct xlog	*log,
203 	xfs_daddr_t	blk_no,
204 	int		nbblks,
205 	struct xfs_buf	*bp,
206 	xfs_caddr_t	*offset)
207 {
208 	int		error;
209 
210 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
211 	if (error)
212 		return error;
213 
214 	*offset = xlog_align(log, blk_no, nbblks, bp);
215 	return 0;
216 }
217 
218 /*
219  * Read at an offset into the buffer. Returns with the buffer in its original
220  * state regardless of the result of the read.
221  */
222 STATIC int
223 xlog_bread_offset(
224 	struct xlog	*log,
225 	xfs_daddr_t	blk_no,		/* block to read from */
226 	int		nbblks,		/* blocks to read */
227 	struct xfs_buf	*bp,
228 	xfs_caddr_t	offset)
229 {
230 	xfs_caddr_t	orig_offset = bp->b_addr;
231 	int		orig_len = BBTOB(bp->b_length);
232 	int		error, error2;
233 
234 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
235 	if (error)
236 		return error;
237 
238 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
239 
240 	/* must reset buffer pointer even on error */
241 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
242 	if (error)
243 		return error;
244 	return error2;
245 }
246 
247 /*
248  * Write out the buffer at the given block for the given number of blocks.
249  * The buffer is kept locked across the write and is returned locked.
250  * This can only be used for synchronous log writes.
251  */
252 STATIC int
253 xlog_bwrite(
254 	struct xlog	*log,
255 	xfs_daddr_t	blk_no,
256 	int		nbblks,
257 	struct xfs_buf	*bp)
258 {
259 	int		error;
260 
261 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
262 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
263 			nbblks);
264 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
265 		return EFSCORRUPTED;
266 	}
267 
268 	blk_no = round_down(blk_no, log->l_sectBBsize);
269 	nbblks = round_up(nbblks, log->l_sectBBsize);
270 
271 	ASSERT(nbblks > 0);
272 	ASSERT(nbblks <= bp->b_length);
273 
274 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
275 	XFS_BUF_ZEROFLAGS(bp);
276 	xfs_buf_hold(bp);
277 	xfs_buf_lock(bp);
278 	bp->b_io_length = nbblks;
279 	bp->b_error = 0;
280 
281 	error = xfs_bwrite(bp);
282 	if (error)
283 		xfs_buf_ioerror_alert(bp, __func__);
284 	xfs_buf_relse(bp);
285 	return error;
286 }
287 
288 #ifdef DEBUG
289 /*
290  * dump debug superblock and log record information
291  */
292 STATIC void
293 xlog_header_check_dump(
294 	xfs_mount_t		*mp,
295 	xlog_rec_header_t	*head)
296 {
297 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
298 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
299 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
300 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
301 }
302 #else
303 #define xlog_header_check_dump(mp, head)
304 #endif
305 
306 /*
307  * check log record header for recovery
308  */
309 STATIC int
310 xlog_header_check_recover(
311 	xfs_mount_t		*mp,
312 	xlog_rec_header_t	*head)
313 {
314 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
315 
316 	/*
317 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
318 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
319 	 * a dirty log created in IRIX.
320 	 */
321 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
322 		xfs_warn(mp,
323 	"dirty log written in incompatible format - can't recover");
324 		xlog_header_check_dump(mp, head);
325 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
326 				 XFS_ERRLEVEL_HIGH, mp);
327 		return XFS_ERROR(EFSCORRUPTED);
328 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
329 		xfs_warn(mp,
330 	"dirty log entry has mismatched uuid - can't recover");
331 		xlog_header_check_dump(mp, head);
332 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
333 				 XFS_ERRLEVEL_HIGH, mp);
334 		return XFS_ERROR(EFSCORRUPTED);
335 	}
336 	return 0;
337 }
338 
339 /*
340  * read the head block of the log and check the header
341  */
342 STATIC int
343 xlog_header_check_mount(
344 	xfs_mount_t		*mp,
345 	xlog_rec_header_t	*head)
346 {
347 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
348 
349 	if (uuid_is_nil(&head->h_fs_uuid)) {
350 		/*
351 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
352 		 * h_fs_uuid is nil, we assume this log was last mounted
353 		 * by IRIX and continue.
354 		 */
355 		xfs_warn(mp, "nil uuid in log - IRIX style log");
356 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
357 		xfs_warn(mp, "log has mismatched uuid - can't recover");
358 		xlog_header_check_dump(mp, head);
359 		XFS_ERROR_REPORT("xlog_header_check_mount",
360 				 XFS_ERRLEVEL_HIGH, mp);
361 		return XFS_ERROR(EFSCORRUPTED);
362 	}
363 	return 0;
364 }
365 
366 STATIC void
367 xlog_recover_iodone(
368 	struct xfs_buf	*bp)
369 {
370 	if (bp->b_error) {
371 		/*
372 		 * We're not going to bother about retrying
373 		 * this during recovery. One strike!
374 		 */
375 		xfs_buf_ioerror_alert(bp, __func__);
376 		xfs_force_shutdown(bp->b_target->bt_mount,
377 					SHUTDOWN_META_IO_ERROR);
378 	}
379 	bp->b_iodone = NULL;
380 	xfs_buf_ioend(bp, 0);
381 }
382 
383 /*
384  * This routine finds (to an approximation) the first block in the physical
385  * log which contains the given cycle.  It uses a binary search algorithm.
386  * Note that the algorithm can not be perfect because the disk will not
387  * necessarily be perfect.
388  */
389 STATIC int
390 xlog_find_cycle_start(
391 	struct xlog	*log,
392 	struct xfs_buf	*bp,
393 	xfs_daddr_t	first_blk,
394 	xfs_daddr_t	*last_blk,
395 	uint		cycle)
396 {
397 	xfs_caddr_t	offset;
398 	xfs_daddr_t	mid_blk;
399 	xfs_daddr_t	end_blk;
400 	uint		mid_cycle;
401 	int		error;
402 
403 	end_blk = *last_blk;
404 	mid_blk = BLK_AVG(first_blk, end_blk);
405 	while (mid_blk != first_blk && mid_blk != end_blk) {
406 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
407 		if (error)
408 			return error;
409 		mid_cycle = xlog_get_cycle(offset);
410 		if (mid_cycle == cycle)
411 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
412 		else
413 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
414 		mid_blk = BLK_AVG(first_blk, end_blk);
415 	}
416 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
417 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
418 
419 	*last_blk = end_blk;
420 
421 	return 0;
422 }
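
/*
 * Illustrative run of the search above (example cycle layout): for a
 * log whose eight blocks carry cycle numbers
 *	3 3 3 2 2 2 2 2
 * a call with first_blk == 0, *last_blk == 7 and cycle == 2 keeps the
 * invariant that first_blk holds the other cycle while end_blk holds
 * the target cycle, and converges on *last_blk == 3, the first block
 * stamped with cycle 2.
 */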
423 
424 /*
425  * Check that a range of blocks does not contain stop_on_cycle_no.
426  * Fill in *new_blk with the block offset where such a block is
427  * found, or with -1 (an invalid block number) if there is no such
428  * block in the range.  The scan needs to occur from front to back
429  * and the pointer into the region must be updated since a later
430  * routine will need to perform another test.
431  */
432 STATIC int
433 xlog_find_verify_cycle(
434 	struct xlog	*log,
435 	xfs_daddr_t	start_blk,
436 	int		nbblks,
437 	uint		stop_on_cycle_no,
438 	xfs_daddr_t	*new_blk)
439 {
440 	xfs_daddr_t	i, j;
441 	uint		cycle;
442 	xfs_buf_t	*bp;
443 	xfs_daddr_t	bufblks;
444 	xfs_caddr_t	buf = NULL;
445 	int		error = 0;
446 
447 	/*
448 	 * Greedily allocate a buffer big enough to handle the full
449 	 * range of basic blocks we'll be examining.  If that fails,
450 	 * try a smaller size.  We need to be able to read at least
451 	 * a log sector, or we're out of luck.
452 	 */
453 	bufblks = 1 << ffs(nbblks);
454 	while (bufblks > log->l_logBBsize)
455 		bufblks >>= 1;
456 	while (!(bp = xlog_get_bp(log, bufblks))) {
457 		bufblks >>= 1;
458 		if (bufblks < log->l_sectBBsize)
459 			return ENOMEM;
460 	}
461 
462 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
463 		int	bcount;
464 
465 		bcount = min(bufblks, (start_blk + nbblks - i));
466 
467 		error = xlog_bread(log, i, bcount, bp, &buf);
468 		if (error)
469 			goto out;
470 
471 		for (j = 0; j < bcount; j++) {
472 			cycle = xlog_get_cycle(buf);
473 			if (cycle == stop_on_cycle_no) {
474 				*new_blk = i+j;
475 				goto out;
476 			}
477 
478 			buf += BBSIZE;
479 		}
480 	}
481 
482 	*new_blk = -1;
483 
484 out:
485 	xlog_put_bp(bp);
486 	return error;
487 }
488 
489 /*
490  * Potentially backup over partial log record write.
491  *
492  * In the typical case, last_blk is the number of the block directly after
493  * a good log record.  Therefore, we subtract one to get the block number
494  * of the last block in the given buffer.  extra_bblks contains the number
495  * of blocks we would have read on a previous read.  This happens when the
496  * last log record is split over the end of the physical log.
497  *
498  * extra_bblks is the number of blocks potentially verified on a previous
499  * call to this routine.
500  */
501 STATIC int
502 xlog_find_verify_log_record(
503 	struct xlog		*log,
504 	xfs_daddr_t		start_blk,
505 	xfs_daddr_t		*last_blk,
506 	int			extra_bblks)
507 {
508 	xfs_daddr_t		i;
509 	xfs_buf_t		*bp;
510 	xfs_caddr_t		offset = NULL;
511 	xlog_rec_header_t	*head = NULL;
512 	int			error = 0;
513 	int			smallmem = 0;
514 	int			num_blks = *last_blk - start_blk;
515 	int			xhdrs;
516 
517 	ASSERT(start_blk != 0 || *last_blk != start_blk);
518 
519 	if (!(bp = xlog_get_bp(log, num_blks))) {
520 		if (!(bp = xlog_get_bp(log, 1)))
521 			return ENOMEM;
522 		smallmem = 1;
523 	} else {
524 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
525 		if (error)
526 			goto out;
527 		offset += ((num_blks - 1) << BBSHIFT);
528 	}
529 
530 	for (i = (*last_blk) - 1; i >= 0; i--) {
531 		if (i < start_blk) {
532 			/* valid log record not found */
533 			xfs_warn(log->l_mp,
534 		"Log inconsistent (didn't find previous header)");
535 			ASSERT(0);
536 			error = XFS_ERROR(EIO);
537 			goto out;
538 		}
539 
540 		if (smallmem) {
541 			error = xlog_bread(log, i, 1, bp, &offset);
542 			if (error)
543 				goto out;
544 		}
545 
546 		head = (xlog_rec_header_t *)offset;
547 
548 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
549 			break;
550 
551 		if (!smallmem)
552 			offset -= BBSIZE;
553 	}
554 
555 	/*
556 	 * We hit the beginning of the physical log & still no header.  Return
557 	 * to caller.  If caller can handle a return of -1, then this routine
558 	 * will be called again for the end of the physical log.
559 	 */
560 	if (i == -1) {
561 		error = -1;
562 		goto out;
563 	}
564 
565 	/*
566 	 * We have the final block of the good log (the first block
567 	 * of the log record _before_ the head), so we check the uuid.
568 	 */
569 	if ((error = xlog_header_check_mount(log->l_mp, head)))
570 		goto out;
571 
572 	/*
573 	 * We may have found a log record header before we expected one.
574 	 * last_blk will be the 1st block # with a given cycle #.  We may end
575 	 * up reading an entire log record.  In this case, we don't want to
576 	 * reset last_blk.  Only when last_blk points in the middle of a log
577 	 * record do we update last_blk.
578 	 */
579 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
580 		uint	h_size = be32_to_cpu(head->h_size);
581 
582 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
583 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
584 			xhdrs++;
585 	} else {
586 		xhdrs = 1;
587 	}
588 
589 	if (*last_blk - i + extra_bblks !=
590 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
591 		*last_blk = i;
592 
593 out:
594 	xlog_put_bp(bp);
595 	return error;
596 }
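
/*
 * Example of the length check above (hypothetical numbers): a record
 * header at block i == 90 with BTOBB(h_len) == 7 data blocks and a
 * single header block (xhdrs == 1) describes a complete record ending
 * at block 97.  A *last_blk of 98 gives 98 - 90 + 0 == 7 + 1, so it is
 * left alone; a *last_blk of 95 points mid-record, so it is pulled back
 * to the header at block 90 and the partial record is discarded.
 */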
597 
598 /*
599  * Head is defined to be the point of the log where the next log write
600  * could go.  This means that incomplete LR writes at the end are
601  * eliminated when calculating the head.  We aren't guaranteed that
602  * previous LRs have complete transactions.  We only know that blocks
603  * with a cycle number of the current cycle minus one won't be present
604  * in the log if we start writing from our current block number.
605  *
606  * last_blk contains the block number of the first block with a given
607  * cycle number.
608  *
609  * Return: zero if normal, non-zero if error.
610  */
611 STATIC int
612 xlog_find_head(
613 	struct xlog	*log,
614 	xfs_daddr_t	*return_head_blk)
615 {
616 	xfs_buf_t	*bp;
617 	xfs_caddr_t	offset;
618 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
619 	int		num_scan_bblks;
620 	uint		first_half_cycle, last_half_cycle;
621 	uint		stop_on_cycle;
622 	int		error, log_bbnum = log->l_logBBsize;
623 
624 	/* Is the end of the log device zeroed? */
625 	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
626 		*return_head_blk = first_blk;
627 
628 		/* Is the whole lot zeroed? */
629 		if (!first_blk) {
630 			/* Linux XFS shouldn't generate totally zeroed logs -
631 			 * mkfs etc write a dummy unmount record to a fresh
632 			 * log so we can store the uuid in there
633 			 */
634 			xfs_warn(log->l_mp, "totally zeroed log");
635 		}
636 
637 		return 0;
638 	} else if (error) {
639 		xfs_warn(log->l_mp, "empty log check failed");
640 		return error;
641 	}
642 
643 	first_blk = 0;			/* get cycle # of 1st block */
644 	bp = xlog_get_bp(log, 1);
645 	if (!bp)
646 		return ENOMEM;
647 
648 	error = xlog_bread(log, 0, 1, bp, &offset);
649 	if (error)
650 		goto bp_err;
651 
652 	first_half_cycle = xlog_get_cycle(offset);
653 
654 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
655 	error = xlog_bread(log, last_blk, 1, bp, &offset);
656 	if (error)
657 		goto bp_err;
658 
659 	last_half_cycle = xlog_get_cycle(offset);
660 	ASSERT(last_half_cycle != 0);
661 
662 	/*
663 	 * If the 1st half cycle number is equal to the last half cycle number,
664 	 * then the entire log is stamped with the same cycle number.  In this
665 	 * case, head_blk can't be set to zero (which makes sense).  The below
666 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
667 	 * we set it to log_bbnum which is an invalid block number, but this
668 	 * value makes the math correct.  If head_blk doesn't change through
669 	 * all the tests below, *head_blk is set to zero at the very end rather
670 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
671 	 * in a circular file.
672 	 */
673 	if (first_half_cycle == last_half_cycle) {
674 		/*
675 		 * In this case we believe that the entire log should have
676 		 * cycle number last_half_cycle.  We need to scan backwards
677 		 * from the end verifying that there are no holes still
678 		 * containing last_half_cycle - 1.  If we find such a hole,
679 		 * then the start of that hole will be the new head.  The
680 		 * simple case looks like
681 		 *        x | x ... | x - 1 | x
682 		 * Another case that fits this picture would be
683 		 *        x | x + 1 | x ... | x
684 		 * In this case the head really is somewhere at the end of the
685 		 * log, as one of the latest writes at the beginning was
686 		 * incomplete.
687 		 * One more case is
688 		 *        x | x + 1 | x ... | x - 1 | x
689 		 * This is really the combination of the above two cases, and
690 		 * the head has to end up at the start of the x-1 hole at the
691 		 * end of the log.
692 		 *
693 		 * In the 256k log case, we will read from the beginning to the
694 		 * end of the log and search for cycle numbers equal to x-1.
695 		 * We don't worry about the x+1 blocks that we encounter,
696 		 * because we know that they cannot be the head since the log
697 		 * started with x.
698 		 */
699 		head_blk = log_bbnum;
700 		stop_on_cycle = last_half_cycle - 1;
701 	} else {
702 		/*
703 		 * In this case we want to find the first block with cycle
704 		 * number matching last_half_cycle.  We expect the log to be
705 		 * some variation on
706 		 *        x + 1 ... | x ... | x
707 		 * The first block with cycle number x (last_half_cycle) will
708 		 * be where the new head belongs.  First we do a binary search
709 		 * for the first occurrence of last_half_cycle.  The binary
710 		 * search may not be totally accurate, so then we scan back
711 		 * from there looking for occurrences of last_half_cycle before
712 		 * us.  If that backwards scan wraps around the beginning of
713 		 * the log, then we look for occurrences of last_half_cycle - 1
714 		 * at the end of the log.  The cases we're looking for look
715 		 * like
716 		 *                               v binary search stopped here
717 		 *        x + 1 ... | x | x + 1 | x ... | x
718 		 *                   ^ but we want to locate this spot
719 		 * or
720 		 *        <---------> less than scan distance
721 		 *        x + 1 ... | x ... | x - 1 | x
722 		 *                           ^ we want to locate this spot
723 		 */
724 		stop_on_cycle = last_half_cycle;
725 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
726 						&head_blk, last_half_cycle)))
727 			goto bp_err;
728 	}
729 
730 	/*
731 	 * Now validate the answer.  Scan back some number of maximum possible
732 	 * blocks and make sure each one has the expected cycle number.  The
733 	 * maximum is determined by the total possible amount of buffering
734 	 * in the in-core log.  The following number can be made tighter if
735 	 * we actually look at the block size of the filesystem.
736 	 */
737 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
738 	if (head_blk >= num_scan_bblks) {
739 		/*
740 		 * We are guaranteed that the entire check can be performed
741 		 * in one buffer.
742 		 */
743 		start_blk = head_blk - num_scan_bblks;
744 		if ((error = xlog_find_verify_cycle(log,
745 						start_blk, num_scan_bblks,
746 						stop_on_cycle, &new_blk)))
747 			goto bp_err;
748 		if (new_blk != -1)
749 			head_blk = new_blk;
750 	} else {		/* need to read 2 parts of log */
751 		/*
752 		 * We are going to scan backwards in the log in two parts.
753 		 * First we scan the physical end of the log.  In this part
754 		 * of the log, we are looking for blocks with cycle number
755 		 * last_half_cycle - 1.
756 		 * If we find one, then we know that the log starts there, as
757 		 * we've found a hole that didn't get written in going around
758 		 * the end of the physical log.  The simple case for this is
759 		 *        x + 1 ... | x ... | x - 1 | x
760 		 *        <---------> less than scan distance
761 		 * If all of the blocks at the end of the log have cycle number
762 		 * last_half_cycle, then we check the blocks at the start of
763 		 * the log looking for occurrences of last_half_cycle.  If we
764 		 * find one, then our current estimate for the location of the
765 		 * first occurrence of last_half_cycle is wrong and we move
766 		 * back to the hole we've found.  This case looks like
767 		 *        x + 1 ... | x | x + 1 | x ...
768 		 *                               ^ binary search stopped here
769 		 * Another case we need to handle that only occurs in 256k
770 		 * logs is
771 		 *        x + 1 ... | x ... | x+1 | x ...
772 		 *                   ^ binary search stops here
773 		 * In a 256k log, the scan at the end of the log will see the
774 		 * x + 1 blocks.  We need to skip past those since that is
775 		 * certainly not the head of the log.  By searching for
776 		 * last_half_cycle-1 we accomplish that.
777 		 */
778 		ASSERT(head_blk <= INT_MAX &&
779 			(xfs_daddr_t) num_scan_bblks >= head_blk);
780 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
781 		if ((error = xlog_find_verify_cycle(log, start_blk,
782 					num_scan_bblks - (int)head_blk,
783 					(stop_on_cycle - 1), &new_blk)))
784 			goto bp_err;
785 		if (new_blk != -1) {
786 			head_blk = new_blk;
787 			goto validate_head;
788 		}
789 
790 		/*
791 		 * Scan beginning of log now.  The last part of the physical
792 		 * log is good.  This scan needs to verify that it doesn't find
793 		 * the last_half_cycle.
794 		 */
795 		start_blk = 0;
796 		ASSERT(head_blk <= INT_MAX);
797 		if ((error = xlog_find_verify_cycle(log,
798 					start_blk, (int)head_blk,
799 					stop_on_cycle, &new_blk)))
800 			goto bp_err;
801 		if (new_blk != -1)
802 			head_blk = new_blk;
803 	}
804 
805 validate_head:
806 	/*
807 	 * Now we need to make sure head_blk is not pointing to a block in
808 	 * the middle of a log record.
809 	 */
810 	num_scan_bblks = XLOG_REC_SHIFT(log);
811 	if (head_blk >= num_scan_bblks) {
812 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
813 
814 		/* start ptr at last block ptr before head_blk */
815 		if ((error = xlog_find_verify_log_record(log, start_blk,
816 							&head_blk, 0)) == -1) {
817 			error = XFS_ERROR(EIO);
818 			goto bp_err;
819 		} else if (error)
820 			goto bp_err;
821 	} else {
822 		start_blk = 0;
823 		ASSERT(head_blk <= INT_MAX);
824 		if ((error = xlog_find_verify_log_record(log, start_blk,
825 							&head_blk, 0)) == -1) {
826 			/* We hit the beginning of the log during our search */
827 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
828 			new_blk = log_bbnum;
829 			ASSERT(start_blk <= INT_MAX &&
830 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
831 			ASSERT(head_blk <= INT_MAX);
832 			if ((error = xlog_find_verify_log_record(log,
833 							start_blk, &new_blk,
834 							(int)head_blk)) == -1) {
835 				error = XFS_ERROR(EIO);
836 				goto bp_err;
837 			} else if (error)
838 				goto bp_err;
839 			if (new_blk != log_bbnum)
840 				head_blk = new_blk;
841 		} else if (error)
842 			goto bp_err;
843 	}
844 
845 	xlog_put_bp(bp);
846 	if (head_blk == log_bbnum)
847 		*return_head_blk = 0;
848 	else
849 		*return_head_blk = head_blk;
850 	/*
851 	 * When returning here, we have a good block number.  Bad block
852 	 * means that during a previous crash, we didn't have a clean break
853 	 * from cycle number N to cycle number N-1.  In this case, we need
854 	 * to find the first block with cycle number N-1.
855 	 */
856 	return 0;
857 
858  bp_err:
859 	xlog_put_bp(bp);
860 
861 	if (error)
862 		xfs_warn(log->l_mp, "failed to find log head");
863 	return error;
864 }
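
/*
 * A compact example of the search above (cycle numbers are
 * illustrative): for an eight-block log stamped
 *	3 3 3 2 2 2 2 2
 * first_half_cycle == 3 and last_half_cycle == 2, so we binary search
 * for the first block with cycle 2 and get head_blk == 3.  Blocks 0-2
 * were overwritten in the newer cycle, so the next write belongs at
 * block 3; the verification passes then scan backwards from that
 * estimate for stray cycle values and partial log records.
 */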
865 
866 /*
867  * Find the sync block number or the tail of the log.
868  *
869  * This will be the block number of the last record to have its
870  * associated buffers synced to disk.  Every log record header has
871  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
872  * to get a sync block number.  The only concern is to figure out which
873  * log record header to believe.
874  *
875  * The following algorithm uses the log record header with the largest
876  * lsn.  The entire log record does not need to be valid.  We only care
877  * that the header is valid.
878  *
879  * We could speed up search by using current head_blk buffer, but it is not
880  * available.
881  */
882 STATIC int
883 xlog_find_tail(
884 	struct xlog		*log,
885 	xfs_daddr_t		*head_blk,
886 	xfs_daddr_t		*tail_blk)
887 {
888 	xlog_rec_header_t	*rhead;
889 	xlog_op_header_t	*op_head;
890 	xfs_caddr_t		offset = NULL;
891 	xfs_buf_t		*bp;
892 	int			error, i, found;
893 	xfs_daddr_t		umount_data_blk;
894 	xfs_daddr_t		after_umount_blk;
895 	xfs_lsn_t		tail_lsn;
896 	int			hblks;
897 
898 	found = 0;
899 
900 	/*
901 	 * Find previous log record
902 	 */
903 	if ((error = xlog_find_head(log, head_blk)))
904 		return error;
905 
906 	bp = xlog_get_bp(log, 1);
907 	if (!bp)
908 		return ENOMEM;
909 	if (*head_blk == 0) {				/* special case */
910 		error = xlog_bread(log, 0, 1, bp, &offset);
911 		if (error)
912 			goto done;
913 
914 		if (xlog_get_cycle(offset) == 0) {
915 			*tail_blk = 0;
916 			/* leave all other log inited values alone */
917 			goto done;
918 		}
919 	}
920 
921 	/*
922 	 * Search backwards looking for log record header block
923 	 */
924 	ASSERT(*head_blk < INT_MAX);
925 	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
926 		error = xlog_bread(log, i, 1, bp, &offset);
927 		if (error)
928 			goto done;
929 
930 		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
931 			found = 1;
932 			break;
933 		}
934 	}
935 	/*
936 	 * If we haven't found the log record header block, start looking
937 	 * again from the end of the physical log.  XXXmiken: There should be
938 	 * a check here to make sure we didn't search more than N blocks in
939 	 * the previous code.
940 	 */
941 	if (!found) {
942 		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
943 			error = xlog_bread(log, i, 1, bp, &offset);
944 			if (error)
945 				goto done;
946 
947 			if (*(__be32 *)offset ==
948 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
949 				found = 2;
950 				break;
951 			}
952 		}
953 	}
954 	if (!found) {
955 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
956 		ASSERT(0);
957 		return XFS_ERROR(EIO);
958 	}
959 
960 	/* find blk_no of tail of log */
961 	rhead = (xlog_rec_header_t *)offset;
962 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
963 
964 	/*
965 	 * Reset log values according to the state of the log when we
966 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
967 	 * one because the next write starts a new cycle rather than
968 	 * continuing the cycle of the last good log record.  At this
969 	 * point we have guaranteed that all partial log records have been
970 	 * accounted for.  Therefore, we know that the last good log record
971 	 * written was complete and ended exactly on the end boundary
972 	 * of the physical log.
973 	 */
974 	log->l_prev_block = i;
975 	log->l_curr_block = (int)*head_blk;
976 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
977 	if (found == 2)
978 		log->l_curr_cycle++;
979 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
980 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
981 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
982 					BBTOB(log->l_curr_block));
983 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
984 					BBTOB(log->l_curr_block));
985 
986 	/*
987 	 * Look for unmount record.  If we find it, then we know there
988 	 * was a clean unmount.  Since 'i' could be the last block in
989 	 * the physical log, we convert to a log block before comparing
990 	 * to the head_blk.
991 	 *
992 	 * Save the current tail lsn to use to pass to
993 	 * xlog_clear_stale_blocks() below.  We won't want to clear the
994 	 * unmount record if there is one, so we pass the lsn of the
995 	 * unmount record rather than the block after it.
996 	 */
997 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
998 		int	h_size = be32_to_cpu(rhead->h_size);
999 		int	h_version = be32_to_cpu(rhead->h_version);
1000 
1001 		if ((h_version & XLOG_VERSION_2) &&
1002 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1003 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1004 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1005 				hblks++;
1006 		} else {
1007 			hblks = 1;
1008 		}
1009 	} else {
1010 		hblks = 1;
1011 	}
1012 	after_umount_blk = (i + hblks + (int)
1013 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1014 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1015 	if (*head_blk == after_umount_blk &&
1016 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1017 		umount_data_blk = (i + hblks) % log->l_logBBsize;
1018 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1019 		if (error)
1020 			goto done;
1021 
1022 		op_head = (xlog_op_header_t *)offset;
1023 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1024 			/*
1025 			 * Set tail and last sync so that newly written
1026 			 * log records will point recovery to after the
1027 			 * current unmount record.
1028 			 */
1029 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1030 					log->l_curr_cycle, after_umount_blk);
1031 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1032 					log->l_curr_cycle, after_umount_blk);
1033 			*tail_blk = after_umount_blk;
1034 
1035 			/*
1036 			 * Note that the unmount was clean. If the unmount
1037 			 * was not clean, we need to know this to rebuild the
1038 			 * superblock counters from the perag headers if we
1039 			 * have a filesystem using non-persistent counters.
1040 			 */
1041 			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1042 		}
1043 	}
1044 
1045 	/*
1046 	 * Make sure that there are no blocks in front of the head
1047 	 * with the same cycle number as the head.  This can happen
1048 	 * because we allow multiple outstanding log writes concurrently,
1049 	 * and the later writes might make it out before earlier ones.
1050 	 *
1051 	 * We use the lsn from before modifying it so that we'll never
1052 	 * overwrite the unmount record after a clean unmount.
1053 	 *
1054 	 * Do this only if we are going to recover the filesystem
1055 	 *
1056 	 * NOTE: This used to say "if (!readonly)"
1057 	 * However on Linux, we can & do recover a read-only filesystem.
1058 	 * We only skip recovery if NORECOVERY is specified on mount,
1059 	 * in which case we would not be here.
1060 	 *
1061 	 * But... if the -device- itself is readonly, just skip this.
1062 	 * We can't recover this device anyway, so it won't matter.
1063 	 */
1064 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1065 		error = xlog_clear_stale_blocks(log, tail_lsn);
1066 
1067 done:
1068 	xlog_put_bp(bp);
1069 
1070 	if (error)
1071 		xfs_warn(log->l_mp, "failed to locate log tail");
1072 	return error;
1073 }
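
/*
 * Example of the unmount record arithmetic above (hypothetical values):
 * a v2 log with h_size == 64k needs hblks == 64k / 32k == 2 header
 * blocks.  For a record header at block i with BTOBB(h_len) == 1 and
 * h_num_logops == 1, umount_data_blk == (i + 2) % l_logBBsize and
 * after_umount_blk == (i + 3) % l_logBBsize; only when after_umount_blk
 * equals *head_blk can the record be a clean unmount record.
 */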
1074 
1075 /*
1076  * Is the log zeroed at all?
1077  *
1078  * The last binary search should be changed to perform an X block read
1079  * once X becomes small enough.  You can then search linearly through
1080  * the X blocks.  This will cut down on the number of reads we need to do.
1081  *
1082  * If the log is partially zeroed, this routine will pass back the blkno
1083  * of the first block with cycle number 0.  It won't have a complete LR
1084  * preceding it.
1085  *
1086  * Return:
1087  *	0  => the log is completely written to
1088  *	-1 => use *blk_no as the first block of the log
1089  *	>0 => error has occurred
1090  */
1091 STATIC int
1092 xlog_find_zeroed(
1093 	struct xlog	*log,
1094 	xfs_daddr_t	*blk_no)
1095 {
1096 	xfs_buf_t	*bp;
1097 	xfs_caddr_t	offset;
1098 	uint	        first_cycle, last_cycle;
1099 	xfs_daddr_t	new_blk, last_blk, start_blk;
1100 	xfs_daddr_t     num_scan_bblks;
1101 	int	        error, log_bbnum = log->l_logBBsize;
1102 
1103 	*blk_no = 0;
1104 
1105 	/* check totally zeroed log */
1106 	bp = xlog_get_bp(log, 1);
1107 	if (!bp)
1108 		return ENOMEM;
1109 	error = xlog_bread(log, 0, 1, bp, &offset);
1110 	if (error)
1111 		goto bp_err;
1112 
1113 	first_cycle = xlog_get_cycle(offset);
1114 	if (first_cycle == 0) {		/* completely zeroed log */
1115 		*blk_no = 0;
1116 		xlog_put_bp(bp);
1117 		return -1;
1118 	}
1119 
1120 	/* check partially zeroed log */
1121 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1122 	if (error)
1123 		goto bp_err;
1124 
1125 	last_cycle = xlog_get_cycle(offset);
1126 	if (last_cycle != 0) {		/* log completely written to */
1127 		xlog_put_bp(bp);
1128 		return 0;
1129 	} else if (first_cycle != 1) {
1130 		/*
1131 		 * If the cycle of the last block is zero, the cycle of
1132 		 * the first block must be 1. If it's not, maybe we're
1133 		 * not looking at a log... Bail out.
1134 		 */
1135 		xfs_warn(log->l_mp,
1136 			"Log inconsistent or not a log (last==0, first!=1)");
1137 		return XFS_ERROR(EINVAL);
1138 	}
1139 
1140 	/* we have a partially zeroed log */
1141 	last_blk = log_bbnum-1;
1142 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1143 		goto bp_err;
1144 
1145 	/*
1146 	 * Validate the answer.  Because there is no way to guarantee that
1147 	 * the entire log is made up of log records which are the same size,
1148 	 * we scan over the defined maximum blocks.  At this point, the maximum
1149 	 * is not chosen to mean anything special.   XXXmiken
1150 	 */
1151 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1152 	ASSERT(num_scan_bblks <= INT_MAX);
1153 
1154 	if (last_blk < num_scan_bblks)
1155 		num_scan_bblks = last_blk;
1156 	start_blk = last_blk - num_scan_bblks;
1157 
1158 	/*
1159 	 * We search for any instances of cycle number 0 that occur before
1160 	 * our current estimate of the head.  What we're trying to detect is
1161 	 *        1 ... | 0 | 1 | 0...
1162 	 *                       ^ binary search ends here
1163 	 */
1164 	if ((error = xlog_find_verify_cycle(log, start_blk,
1165 					 (int)num_scan_bblks, 0, &new_blk)))
1166 		goto bp_err;
1167 	if (new_blk != -1)
1168 		last_blk = new_blk;
1169 
1170 	/*
1171 	 * Potentially backup over partial log record write.  We don't need
1172 	 * to search the end of the log because we know it is zero.
1173 	 */
1174 	if ((error = xlog_find_verify_log_record(log, start_blk,
1175 				&last_blk, 0)) == -1) {
1176 		error = XFS_ERROR(EIO);
1177 		goto bp_err;
1178 	} else if (error)
1179 		goto bp_err;
1180 
1181 	*blk_no = last_blk;
1182 bp_err:
1183 	xlog_put_bp(bp);
1184 	if (error)
1185 		return error;
1186 	return -1;
1187 }
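
/*
 * Example (illustrative cycle layout): a log stamped
 *	1 1 1 1 0 0 ... 0
 * has first_cycle == 1 and last_cycle == 0, so it is partially zeroed.
 * The cycle-start search converges on block 4, the verify scan confirms
 * that no cycle-0 blocks precede it, and after backing up over any
 * partial record we return -1 with *blk_no == 4 as the first zeroed
 * block, which the caller treats as the head of the log.
 */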
1188 
1189 /*
1190  * These are simple subroutines used by xlog_clear_stale_blocks() below
1191  * to initialize a buffer full of empty log record headers and write
1192  * them into the log.
1193  */
1194 STATIC void
1195 xlog_add_record(
1196 	struct xlog		*log,
1197 	xfs_caddr_t		buf,
1198 	int			cycle,
1199 	int			block,
1200 	int			tail_cycle,
1201 	int			tail_block)
1202 {
1203 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1204 
1205 	memset(buf, 0, BBSIZE);
1206 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1207 	recp->h_cycle = cpu_to_be32(cycle);
1208 	recp->h_version = cpu_to_be32(
1209 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1210 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1211 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1212 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1213 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1214 }
1215 
1216 STATIC int
1217 xlog_write_log_records(
1218 	struct xlog	*log,
1219 	int		cycle,
1220 	int		start_block,
1221 	int		blocks,
1222 	int		tail_cycle,
1223 	int		tail_block)
1224 {
1225 	xfs_caddr_t	offset;
1226 	xfs_buf_t	*bp;
1227 	int		balign, ealign;
1228 	int		sectbb = log->l_sectBBsize;
1229 	int		end_block = start_block + blocks;
1230 	int		bufblks;
1231 	int		error = 0;
1232 	int		i, j = 0;
1233 
1234 	/*
1235 	 * Greedily allocate a buffer big enough to handle the full
1236 	 * range of basic blocks to be written.  If that fails, try
1237 	 * a smaller size.  We need to be able to write at least a
1238 	 * log sector, or we're out of luck.
1239 	 */
1240 	bufblks = 1 << ffs(blocks);
1241 	while (bufblks > log->l_logBBsize)
1242 		bufblks >>= 1;
1243 	while (!(bp = xlog_get_bp(log, bufblks))) {
1244 		bufblks >>= 1;
1245 		if (bufblks < sectbb)
1246 			return ENOMEM;
1247 	}
1248 
1249 	/* We may need to do a read at the start to fill in part of
1250 	 * the buffer in the starting sector not covered by the first
1251 	 * write below.
1252 	 */
1253 	balign = round_down(start_block, sectbb);
1254 	if (balign != start_block) {
1255 		error = xlog_bread_noalign(log, start_block, 1, bp);
1256 		if (error)
1257 			goto out_put_bp;
1258 
1259 		j = start_block - balign;
1260 	}
1261 
1262 	for (i = start_block; i < end_block; i += bufblks) {
1263 		int		bcount, endcount;
1264 
1265 		bcount = min(bufblks, end_block - start_block);
1266 		endcount = bcount - j;
1267 
1268 		/* We may need to do a read at the end to fill in part of
1269 		 * the buffer in the final sector not covered by the write.
1270 		 * If this is the same sector as the above read, skip it.
1271 		 */
1272 		ealign = round_down(end_block, sectbb);
1273 		if (j == 0 && (start_block + endcount > ealign)) {
1274 			offset = bp->b_addr + BBTOB(ealign - start_block);
1275 			error = xlog_bread_offset(log, ealign, sectbb,
1276 							bp, offset);
1277 			if (error)
1278 				break;
1279 
1280 		}
1281 
1282 		offset = xlog_align(log, start_block, endcount, bp);
1283 		for (; j < endcount; j++) {
1284 			xlog_add_record(log, offset, cycle, i+j,
1285 					tail_cycle, tail_block);
1286 			offset += BBSIZE;
1287 		}
1288 		error = xlog_bwrite(log, start_block, endcount, bp);
1289 		if (error)
1290 			break;
1291 		start_block += endcount;
1292 		j = 0;
1293 	}
1294 
1295  out_put_bp:
1296 	xlog_put_bp(bp);
1297 	return error;
1298 }
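
/*
 * Example of the head-fill read above (hypothetical numbers): with
 * sectbb == 8 and start_block == 10, balign == 8 != start_block, so the
 * sector covering blocks 8-15 is read into the buffer first.  The later
 * write-back of that full sector then preserves the old contents of
 * blocks 8 and 9, and j == 2 records how many leading basic blocks of
 * the buffer already hold pre-existing data.
 */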
1299 
1300 /*
1301  * This routine is called to blow away any incomplete log writes out
1302  * in front of the log head.  We do this so that we won't become confused
1303  * if we come up, write only a little bit more, and then crash again.
1304  * If we leave the partial log records out there, this situation could
1305  * cause us to think those partial writes are valid blocks since they
1306  * have the current cycle number.  We get rid of them by overwriting them
1307  * with empty log records with the old cycle number rather than the
1308  * current one.
1309  *
1310  * The tail lsn is passed in rather than taken from
1311  * the log so that we will not write over the unmount record after a
1312  * clean unmount in a 512 block log.  Doing so would leave the log without
1313  * any valid log records in it until a new one was written.  If we crashed
1314  * during that time we would not be able to recover.
1315  */
1316 STATIC int
1317 xlog_clear_stale_blocks(
1318 	struct xlog	*log,
1319 	xfs_lsn_t	tail_lsn)
1320 {
1321 	int		tail_cycle, head_cycle;
1322 	int		tail_block, head_block;
1323 	int		tail_distance, max_distance;
1324 	int		distance;
1325 	int		error;
1326 
1327 	tail_cycle = CYCLE_LSN(tail_lsn);
1328 	tail_block = BLOCK_LSN(tail_lsn);
1329 	head_cycle = log->l_curr_cycle;
1330 	head_block = log->l_curr_block;
1331 
1332 	/*
1333 	 * Figure out the distance between the new head of the log
1334 	 * and the tail.  We want to write over any blocks beyond the
1335 	 * head that we may have written just before the crash, but
1336 	 * we don't want to overwrite the tail of the log.
1337 	 */
1338 	if (head_cycle == tail_cycle) {
1339 		/*
1340 		 * The tail is behind the head in the physical log,
1341 		 * so the distance from the head to the tail is the
1342 		 * distance from the head to the end of the log plus
1343 		 * the distance from the beginning of the log to the
1344 		 * tail.
1345 		 */
1346 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1347 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1348 					 XFS_ERRLEVEL_LOW, log->l_mp);
1349 			return XFS_ERROR(EFSCORRUPTED);
1350 		}
1351 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1352 	} else {
1353 		/*
1354 		 * The head is behind the tail in the physical log,
1355 		 * so the distance from the head to the tail is just
1356 		 * the tail block minus the head block.
1357 		 */
1358 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1359 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1360 					 XFS_ERRLEVEL_LOW, log->l_mp);
1361 			return XFS_ERROR(EFSCORRUPTED);
1362 		}
1363 		tail_distance = tail_block - head_block;
1364 	}
1365 
1366 	/*
1367 	 * If the head is right up against the tail, we can't clear
1368 	 * anything.
1369 	 */
1370 	if (tail_distance <= 0) {
1371 		ASSERT(tail_distance == 0);
1372 		return 0;
1373 	}
1374 
1375 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1376 	/*
1377 	 * Take the smaller of the maximum amount of outstanding I/O
1378 	 * we could have and the distance to the tail to clear out.
1379 	 * We take the smaller so that we don't overwrite the tail and
1380 	 * we don't waste all day writing from the head to the tail
1381 	 * for no reason.
1382 	 */
1383 	max_distance = MIN(max_distance, tail_distance);
1384 
1385 	if ((head_block + max_distance) <= log->l_logBBsize) {
1386 		/*
1387 		 * We can stomp all the blocks we need to without
1388 		 * wrapping around the end of the log.  Just do it
1389 		 * in a single write.  Use the cycle number of the
1390 		 * current cycle minus one so that the log will look like:
1391 		 *     n ... | n - 1 ...
1392 		 */
1393 		error = xlog_write_log_records(log, (head_cycle - 1),
1394 				head_block, max_distance, tail_cycle,
1395 				tail_block);
1396 		if (error)
1397 			return error;
1398 	} else {
1399 		/*
1400 		 * We need to wrap around the end of the physical log in
1401 		 * order to clear all the blocks.  Do it in two separate
1402 		 * I/Os.  The first write should be from the head to the
1403 		 * end of the physical log, and it should use the current
1404 		 * cycle number minus one just like above.
1405 		 */
1406 		distance = log->l_logBBsize - head_block;
1407 		error = xlog_write_log_records(log, (head_cycle - 1),
1408 				head_block, distance, tail_cycle,
1409 				tail_block);
1410 
1411 		if (error)
1412 			return error;
1413 
1414 		/*
1415 		 * Now write the blocks at the start of the physical log.
1416 		 * This writes the remainder of the blocks we want to clear.
1417 		 * It uses the current cycle number since we're now on the
1418 		 * same cycle as the head so that we get:
1419 		 *    n ... n ... | n - 1 ...
1420 		 *    ^^^^^ blocks we're writing
1421 		 */
1422 		distance = max_distance - (log->l_logBBsize - head_block);
1423 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1424 				tail_cycle, tail_block);
1425 		if (error)
1426 			return error;
1427 	}
1428 
1429 	return 0;
1430 }
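
/*
 * Example of the wrapped case above (hypothetical numbers): in a
 * 100-block log with head_cycle == tail_cycle == 5, tail_block == 10,
 * head_block == 90 and max_distance == 20, the first write stamps
 * blocks 90-99 with cycle 4 (head_cycle - 1) and the second stamps
 * blocks 0-9 with cycle 5, leaving the tail at block 10 untouched and
 * the log looking like n ... n ... | n - 1.
 */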
1431 
1432 /******************************************************************************
1433  *
1434  *		Log recover routines
1435  *
1436  ******************************************************************************
1437  */
1438 
1439 STATIC xlog_recover_t *
1440 xlog_recover_find_tid(
1441 	struct hlist_head	*head,
1442 	xlog_tid_t		tid)
1443 {
1444 	xlog_recover_t		*trans;
1445 	struct hlist_node	*n;
1446 
1447 	hlist_for_each_entry(trans, n, head, r_list) {
1448 		if (trans->r_log_tid == tid)
1449 			return trans;
1450 	}
1451 	return NULL;
1452 }
1453 
1454 STATIC void
1455 xlog_recover_new_tid(
1456 	struct hlist_head	*head,
1457 	xlog_tid_t		tid,
1458 	xfs_lsn_t		lsn)
1459 {
1460 	xlog_recover_t		*trans;
1461 
1462 	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1463 	trans->r_log_tid   = tid;
1464 	trans->r_lsn	   = lsn;
1465 	INIT_LIST_HEAD(&trans->r_itemq);
1466 
1467 	INIT_HLIST_NODE(&trans->r_list);
1468 	hlist_add_head(&trans->r_list, head);
1469 }
1470 
1471 STATIC void
1472 xlog_recover_add_item(
1473 	struct list_head	*head)
1474 {
1475 	xlog_recover_item_t	*item;
1476 
1477 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1478 	INIT_LIST_HEAD(&item->ri_list);
1479 	list_add_tail(&item->ri_list, head);
1480 }
1481 
1482 STATIC int
1483 xlog_recover_add_to_cont_trans(
1484 	struct xlog		*log,
1485 	struct xlog_recover	*trans,
1486 	xfs_caddr_t		dp,
1487 	int			len)
1488 {
1489 	xlog_recover_item_t	*item;
1490 	xfs_caddr_t		ptr, old_ptr;
1491 	int			old_len;
1492 
1493 	if (list_empty(&trans->r_itemq)) {
1494 		/* finish copying rest of trans header */
1495 		xlog_recover_add_item(&trans->r_itemq);
1496 		ptr = (xfs_caddr_t) &trans->r_theader +
1497 				sizeof(xfs_trans_header_t) - len;
1498 		memcpy(ptr, dp, len); /* d, s, l */
1499 		return 0;
1500 	}
1501 	/* take the tail entry */
1502 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1503 
1504 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1505 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
1506 
1507 	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1508 	memcpy(&ptr[old_len], dp, len); /* d, s, l */
1509 	item->ri_buf[item->ri_cnt-1].i_len += len;
1510 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1511 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1512 	return 0;
1513 }
1514 
1515 /*
1516  * The next region to add is the start of a new region.  It could be
1517  * a whole region or it could be the first part of a new region.  Because
1518  * of this, the assumption here is that the type and size fields of all
1519  * format structures fit into the first 32 bits of the structure.
1520  *
1521  * This works because all regions must be 32 bit aligned.  Therefore, we
1522  * either have both fields or we have neither field.  In the case we have
1523  * neither field, the data part of the region is zero length.  We only have
1524  * a log_op_header and can throw away the header since a new one will appear
1525  * later.  If we have at least 4 bytes, then we can determine how many regions
1526  * will appear in the current log item.
1527  */
1528 STATIC int
1529 xlog_recover_add_to_trans(
1530 	struct xlog		*log,
1531 	struct xlog_recover	*trans,
1532 	xfs_caddr_t		dp,
1533 	int			len)
1534 {
1535 	xfs_inode_log_format_t	*in_f;			/* any will do */
1536 	xlog_recover_item_t	*item;
1537 	xfs_caddr_t		ptr;
1538 
1539 	if (!len)
1540 		return 0;
1541 	if (list_empty(&trans->r_itemq)) {
1542 		/* we need to catch log corruptions here */
1543 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1544 			xfs_warn(log->l_mp, "%s: bad header magic number",
1545 				__func__);
1546 			ASSERT(0);
1547 			return XFS_ERROR(EIO);
1548 		}
1549 		if (len == sizeof(xfs_trans_header_t))
1550 			xlog_recover_add_item(&trans->r_itemq);
1551 		memcpy(&trans->r_theader, dp, len); /* d, s, l */
1552 		return 0;
1553 	}
1554 
1555 	ptr = kmem_alloc(len, KM_SLEEP);
1556 	memcpy(ptr, dp, len);
1557 	in_f = (xfs_inode_log_format_t *)ptr;
1558 
1559 	/* take the tail entry */
1560 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1561 	if (item->ri_total != 0 &&
1562 	     item->ri_total == item->ri_cnt) {
1563 		/* tail item is in use, get a new one */
1564 		xlog_recover_add_item(&trans->r_itemq);
1565 		item = list_entry(trans->r_itemq.prev,
1566 					xlog_recover_item_t, ri_list);
1567 	}
1568 
1569 	if (item->ri_total == 0) {		/* first region to be added */
1570 		if (in_f->ilf_size == 0 ||
1571 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1572 			xfs_warn(log->l_mp,
1573 		"bad number of regions (%d) in inode log format",
1574 				  in_f->ilf_size);
1575 			ASSERT(0);
1576 			return XFS_ERROR(EIO);
1577 		}
1578 
1579 		item->ri_total = in_f->ilf_size;
1580 		item->ri_buf =
1581 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1582 				    KM_SLEEP);
1583 	}
1584 	ASSERT(item->ri_total > item->ri_cnt);
1585 	/* Description region is ri_buf[0] */
1586 	item->ri_buf[item->ri_cnt].i_addr = ptr;
1587 	item->ri_buf[item->ri_cnt].i_len  = len;
1588 	item->ri_cnt++;
1589 	trace_xfs_log_recover_item_add(log, trans, item, 0);
1590 	return 0;
1591 }
1592 
1593 /*
1594  * Sort the log items in the transaction. Cancelled buffers need
1595  * to be put first so they are processed before any items that might
1596  * modify the buffers. If they are cancelled, then the modifications
1597  * don't need to be replayed.
1598  */
1599 STATIC int
1600 xlog_recover_reorder_trans(
1601 	struct xlog		*log,
1602 	struct xlog_recover	*trans,
1603 	int			pass)
1604 {
1605 	xlog_recover_item_t	*item, *n;
1606 	LIST_HEAD(sort_list);
1607 
1608 	list_splice_init(&trans->r_itemq, &sort_list);
1609 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1610 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1611 
1612 		switch (ITEM_TYPE(item)) {
1613 		case XFS_LI_BUF:
1614 			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1615 				trace_xfs_log_recover_item_reorder_head(log,
1616 							trans, item, pass);
1617 				list_move(&item->ri_list, &trans->r_itemq);
1618 				break;
1619 			}
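			/* fall through: cancelled buffers are sorted to the tail */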
1620 		case XFS_LI_INODE:
1621 		case XFS_LI_DQUOT:
1622 		case XFS_LI_QUOTAOFF:
1623 		case XFS_LI_EFD:
1624 		case XFS_LI_EFI:
1625 			trace_xfs_log_recover_item_reorder_tail(log,
1626 							trans, item, pass);
1627 			list_move_tail(&item->ri_list, &trans->r_itemq);
1628 			break;
1629 		default:
1630 			xfs_warn(log->l_mp,
1631 				"%s: unrecognized type of log operation",
1632 				__func__);
1633 			ASSERT(0);
1634 			return XFS_ERROR(EIO);
1635 		}
1636 	}
1637 	ASSERT(list_empty(&sort_list));
1638 	return 0;
1639 }
1640 
1641 /*
1642  * Build up the table of buf cancel records so that we don't replay
1643  * cancelled data in the second pass.  For buffer records that are
1644  * not cancel records, there is nothing to do here so we just return.
1645  *
1646  * If we get a cancel record which is already in the table, this indicates
1647  * that the buffer was cancelled multiple times.  In order to ensure
1648  * that during pass 2 we keep the record in the table until we reach its
1649  * last occurrence in the log, we keep a reference count in the cancel
1650  * record in the table to tell us how many times we expect to see this
1651  * record during the second pass.
1652  */
1653 STATIC int
1654 xlog_recover_buffer_pass1(
1655 	struct xlog			*log,
1656 	struct xlog_recover_item	*item)
1657 {
1658 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1659 	struct list_head	*bucket;
1660 	struct xfs_buf_cancel	*bcp;
1661 
1662 	/*
1663 	 * If this isn't a cancel buffer item, then just return.
1664 	 */
1665 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1666 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1667 		return 0;
1668 	}
1669 
1670 	/*
1671 	 * Insert an xfs_buf_cancel record into the hash table of them.
1672 	 * If there is already an identical record, bump its reference count.
1673 	 */
1674 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1675 	list_for_each_entry(bcp, bucket, bc_list) {
1676 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1677 		    bcp->bc_len == buf_f->blf_len) {
1678 			bcp->bc_refcount++;
1679 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1680 			return 0;
1681 		}
1682 	}
1683 
1684 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1685 	bcp->bc_blkno = buf_f->blf_blkno;
1686 	bcp->bc_len = buf_f->blf_len;
1687 	bcp->bc_refcount = 1;
1688 	list_add_tail(&bcp->bc_list, bucket);
1689 
1690 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1691 	return 0;
1692 }
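
/*
 * For example (hypothetical): if the same buffer is cancelled twice in
 * the region of the log being replayed, pass 1 leaves a single table
 * entry with bc_refcount == 2.  During pass 2,
 * xlog_check_buffer_cancelled() decrements the count each time the
 * cancel item itself is encountered and frees the entry on the last
 * reference, so a later reuse of the same block range replays normally.
 */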
1693 
1694 /*
1695  * Check to see whether the buffer being recovered has a corresponding
1696  * entry in the buffer cancel record table.  If it does then return 1
1697  * so that it will be cancelled, otherwise return 0.  If the buffer is
1698  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1699  * the refcount on the entry in the table and remove it from the table
1700  * if this is the last reference.
1701  *
1702  * We remove the cancel record from the table when we encounter its
1703  * last occurrence in the log so that if the same buffer is re-used
1704  * again after its last cancellation we actually replay the changes
1705  * made at that point.
1706  */
1707 STATIC int
1708 xlog_check_buffer_cancelled(
1709 	struct xlog		*log,
1710 	xfs_daddr_t		blkno,
1711 	uint			len,
1712 	ushort			flags)
1713 {
1714 	struct list_head	*bucket;
1715 	struct xfs_buf_cancel	*bcp;
1716 
1717 	if (log->l_buf_cancel_table == NULL) {
1718 		/*
1719 		 * There is nothing in the table built in pass one,
1720 		 * so this buffer must not be cancelled.
1721 		 */
1722 		ASSERT(!(flags & XFS_BLF_CANCEL));
1723 		return 0;
1724 	}
1725 
1726 	/*
1727 	 * Search for an entry in the cancel table that matches our buffer.
1728 	 */
1729 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1730 	list_for_each_entry(bcp, bucket, bc_list) {
1731 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1732 			goto found;
1733 	}
1734 
1735 	/*
1736 	 * We didn't find a corresponding entry in the table, so return 0 so
1737 	 * that the buffer is NOT cancelled.
1738 	 */
1739 	ASSERT(!(flags & XFS_BLF_CANCEL));
1740 	return 0;
1741 
1742 found:
1743 	/*
1744 	 * We've got a match, so return 1 so that the recovery of this buffer
1745 	 * is cancelled.  If this buffer is actually a buffer cancel log
1746 	 * item, then decrement the refcount on the one in the table and
1747 	 * remove it if this is the last reference.
1748 	 */
1749 	if (flags & XFS_BLF_CANCEL) {
1750 		if (--bcp->bc_refcount == 0) {
1751 			list_del(&bcp->bc_list);
1752 			kmem_free(bcp);
1753 		}
1754 	}
1755 	return 1;
1756 }
1757 
1758 /*
1759  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1760  * data which should be recovered is that which corresponds to the
1761  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1762  * data for the inodes is always logged through the inodes themselves rather
1763  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1764  *
1765  * The only time when buffers full of inodes are fully recovered is when the
1766  * buffer is full of newly allocated inodes.  In this case the buffer will
1767  * not be marked as an inode buffer and so will be sent to
1768  * xlog_recover_do_reg_buffer() below during recovery.
1769  */
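/*
 * For example (sizes are illustrative): with 256 byte inodes, inode i
 * of the buffer has its copy of di_next_unlinked at byte offset
 *
 *	i * 256 + offsetof(xfs_dinode_t, di_next_unlinked)
 *
 * and only those four byte fields are extracted from the logged
 * regions and copied into the buffer below.
 */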
1770 STATIC int
1771 xlog_recover_do_inode_buffer(
1772 	struct xfs_mount	*mp,
1773 	xlog_recover_item_t	*item,
1774 	struct xfs_buf		*bp,
1775 	xfs_buf_log_format_t	*buf_f)
1776 {
1777 	int			i;
1778 	int			item_index = 0;
1779 	int			bit = 0;
1780 	int			nbits = 0;
1781 	int			reg_buf_offset = 0;
1782 	int			reg_buf_bytes = 0;
1783 	int			next_unlinked_offset;
1784 	int			inodes_per_buf;
1785 	xfs_agino_t		*logged_nextp;
1786 	xfs_agino_t		*buffer_nextp;
1787 
1788 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1789 
1790 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1791 	for (i = 0; i < inodes_per_buf; i++) {
1792 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1793 			offsetof(xfs_dinode_t, di_next_unlinked);
1794 
1795 		while (next_unlinked_offset >=
1796 		       (reg_buf_offset + reg_buf_bytes)) {
1797 			/*
1798 			 * The next di_next_unlinked field is beyond
1799 			 * the current logged region.  Find the next
1800 			 * logged region that contains or is beyond
1801 			 * the current di_next_unlinked field.
1802 			 */
1803 			bit += nbits;
1804 			bit = xfs_next_bit(buf_f->blf_data_map,
1805 					   buf_f->blf_map_size, bit);
1806 
1807 			/*
1808 			 * If there are no more logged regions in the
1809 			 * buffer, then we're done.
1810 			 */
1811 			if (bit == -1)
1812 				return 0;
1813 
1814 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1815 						buf_f->blf_map_size, bit);
1816 			ASSERT(nbits > 0);
1817 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1818 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1819 			item_index++;
1820 		}
1821 
1822 		/*
1823 		 * If the current logged region starts after the current
1824 		 * di_next_unlinked field, then move on to the next
1825 		 * di_next_unlinked field.
1826 		 */
1827 		if (next_unlinked_offset < reg_buf_offset)
1828 			continue;
1829 
1830 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1831 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1832 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1833 							BBTOB(bp->b_io_length));
1834 
1835 		/*
1836 		 * The current logged region contains a copy of the
1837 		 * current di_next_unlinked field.  Extract its value
1838 		 * and copy it to the buffer copy.
1839 		 */
1840 		logged_nextp = item->ri_buf[item_index].i_addr +
1841 				next_unlinked_offset - reg_buf_offset;
1842 		if (unlikely(*logged_nextp == 0)) {
1843 			xfs_alert(mp,
1844 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1845 		"Trying to replay bad (0) inode di_next_unlinked field.",
1846 				item, bp);
1847 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1848 					 XFS_ERRLEVEL_LOW, mp);
1849 			return XFS_ERROR(EFSCORRUPTED);
1850 		}
1851 
1852 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1853 					      next_unlinked_offset);
1854 		*buffer_nextp = *logged_nextp;
1855 	}
1856 
1857 	return 0;
1858 }
1859 
1860 /*
1861  * Perform a 'normal' buffer recovery.  Each logged region of the
1862  * buffer should be copied over the corresponding region in the
1863  * given buffer.  The bitmap in the buf log format structure indicates
1864  * where to place the logged data.
1865  */
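/*
 * Each bit in blf_data_map covers one XFS_BLF_CHUNK (1 << XFS_BLF_SHIFT)
 * byte chunk of the buffer.  For example, a run starting at bit 2 with
 * nbits == 3 describes buffer bytes [2 << XFS_BLF_SHIFT, 5 << XFS_BLF_SHIFT),
 * which is exactly the span the memcpy() below copies.
 */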
1866 STATIC void
1867 xlog_recover_do_reg_buffer(
1868 	struct xfs_mount	*mp,
1869 	xlog_recover_item_t	*item,
1870 	struct xfs_buf		*bp,
1871 	xfs_buf_log_format_t	*buf_f)
1872 {
1873 	int			i;
1874 	int			bit;
1875 	int			nbits;
1876 	int                     error;
1877 
1878 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1879 
1880 	bit = 0;
1881 	i = 1;  /* 0 is the buf format structure */
1882 	while (1) {
1883 		bit = xfs_next_bit(buf_f->blf_data_map,
1884 				   buf_f->blf_map_size, bit);
1885 		if (bit == -1)
1886 			break;
1887 		nbits = xfs_contig_bits(buf_f->blf_data_map,
1888 					buf_f->blf_map_size, bit);
1889 		ASSERT(nbits > 0);
1890 		ASSERT(item->ri_buf[i].i_addr != NULL);
1891 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1892 		ASSERT(BBTOB(bp->b_io_length) >=
1893 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
1894 
1895 		/*
1896 		 * Do a sanity check if this is a dquot buffer. Just checking
1897 		 * the first dquot in the buffer should do.  XXX: This is
1898 		 * probably a good thing to do for other buf types also.
1899 		 */
1900 		error = 0;
1901 		if (buf_f->blf_flags &
1902 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1903 			if (item->ri_buf[i].i_addr == NULL) {
1904 				xfs_alert(mp,
1905 					"XFS: NULL dquot in %s.", __func__);
1906 				goto next;
1907 			}
1908 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1909 				xfs_alert(mp,
1910 					"XFS: dquot too small (%d) in %s.",
1911 					item->ri_buf[i].i_len, __func__);
1912 				goto next;
1913 			}
1914 			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1915 					       -1, 0, XFS_QMOPT_DOWARN,
1916 					       "dquot_buf_recover");
1917 			if (error)
1918 				goto next;
1919 		}
1920 
1921 		memcpy(xfs_buf_offset(bp,
1922 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
1923 			item->ri_buf[i].i_addr,		/* source */
1924 			nbits<<XFS_BLF_SHIFT);		/* length */
1925  next:
1926 		i++;
1927 		bit += nbits;
1928 	}
1929 
1930 	/* Shouldn't be any more regions */
1931 	ASSERT(i == item->ri_total);
1932 }
1933 
1934 /*
1935  * Do some primitive error checking on ondisk dquot data structures.
1936  */
1937 int
1938 xfs_qm_dqcheck(
1939 	struct xfs_mount *mp,
1940 	xfs_disk_dquot_t *ddq,
1941 	xfs_dqid_t	 id,
1942 	uint		 type,	  /* used only when IO_dorepair is true */
1943 	uint		 flags,
1944 	char		 *str)
1945 {
1946 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
1947 	int		errs = 0;
1948 
1949 	/*
1950 	 * We can encounter an uninitialized dquot buffer for 2 reasons:
1951 	 * 1. If we crash while deleting the quotainode(s), and those blks got
1952 	 *    used for user data. This is because we take the path of regular
1953 	 *    file deletion; however, the size field of quotainodes is never
1954 	 *    updated, so all the tricks that we play in itruncate_finish
1955 	 *    don't quite matter.
1956 	 *
1957 	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
1958 	 *    But the allocation will be replayed so we'll end up with an
1959 	 *    uninitialized quota block.
1960 	 *
1961 	 * This is all fine; things are still consistent, and we haven't lost
1962 	 * any quota information. Just don't complain about bad dquot blks.
1963 	 */
1964 	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
1965 		if (flags & XFS_QMOPT_DOWARN)
1966 			xfs_alert(mp,
1967 			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1968 			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1969 		errs++;
1970 	}
1971 	if (ddq->d_version != XFS_DQUOT_VERSION) {
1972 		if (flags & XFS_QMOPT_DOWARN)
1973 			xfs_alert(mp,
1974 			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1975 			str, id, ddq->d_version, XFS_DQUOT_VERSION);
1976 		errs++;
1977 	}
1978 
1979 	if (ddq->d_flags != XFS_DQ_USER &&
1980 	    ddq->d_flags != XFS_DQ_PROJ &&
1981 	    ddq->d_flags != XFS_DQ_GROUP) {
1982 		if (flags & XFS_QMOPT_DOWARN)
1983 			xfs_alert(mp,
1984 			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1985 			str, id, ddq->d_flags);
1986 		errs++;
1987 	}
1988 
1989 	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1990 		if (flags & XFS_QMOPT_DOWARN)
1991 			xfs_alert(mp,
1992 			"%s : ondisk-dquot 0x%p, ID mismatch: "
1993 			"0x%x expected, found id 0x%x",
1994 			str, ddq, id, be32_to_cpu(ddq->d_id));
1995 		errs++;
1996 	}
1997 
1998 	if (!errs && ddq->d_id) {
1999 		if (ddq->d_blk_softlimit &&
2000 		    be64_to_cpu(ddq->d_bcount) >
2001 				be64_to_cpu(ddq->d_blk_softlimit)) {
2002 			if (!ddq->d_btimer) {
2003 				if (flags & XFS_QMOPT_DOWARN)
2004 					xfs_alert(mp,
2005 			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2006 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2007 				errs++;
2008 			}
2009 		}
2010 		if (ddq->d_ino_softlimit &&
2011 		    be64_to_cpu(ddq->d_icount) >
2012 				be64_to_cpu(ddq->d_ino_softlimit)) {
2013 			if (!ddq->d_itimer) {
2014 				if (flags & XFS_QMOPT_DOWARN)
2015 					xfs_alert(mp,
2016 			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2017 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2018 				errs++;
2019 			}
2020 		}
2021 		if (ddq->d_rtb_softlimit &&
2022 		    be64_to_cpu(ddq->d_rtbcount) >
2023 				be64_to_cpu(ddq->d_rtb_softlimit)) {
2024 			if (!ddq->d_rtbtimer) {
2025 				if (flags & XFS_QMOPT_DOWARN)
2026 					xfs_alert(mp,
2027 			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2028 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2029 				errs++;
2030 			}
2031 		}
2032 	}
2033 
2034 	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2035 		return errs;
2036 
2037 	if (flags & XFS_QMOPT_DOWARN)
2038 		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2039 
2040 	/*
2041 	 * Typically, a repair is only requested by quotacheck.
2042 	 */
2043 	ASSERT(id != -1);
2044 	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2045 	memset(d, 0, sizeof(xfs_dqblk_t));
2046 
2047 	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2048 	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2049 	d->dd_diskdq.d_flags = type;
2050 	d->dd_diskdq.d_id = cpu_to_be32(id);
2051 
2052 	return errs;
2053 }
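
/*
 * Callers of xfs_qm_dqcheck() fall into two camps (illustrative):
 *
 *	verify only, warning on problems:
 *		errs = xfs_qm_dqcheck(mp, ddq, id, 0, XFS_QMOPT_DOWARN, str);
 *
 *	verify and repair in place, as quotacheck does:
 *		errs = xfs_qm_dqcheck(mp, ddq, id, type,
 *				XFS_QMOPT_DOWARN | XFS_QMOPT_DQREPAIR, str);
 *
 * Either way the return value is the number of problems found before
 * any repair was applied.
 */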
2054 
2055 /*
2056  * Perform a dquot buffer recovery.
2057  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2058  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2059  * Else, treat it as a regular buffer and do recovery.
2060  */
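/*
 * For example, a buffer flagged XFS_BLF_UDQUOT_BUF is tossed here if a
 * user quotaoff item was seen in pass 1 (l_quotaoffs_flag has
 * XFS_DQ_USER set), while group/project dquot buffers still recover.
 */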
2061 STATIC void
2062 xlog_recover_do_dquot_buffer(
2063 	struct xfs_mount		*mp,
2064 	struct xlog			*log,
2065 	struct xlog_recover_item	*item,
2066 	struct xfs_buf			*bp,
2067 	struct xfs_buf_log_format	*buf_f)
2068 {
2069 	uint			type;
2070 
2071 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2072 
2073 	/*
2074 	 * Filesystems are required to send in quota flags at mount time.
2075 	 */
2076 	if (mp->m_qflags == 0)
2077 		return;
2079 
2080 	type = 0;
2081 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2082 		type |= XFS_DQ_USER;
2083 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2084 		type |= XFS_DQ_PROJ;
2085 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2086 		type |= XFS_DQ_GROUP;
2087 	/*
2088 	 * If this type of quota was turned off, ignore this buffer.
2089 	 */
2090 	if (log->l_quotaoffs_flag & type)
2091 		return;
2092 
2093 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2094 }
2095 
2096 /*
2097  * This routine replays a modification made to a buffer at runtime.
2098  * There are actually two types of buffer, regular and inode, which
2099  * are handled differently.  From inode buffers we only recover a
2100  * specific set of data, namely
2101  * the inode di_next_unlinked fields.  This is because all other inode
2102  * data is actually logged via inode records and any data we replay
2103  * here which overlaps that may be stale.
2104  *
2105  * When meta-data buffers are freed at run time we log a buffer item
2106  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2107  * of the buffer in the log should not be replayed at recovery time.
2108  * This is so that if the blocks covered by the buffer are reused for
2109  * file data before we crash we don't end up replaying old, freed
2110  * meta-data into a user's file.
2111  *
2112  * To handle the cancellation of buffer log items, we make two passes
2113  * over the log during recovery.  During the first we build a table of
2114  * those buffers which have been cancelled, and during the second we
2115  * only replay those buffers which do not have corresponding cancel
2116  * records in the table.  See xlog_recover_buffer_pass1() above for more
2117  * details on the implementation of the table of cancel records.
2118  */
2119 STATIC int
2120 xlog_recover_buffer_pass2(
2121 	struct xlog			*log,
2122 	struct list_head		*buffer_list,
2123 	struct xlog_recover_item	*item)
2124 {
2125 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2126 	xfs_mount_t		*mp = log->l_mp;
2127 	xfs_buf_t		*bp;
2128 	int			error;
2129 	uint			buf_flags;
2130 
2131 	/*
2132 	 * In this pass we only want to recover all the buffers which have
2133 	 * not been cancelled and are not cancellation buffers themselves.
2134 	 */
2135 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2136 			buf_f->blf_len, buf_f->blf_flags)) {
2137 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2138 		return 0;
2139 	}
2140 
2141 	trace_xfs_log_recover_buf_recover(log, buf_f);
2142 
2143 	buf_flags = 0;
2144 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2145 		buf_flags |= XBF_UNMAPPED;
2146 
2147 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2148 			  buf_flags, NULL);
2149 	if (!bp)
2150 		return XFS_ERROR(ENOMEM);
2151 	error = bp->b_error;
2152 	if (error) {
2153 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2154 		xfs_buf_relse(bp);
2155 		return error;
2156 	}
2157 
2158 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2159 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2160 	} else if (buf_f->blf_flags &
2161 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2162 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2163 	} else {
2164 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2165 	}
2166 	if (error) {
		xfs_buf_relse(bp);	/* don't leak the locked buffer */
2167 		return XFS_ERROR(error);
	}
2168 
2169 	/*
2170 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2171 	 * slower when taking into account all the buffers to be flushed.
2172 	 *
2173 	 * Also make sure that only inode buffers with good sizes stay in
2174 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2175 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2176 	 * buffers in the log can be a different size if the log was generated
2177 	 * by an older kernel using unclustered inode buffers or a newer kernel
2178 	 * running with a different inode cluster size.  Regardless, if the
2179 	 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2180 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2181 	 * the buffer out of the buffer cache so that the buffer won't
2182 	 * overlap with future reads of those inodes.
2183 	 */
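	/*
	 * For example (illustrative sizes): with 4k filesystem blocks and
	 * an 8k inode cluster, any inode buffer from the log that is not
	 * exactly 8k is marked stale and written, keeping it out of the
	 * buffer cache.
	 */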
2184 	if (XFS_DINODE_MAGIC ==
2185 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2186 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2187 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2188 		xfs_buf_stale(bp);
2189 		error = xfs_bwrite(bp);
2190 	} else {
2191 		ASSERT(bp->b_target->bt_mount == mp);
2192 		bp->b_iodone = xlog_recover_iodone;
2193 		xfs_buf_delwri_queue(bp, buffer_list);
2194 	}
2195 
2196 	xfs_buf_relse(bp);
2197 	return error;
2198 }
2199 
2200 STATIC int
2201 xlog_recover_inode_pass2(
2202 	struct xlog			*log,
2203 	struct list_head		*buffer_list,
2204 	struct xlog_recover_item	*item)
2205 {
2206 	xfs_inode_log_format_t	*in_f;
2207 	xfs_mount_t		*mp = log->l_mp;
2208 	xfs_buf_t		*bp;
2209 	xfs_dinode_t		*dip;
2210 	int			len;
2211 	xfs_caddr_t		src;
2212 	xfs_caddr_t		dest;
2213 	int			error;
2214 	int			attr_index;
2215 	uint			fields;
2216 	xfs_icdinode_t		*dicp;
2217 	int			need_free = 0;
2218 
2219 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2220 		in_f = item->ri_buf[0].i_addr;
2221 	} else {
2222 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2223 		need_free = 1;
2224 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2225 		if (error)
2226 			goto error;
2227 	}
2228 
2229 	/*
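	/*
	 * At this point in_f describes the item's regions.  The layout
	 * implied by the ilf_size assertions below is:
	 *
	 *	ri_buf[0]: inode log format structure
	 *	ri_buf[1]: inode core
	 *	ri_buf[2]: data fork (if XFS_ILOG_DFORK is set)
	 *	ri_buf[2] or [3]: attr fork (if XFS_ILOG_AFORK is set)
	 */
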
2230 	 * Inode buffers can be freed, look out for it,
2231 	 * and do not replay the inode.
2232 	 */
2233 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2234 					in_f->ilf_len, 0)) {
2235 		error = 0;
2236 		trace_xfs_log_recover_inode_cancel(log, in_f);
2237 		goto error;
2238 	}
2239 	trace_xfs_log_recover_inode_recover(log, in_f);
2240 
2241 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2242 			  NULL);
2243 	if (!bp) {
2244 		error = ENOMEM;
2245 		goto error;
2246 	}
2247 	error = bp->b_error;
2248 	if (error) {
2249 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2250 		xfs_buf_relse(bp);
2251 		goto error;
2252 	}
2253 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2254 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2255 
2256 	/*
2257 	 * Make sure the place we're flushing out to really looks
2258 	 * like an inode!
2259 	 */
2260 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2261 		xfs_buf_relse(bp);
2262 		xfs_alert(mp,
2263 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2264 			__func__, dip, bp, in_f->ilf_ino);
2265 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2266 				 XFS_ERRLEVEL_LOW, mp);
2267 		error = EFSCORRUPTED;
2268 		goto error;
2269 	}
2270 	dicp = item->ri_buf[1].i_addr;
2271 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2272 		xfs_buf_relse(bp);
2273 		xfs_alert(mp,
2274 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2275 			__func__, item, in_f->ilf_ino);
2276 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2277 				 XFS_ERRLEVEL_LOW, mp);
2278 		error = EFSCORRUPTED;
2279 		goto error;
2280 	}
2281 
2282 	/* Skip replay when the on disk inode is newer than the log one */
2283 	if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2284 		/*
2285 		 * Deal with the wrap case: a flush iteration count that
2286 		 * has wrapped past DI_MAX_FLUSH is newer, not older.
2287 		 */
2288 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2289 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2290 			/* do nothing */
2291 		} else {
2292 			xfs_buf_relse(bp);
2293 			trace_xfs_log_recover_inode_skip(log, in_f);
2294 			error = 0;
2295 			goto error;
2296 		}
2297 	}
2298 	/* Take the opportunity to reset the flush iteration count */
2299 	dicp->di_flushiter = 0;
2300 
2301 	if (unlikely(S_ISREG(dicp->di_mode))) {
2302 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2303 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2304 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2305 					 XFS_ERRLEVEL_LOW, mp, dicp);
2306 			xfs_buf_relse(bp);
2307 			xfs_alert(mp,
2308 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2309 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2310 				__func__, item, dip, bp, in_f->ilf_ino);
2311 			error = EFSCORRUPTED;
2312 			goto error;
2313 		}
2314 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2315 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2316 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2317 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2318 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2319 					     XFS_ERRLEVEL_LOW, mp, dicp);
2320 			xfs_buf_relse(bp);
2321 			xfs_alert(mp,
2322 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2323 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2324 				__func__, item, dip, bp, in_f->ilf_ino);
2325 			error = EFSCORRUPTED;
2326 			goto error;
2327 		}
2328 	}
2329 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2330 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2331 				     XFS_ERRLEVEL_LOW, mp, dicp);
2332 		xfs_buf_relse(bp);
2333 		xfs_alert(mp,
2334 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2335 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2336 			__func__, item, dip, bp, in_f->ilf_ino,
2337 			dicp->di_nextents + dicp->di_anextents,
2338 			dicp->di_nblocks);
2339 		error = EFSCORRUPTED;
2340 		goto error;
2341 	}
2342 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2343 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2344 				     XFS_ERRLEVEL_LOW, mp, dicp);
2345 		xfs_buf_relse(bp);
2346 		xfs_alert(mp,
2347 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2348 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2349 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2350 		error = EFSCORRUPTED;
2351 		goto error;
2352 	}
2353 	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2354 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2355 				     XFS_ERRLEVEL_LOW, mp, dicp);
2356 		xfs_buf_relse(bp);
2357 		xfs_alert(mp,
2358 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2359 			__func__, item->ri_buf[1].i_len, item);
2360 		error = EFSCORRUPTED;
2361 		goto error;
2362 	}
2363 
2364 	/* The core is in in-core format */
2365 	xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2366 
2367 	/* the rest is in on-disk format */
2368 	if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2369 		memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2370 			item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2371 			item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2372 	}
2373 
2374 	fields = in_f->ilf_fields;
2375 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2376 	case XFS_ILOG_DEV:
2377 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2378 		break;
2379 	case XFS_ILOG_UUID:
2380 		memcpy(XFS_DFORK_DPTR(dip),
2381 		       &in_f->ilf_u.ilfu_uuid,
2382 		       sizeof(uuid_t));
2383 		break;
2384 	}
2385 
2386 	if (in_f->ilf_size == 2)
2387 		goto write_inode_buffer;
2388 	len = item->ri_buf[2].i_len;
2389 	src = item->ri_buf[2].i_addr;
2390 	ASSERT(in_f->ilf_size <= 4);
2391 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2392 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2393 	       (len == in_f->ilf_dsize));
2394 
2395 	switch (fields & XFS_ILOG_DFORK) {
2396 	case XFS_ILOG_DDATA:
2397 	case XFS_ILOG_DEXT:
2398 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2399 		break;
2400 
2401 	case XFS_ILOG_DBROOT:
2402 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2403 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2404 				 XFS_DFORK_DSIZE(dip, mp));
2405 		break;
2406 
2407 	default:
2408 		/*
2409 		 * There are no data fork flags set.
2410 		 */
2411 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2412 		break;
2413 	}
2414 
2415 	/*
2416 	 * If we logged any attribute data, recover it.  There may or
2417 	 * may not have been any other non-core data logged in this
2418 	 * transaction.
2419 	 */
2420 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2421 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2422 			attr_index = 3;
2423 		} else {
2424 			attr_index = 2;
2425 		}
2426 		len = item->ri_buf[attr_index].i_len;
2427 		src = item->ri_buf[attr_index].i_addr;
2428 		ASSERT(len == in_f->ilf_asize);
2429 
2430 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2431 		case XFS_ILOG_ADATA:
2432 		case XFS_ILOG_AEXT:
2433 			dest = XFS_DFORK_APTR(dip);
2434 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2435 			memcpy(dest, src, len);
2436 			break;
2437 
2438 		case XFS_ILOG_ABROOT:
2439 			dest = XFS_DFORK_APTR(dip);
2440 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2441 					 len, (xfs_bmdr_block_t*)dest,
2442 					 XFS_DFORK_ASIZE(dip, mp));
2443 			break;
2444 
2445 		default:
2446 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2447 			ASSERT(0);
2448 			xfs_buf_relse(bp);
2449 			error = EIO;
2450 			goto error;
2451 		}
2452 	}
2453 
2454 write_inode_buffer:
2455 	ASSERT(bp->b_target->bt_mount == mp);
2456 	bp->b_iodone = xlog_recover_iodone;
2457 	xfs_buf_delwri_queue(bp, buffer_list);
2458 	xfs_buf_relse(bp);
2459 error:
2460 	if (need_free)
2461 		kmem_free(in_f);
2462 	return XFS_ERROR(error);
2463 }
2464 
2465 /*
2466  * Recover QUOTAOFF records. We simply make a note of it in the xlog
2467  * structure, so that we know not to do any dquot item or dquot buffer
2468  * recovery of that type.
2469  */
2470 STATIC int
2471 xlog_recover_quotaoff_pass1(
2472 	struct xlog			*log,
2473 	struct xlog_recover_item	*item)
2474 {
2475 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2476 	ASSERT(qoff_f);
2477 
2478 	/*
2479 	 * The logitem format's flag tells us if this was user quotaoff,
2480 	 * group/project quotaoff or both.
2481 	 */
2482 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2483 		log->l_quotaoffs_flag |= XFS_DQ_USER;
2484 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2485 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2486 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2487 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2488 
2489 	return 0;
2490 }
2491 
2492 /*
2493  * Recover a dquot record
2494  */
2495 STATIC int
2496 xlog_recover_dquot_pass2(
2497 	struct xlog			*log,
2498 	struct list_head		*buffer_list,
2499 	struct xlog_recover_item	*item)
2500 {
2501 	xfs_mount_t		*mp = log->l_mp;
2502 	xfs_buf_t		*bp;
2503 	struct xfs_disk_dquot	*ddq, *recddq;
2504 	int			error;
2505 	xfs_dq_logformat_t	*dq_f;
2506 	uint			type;
2507 
2508 
2509 	/*
2510 	 * Filesystems are required to send in quota flags at mount time.
2511 	 */
2512 	if (mp->m_qflags == 0)
2513 		return 0;
2514 
2515 	recddq = item->ri_buf[1].i_addr;
2516 	if (recddq == NULL) {
2517 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2518 		return XFS_ERROR(EIO);
2519 	}
2520 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2521 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2522 			item->ri_buf[1].i_len, __func__);
2523 		return XFS_ERROR(EIO);
2524 	}
2525 
2526 	/*
2527 	 * If this type of quota was turned off, ignore this record.
2528 	 */
2529 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2530 	ASSERT(type);
2531 	if (log->l_quotaoffs_flag & type)
2532 		return 0;
2533 
2534 	/*
2535 	 * At this point we know that quota was _not_ turned off.
2536 	 * Since the mount flags are not indicating to us otherwise, this
2537 	 * must mean that quota is on, and the dquot needs to be replayed.
2538 	 * Remember that we may not have fully recovered the superblock yet,
2539 	 * so we can't do the usual trick of looking at the SB quota bits.
2540 	 *
2541 	 * The other possibility, of course, is that the quota subsystem was
2542 	 * removed since the last mount - ENOSYS.
2543 	 */
2544 	dq_f = item->ri_buf[0].i_addr;
2545 	ASSERT(dq_f);
2546 	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2547 			   "xlog_recover_dquot_pass2 (log copy)");
2548 	if (error)
2549 		return XFS_ERROR(EIO);
2550 	ASSERT(dq_f->qlf_len == 1);
2551 
2552 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2553 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
2554 				   NULL);
2555 	if (error)
2556 		return error;
2557 
2558 	ASSERT(bp);
2559 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2560 
2561 	/*
2562 	 * At least the magic num portion should be on disk because this
2563 	 * was among a chunk of dquots created earlier, and we did some
2564 	 * minimal initialization then.
2565 	 */
2566 	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2567 			   "xlog_recover_dquot_pass2");
2568 	if (error) {
2569 		xfs_buf_relse(bp);
2570 		return XFS_ERROR(EIO);
2571 	}
2572 
2573 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
2574 
2575 	ASSERT(dq_f->qlf_size == 2);
2576 	ASSERT(bp->b_target->bt_mount == mp);
2577 	bp->b_iodone = xlog_recover_iodone;
2578 	xfs_buf_delwri_queue(bp, buffer_list);
2579 	xfs_buf_relse(bp);
2580 
2581 	return 0;
2582 }
2583 
2584 /*
2585  * This routine is called to create an in-core extent free intent
2586  * item from the efi format structure which was logged on disk.
2587  * It allocates an in-core efi, copies the extents from the format
2588  * structure into it, and adds the efi to the AIL with the given
2589  * LSN.
2590  */
2591 STATIC int
2592 xlog_recover_efi_pass2(
2593 	struct xlog			*log,
2594 	struct xlog_recover_item	*item,
2595 	xfs_lsn_t			lsn)
2596 {
2597 	int			error;
2598 	xfs_mount_t		*mp = log->l_mp;
2599 	xfs_efi_log_item_t	*efip;
2600 	xfs_efi_log_format_t	*efi_formatp;
2601 
2602 	efi_formatp = item->ri_buf[0].i_addr;
2603 
2604 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2605 	error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
2606 	if (error) {
2607 		xfs_efi_item_free(efip);
2608 		return error;
2609 	}
2610 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2611 
2612 	spin_lock(&log->l_ailp->xa_lock);
2613 	/*
2614 	 * xfs_trans_ail_update() drops the AIL lock.
2615 	 */
2616 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2617 	return 0;
2618 }
2619 
2620 
2621 /*
2622  * This routine is called when an efd format structure is found in
2623  * a committed transaction in the log.  Its purpose is to cancel
2624  * the corresponding efi if it was still in the log.  To do this
2625  * it searches the AIL for the efi with an id equal to that in the
2626  * efd format structure.  If we find it, we remove the efi from the
2627  * AIL and free it.
2628  */
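/*
 * Together with xlog_recover_efi_pass2() above, this implements intent
 * cancellation: an EFI whose EFD is found here is removed from the AIL
 * and forgotten, while an EFI with no matching EFD in the log survives
 * and has its extents freed later by xlog_recover_process_efis().
 */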
2629 STATIC int
2630 xlog_recover_efd_pass2(
2631 	struct xlog			*log,
2632 	struct xlog_recover_item	*item)
2633 {
2634 	xfs_efd_log_format_t	*efd_formatp;
2635 	xfs_efi_log_item_t	*efip = NULL;
2636 	xfs_log_item_t		*lip;
2637 	__uint64_t		efi_id;
2638 	struct xfs_ail_cursor	cur;
2639 	struct xfs_ail		*ailp = log->l_ailp;
2640 
2641 	efd_formatp = item->ri_buf[0].i_addr;
2642 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2643 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2644 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2645 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2646 	efi_id = efd_formatp->efd_efi_id;
2647 
2648 	/*
2649 	 * Search for the efi with the id in the efd format structure
2650 	 * in the AIL.
2651 	 */
2652 	spin_lock(&ailp->xa_lock);
2653 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2654 	while (lip != NULL) {
2655 		if (lip->li_type == XFS_LI_EFI) {
2656 			efip = (xfs_efi_log_item_t *)lip;
2657 			if (efip->efi_format.efi_id == efi_id) {
2658 				/*
2659 				 * xfs_trans_ail_delete() drops the
2660 				 * AIL lock.
2661 				 */
2662 				xfs_trans_ail_delete(ailp, lip,
2663 						     SHUTDOWN_CORRUPT_INCORE);
2664 				xfs_efi_item_free(efip);
2665 				spin_lock(&ailp->xa_lock);
2666 				break;
2667 			}
2668 		}
2669 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2670 	}
2671 	xfs_trans_ail_cursor_done(ailp, &cur);
2672 	spin_unlock(&ailp->xa_lock);
2673 
2674 	return 0;
2675 }
2676 
2677 /*
2678  * Free up any resources allocated by the transaction
2679  *
2680  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2681  */
2682 STATIC void
2683 xlog_recover_free_trans(
2684 	struct xlog_recover	*trans)
2685 {
2686 	xlog_recover_item_t	*item, *n;
2687 	int			i;
2688 
2689 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2690 		/* Free the regions in the item. */
2691 		list_del(&item->ri_list);
2692 		for (i = 0; i < item->ri_cnt; i++)
2693 			kmem_free(item->ri_buf[i].i_addr);
2694 		/* Free the item itself */
2695 		kmem_free(item->ri_buf);
2696 		kmem_free(item);
2697 	}
2698 	/* Free the transaction recover structure */
2699 	kmem_free(trans);
2700 }
2701 
2702 STATIC int
2703 xlog_recover_commit_pass1(
2704 	struct xlog			*log,
2705 	struct xlog_recover		*trans,
2706 	struct xlog_recover_item	*item)
2707 {
2708 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2709 
2710 	switch (ITEM_TYPE(item)) {
2711 	case XFS_LI_BUF:
2712 		return xlog_recover_buffer_pass1(log, item);
2713 	case XFS_LI_QUOTAOFF:
2714 		return xlog_recover_quotaoff_pass1(log, item);
2715 	case XFS_LI_INODE:
2716 	case XFS_LI_EFI:
2717 	case XFS_LI_EFD:
2718 	case XFS_LI_DQUOT:
2719 		/* nothing to do in pass 1 */
2720 		return 0;
2721 	default:
2722 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2723 			__func__, ITEM_TYPE(item));
2724 		ASSERT(0);
2725 		return XFS_ERROR(EIO);
2726 	}
2727 }
2728 
2729 STATIC int
2730 xlog_recover_commit_pass2(
2731 	struct xlog			*log,
2732 	struct xlog_recover		*trans,
2733 	struct list_head		*buffer_list,
2734 	struct xlog_recover_item	*item)
2735 {
2736 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2737 
2738 	switch (ITEM_TYPE(item)) {
2739 	case XFS_LI_BUF:
2740 		return xlog_recover_buffer_pass2(log, buffer_list, item);
2741 	case XFS_LI_INODE:
2742 		return xlog_recover_inode_pass2(log, buffer_list, item);
2743 	case XFS_LI_EFI:
2744 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2745 	case XFS_LI_EFD:
2746 		return xlog_recover_efd_pass2(log, item);
2747 	case XFS_LI_DQUOT:
2748 		return xlog_recover_dquot_pass2(log, buffer_list, item);
2749 	case XFS_LI_QUOTAOFF:
2750 		/* nothing to do in pass2 */
2751 		return 0;
2752 	default:
2753 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2754 			__func__, ITEM_TYPE(item));
2755 		ASSERT(0);
2756 		return XFS_ERROR(EIO);
2757 	}
2758 }
2759 
2760 /*
2761  * Perform the transaction.
2762  *
2763  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2764  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2765  */
2766 STATIC int
2767 xlog_recover_commit_trans(
2768 	struct xlog		*log,
2769 	struct xlog_recover	*trans,
2770 	int			pass)
2771 {
2772 	int			error = 0, error2;
2773 	xlog_recover_item_t	*item;
2774 	LIST_HEAD		(buffer_list);
2775 
2776 	hlist_del(&trans->r_list);
2777 
2778 	error = xlog_recover_reorder_trans(log, trans, pass);
2779 	if (error)
2780 		return error;
2781 
2782 	list_for_each_entry(item, &trans->r_itemq, ri_list) {
2783 		switch (pass) {
2784 		case XLOG_RECOVER_PASS1:
2785 			error = xlog_recover_commit_pass1(log, trans, item);
2786 			break;
2787 		case XLOG_RECOVER_PASS2:
2788 			error = xlog_recover_commit_pass2(log, trans,
2789 							  &buffer_list, item);
2790 			break;
2791 		default:
2792 			ASSERT(0);
2793 		}
2794 
2795 		if (error)
2796 			goto out;
2797 	}
2798 
2799 	xlog_recover_free_trans(trans);
2800 
2801 out:
2802 	error2 = xfs_buf_delwri_submit(&buffer_list);
2803 	return error ? error : error2;
2804 }
2805 
2806 STATIC int
2807 xlog_recover_unmount_trans(
2808 	struct xlog		*log,
2809 	struct xlog_recover	*trans)
2810 {
2811 	/* Do nothing now */
2812 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2813 	return 0;
2814 }
2815 
2816 /*
2817  * There are two valid states of the r_state field.  0 indicates that the
2818  * transaction structure is in a normal state.  We have either seen the
2819  * start of the transaction or the last operation we added was not a partial
2820  * operation.  If the last operation we added to the transaction was a
2821  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2822  *
2823  * NOTE: skip LRs with 0 data length.
2824  */
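/*
 * For example (illustrative), a transaction too large for one log
 * record appears as:
 *
 *	record N:   XLOG_START_TRANS op, ops, ... XLOG_CONTINUE_TRANS op
 *	record N+1: XLOG_WAS_CONT_TRANS op, ops, ... XLOG_COMMIT_TRANS op
 *
 * and the switch below routes the continued data through
 * xlog_recover_add_to_cont_trans() before the commit is processed.
 */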
2825 STATIC int
2826 xlog_recover_process_data(
2827 	struct xlog		*log,
2828 	struct hlist_head	rhash[],
2829 	struct xlog_rec_header	*rhead,
2830 	xfs_caddr_t		dp,
2831 	int			pass)
2832 {
2833 	xfs_caddr_t		lp;
2834 	int			num_logops;
2835 	xlog_op_header_t	*ohead;
2836 	xlog_recover_t		*trans;
2837 	xlog_tid_t		tid;
2838 	int			error;
2839 	unsigned long		hash;
2840 	uint			flags;
2841 
2842 	lp = dp + be32_to_cpu(rhead->h_len);
2843 	num_logops = be32_to_cpu(rhead->h_num_logops);
2844 
2845 	/* check the log format matches our own - else we can't recover */
2846 	if (xlog_header_check_recover(log->l_mp, rhead))
2847 		return XFS_ERROR(EIO);
2848 
2849 	while ((dp < lp) && num_logops) {
2850 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2851 		ohead = (xlog_op_header_t *)dp;
2852 		dp += sizeof(xlog_op_header_t);
2853 		if (ohead->oh_clientid != XFS_TRANSACTION &&
2854 		    ohead->oh_clientid != XFS_LOG) {
2855 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2856 					__func__, ohead->oh_clientid);
2857 			ASSERT(0);
2858 			return XFS_ERROR(EIO);
2859 		}
2860 		tid = be32_to_cpu(ohead->oh_tid);
2861 		hash = XLOG_RHASH(tid);
2862 		trans = xlog_recover_find_tid(&rhash[hash], tid);
2863 		if (trans == NULL) {		   /* not found; add new tid */
2864 			if (ohead->oh_flags & XLOG_START_TRANS)
2865 				xlog_recover_new_tid(&rhash[hash], tid,
2866 					be64_to_cpu(rhead->h_lsn));
2867 		} else {
2868 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2869 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
2870 					__func__, be32_to_cpu(ohead->oh_len));
2871 				WARN_ON(1);
2872 				return (XFS_ERROR(EIO));
2873 			}
2874 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
2875 			if (flags & XLOG_WAS_CONT_TRANS)
2876 				flags &= ~XLOG_CONTINUE_TRANS;
2877 			switch (flags) {
2878 			case XLOG_COMMIT_TRANS:
2879 				error = xlog_recover_commit_trans(log,
2880 								trans, pass);
2881 				break;
2882 			case XLOG_UNMOUNT_TRANS:
2883 				error = xlog_recover_unmount_trans(log, trans);
2884 				break;
2885 			case XLOG_WAS_CONT_TRANS:
2886 				error = xlog_recover_add_to_cont_trans(log,
2887 						trans, dp,
2888 						be32_to_cpu(ohead->oh_len));
2889 				break;
2890 			case XLOG_START_TRANS:
2891 				xfs_warn(log->l_mp, "%s: bad transaction",
2892 					__func__);
2893 				ASSERT(0);
2894 				error = XFS_ERROR(EIO);
2895 				break;
2896 			case 0:
2897 			case XLOG_CONTINUE_TRANS:
2898 				error = xlog_recover_add_to_trans(log, trans,
2899 						dp, be32_to_cpu(ohead->oh_len));
2900 				break;
2901 			default:
2902 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2903 					__func__, flags);
2904 				ASSERT(0);
2905 				error = XFS_ERROR(EIO);
2906 				break;
2907 			}
2908 			if (error)
2909 				return error;
2910 		}
2911 		dp += be32_to_cpu(ohead->oh_len);
2912 		num_logops--;
2913 	}
2914 	return 0;
2915 }
2916 
2917 /*
2918  * Process an extent free intent item that was recovered from
2919  * the log.  We need to free the extents that it describes.
2920  */
2921 STATIC int
2922 xlog_recover_process_efi(
2923 	xfs_mount_t		*mp,
2924 	xfs_efi_log_item_t	*efip)
2925 {
2926 	xfs_efd_log_item_t	*efdp;
2927 	xfs_trans_t		*tp;
2928 	int			i;
2929 	int			error = 0;
2930 	xfs_extent_t		*extp;
2931 	xfs_fsblock_t		startblock_fsb;
2932 
2933 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
2934 
2935 	/*
2936 	 * First check the validity of the extents described by the
2937 	 * EFI.  If any are bad, then assume that all are bad and
2938 	 * just toss the EFI.
2939 	 */
2940 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2941 		extp = &(efip->efi_format.efi_extents[i]);
2942 		startblock_fsb = XFS_BB_TO_FSB(mp,
2943 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
2944 		if ((startblock_fsb == 0) ||
2945 		    (extp->ext_len == 0) ||
2946 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2947 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2948 			/*
2949 			 * This will pull the EFI from the AIL and
2950 			 * free the memory associated with it.
2951 			 */
2952 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
2953 			return XFS_ERROR(EIO);
2954 		}
2955 	}
2956 
2957 	tp = xfs_trans_alloc(mp, 0);
2958 	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
2959 	if (error)
2960 		goto abort_error;
2961 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
2962 
2963 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2964 		extp = &(efip->efi_format.efi_extents[i]);
2965 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
2966 		if (error)
2967 			goto abort_error;
2968 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
2969 					 extp->ext_len);
2970 	}
2971 
2972 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
2973 	error = xfs_trans_commit(tp, 0);
2974 	return error;
2975 
2976 abort_error:
2977 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
2978 	return error;
2979 }
2980 
2981 /*
2982  * When this is called, all of the EFIs which did not have
2983  * corresponding EFDs should be in the AIL.  What we do now
2984  * is free the extents associated with each one.
2985  *
2986  * Since we process the EFIs in normal transactions, they
2987  * will be removed at some point after the commit.  This prevents
2988  * us from just walking down the list processing each one.
2989  * We'll use a flag in the EFI to skip those that we've already
2990  * processed and use the AIL iteration mechanism's generation
2991  * count to try to speed this up at least a bit.
2992  *
2993  * When we start, we know that the EFIs are the only things in
2994  * the AIL.  As we process them, however, other items are added
2995  * to the AIL.  Since everything added to the AIL must come after
2996  * everything already in the AIL, we stop processing as soon as
2997  * we see something other than an EFI in the AIL.
2998  */
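/*
 * In other words, while this runs the AIL looks like (illustrative):
 *
 *	EFI, EFI, ... | items added by our own extent-freeing transactions
 *
 * and we walk it from the left, stopping at the first non-EFI item.
 */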
2999 STATIC int
3000 xlog_recover_process_efis(
3001 	struct xlog	*log)
3002 {
3003 	xfs_log_item_t		*lip;
3004 	xfs_efi_log_item_t	*efip;
3005 	int			error = 0;
3006 	struct xfs_ail_cursor	cur;
3007 	struct xfs_ail		*ailp;
3008 
3009 	ailp = log->l_ailp;
3010 	spin_lock(&ailp->xa_lock);
3011 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3012 	while (lip != NULL) {
3013 		/*
3014 		 * We're done when we see something other than an EFI.
3015 		 * There should be no EFIs left in the AIL now.
3016 		 */
3017 		if (lip->li_type != XFS_LI_EFI) {
3018 #ifdef DEBUG
3019 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3020 				ASSERT(lip->li_type != XFS_LI_EFI);
3021 #endif
3022 			break;
3023 		}
3024 
3025 		/*
3026 		 * Skip EFIs that we've already processed.
3027 		 */
3028 		efip = (xfs_efi_log_item_t *)lip;
3029 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3030 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3031 			continue;
3032 		}
3033 
3034 		spin_unlock(&ailp->xa_lock);
3035 		error = xlog_recover_process_efi(log->l_mp, efip);
3036 		spin_lock(&ailp->xa_lock);
3037 		if (error)
3038 			goto out;
3039 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3040 	}
3041 out:
3042 	xfs_trans_ail_cursor_done(ailp, &cur);
3043 	spin_unlock(&ailp->xa_lock);
3044 	return error;
3045 }
3046 
3047 /*
3048  * This routine performs a transaction to null out a bad inode pointer
3049  * in an agi unlinked inode hash bucket.
3050  */
3051 STATIC void
3052 xlog_recover_clear_agi_bucket(
3053 	xfs_mount_t	*mp,
3054 	xfs_agnumber_t	agno,
3055 	int		bucket)
3056 {
3057 	xfs_trans_t	*tp;
3058 	xfs_agi_t	*agi;
3059 	xfs_buf_t	*agibp;
3060 	int		offset;
3061 	int		error;
3062 
3063 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3064 	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3065 				  0, 0, 0);
3066 	if (error)
3067 		goto out_abort;
3068 
3069 	error = xfs_read_agi(mp, tp, agno, &agibp);
3070 	if (error)
3071 		goto out_abort;
3072 
3073 	agi = XFS_BUF_TO_AGI(agibp);
3074 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3075 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3076 		 (sizeof(xfs_agino_t) * bucket);
3077 	xfs_trans_log_buf(tp, agibp, offset,
3078 			  (offset + sizeof(xfs_agino_t) - 1));
3079 
3080 	error = xfs_trans_commit(tp, 0);
3081 	if (error)
3082 		goto out_error;
3083 	return;
3084 
3085 out_abort:
3086 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3087 out_error:
3088 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3089 	return;
3090 }
3091 
3092 STATIC xfs_agino_t
3093 xlog_recover_process_one_iunlink(
3094 	struct xfs_mount		*mp,
3095 	xfs_agnumber_t			agno,
3096 	xfs_agino_t			agino,
3097 	int				bucket)
3098 {
3099 	struct xfs_buf			*ibp;
3100 	struct xfs_dinode		*dip;
3101 	struct xfs_inode		*ip;
3102 	xfs_ino_t			ino;
3103 	int				error;
3104 
3105 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3106 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3107 	if (error)
3108 		goto fail;
3109 
3110 	/*
3111 	 * Get the on disk inode to find the next inode in the bucket.
3112 	 */
3113 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3114 	if (error)
3115 		goto fail_iput;
3116 
3117 	ASSERT(ip->i_d.di_nlink == 0);
3118 	ASSERT(ip->i_d.di_mode != 0);
3119 
3120 	/* setup for the next pass */
3121 	agino = be32_to_cpu(dip->di_next_unlinked);
3122 	xfs_buf_relse(ibp);
3123 
3124 	/*
3125 	 * Prevent any DMAPI event from being sent when the reference on
3126 	 * the inode is dropped.
3127 	 */
3128 	ip->i_d.di_dmevmask = 0;
3129 
3130 	IRELE(ip);
3131 	return agino;
3132 
3133  fail_iput:
3134 	IRELE(ip);
3135  fail:
3136 	/*
3137 	 * We can't read in the inode this bucket points to, or this inode
3138 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3139 	 * some inodes and space, but at least we won't hang.
3140 	 *
3141 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3142 	 * clear the inode pointer in the bucket.
3143 	 */
3144 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3145 	return NULLAGINO;
3146 }
3147 
3148 /*
3149  * xlog_iunlink_recover
3150  *
3151  * This is called during recovery to process any inodes which
3152  * we unlinked but not freed when the system crashed.  These
3153  * inodes will be on the lists in the AGI blocks.  What we do
3154  * here is scan all the AGIs and fully truncate and free any
3155  * inodes found on the lists.  Each inode is removed from the
3156  * lists when it has been fully truncated and is freed.  The
3157  * freeing of the inode and its removal from the list must be
3158  * atomic.
3159  */
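/*
 * The on-disk structure being walked (illustrative): each AGI holds
 * XFS_AGI_UNLINKED_BUCKETS singly linked list heads, each threaded
 * through the di_next_unlinked field of the inodes on the list:
 *
 *	agi_unlinked[bucket] -> inode A -> inode B -> ... -> NULLAGINO
 */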
3160 STATIC void
3161 xlog_recover_process_iunlinks(
3162 	struct xlog	*log)
3163 {
3164 	xfs_mount_t	*mp;
3165 	xfs_agnumber_t	agno;
3166 	xfs_agi_t	*agi;
3167 	xfs_buf_t	*agibp;
3168 	xfs_agino_t	agino;
3169 	int		bucket;
3170 	int		error;
3171 	uint		mp_dmevmask;
3172 
3173 	mp = log->l_mp;
3174 
3175 	/*
3176 	 * Prevent any DMAPI event from being sent while in this function.
3177 	 */
3178 	mp_dmevmask = mp->m_dmevmask;
3179 	mp->m_dmevmask = 0;
3180 
3181 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3182 		/*
3183 		 * Find the agi for this ag.
3184 		 */
3185 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3186 		if (error) {
3187 			/*
3188 			 * AGI is b0rked. Don't process it.
3189 			 *
3190 			 * We should probably mark the filesystem as corrupt
3191 			 * after we've recovered all the ag's we can....
3192 			 */
3193 			continue;
3194 		}
3195 		/*
3196 		 * Unlock the buffer so that it can be acquired in the normal
3197 		 * course of the transaction to truncate and free each inode.
3198 		 * Because we are not racing with anyone else here for the AGI
3199 		 * buffer, we don't even need to hold it locked to read the
3200 		 * initial unlinked bucket entries out of the buffer.  We keep a
3201 		 * buffer reference, though, so that it stays pinned in memory
3202 		 * while we need the buffer.
3203 		 */
3204 		agi = XFS_BUF_TO_AGI(agibp);
3205 		xfs_buf_unlock(agibp);
3206 
3207 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3208 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3209 			while (agino != NULLAGINO) {
3210 				agino = xlog_recover_process_one_iunlink(mp,
3211 							agno, agino, bucket);
3212 			}
3213 		}
3214 		xfs_buf_rele(agibp);
3215 	}
3216 
3217 	mp->m_dmevmask = mp_dmevmask;
3218 }
3219 
3220 /*
3221  * Unpack the log buffer data and CRC check it.  If the check fails, issue a
3222  * warning if and only if the CRC in the header is non-zero. This makes the
3223  * check an advisory warning, and the zero CRC check will prevent failure
3224  * warnings from being emitted when upgrading the kernel from one that does not
3225  * add CRCs by default.
3226  *
3227  * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3228  * corruption failure.
3229  */
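/*
 * Summarising the cases handled below:
 *
 *	CRC matches				-> recover normally
 *	mismatch, h_crc == 0, no CRC feature	-> silently continue
 *	mismatch, h_crc != 0, no CRC feature	-> warn, continue (advisory)
 *	mismatch, CRC-enabled filesystem	-> warn, fail EFSCORRUPTED
 */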
3230 STATIC int
3231 xlog_unpack_data_crc(
3232 	struct xlog_rec_header	*rhead,
3233 	xfs_caddr_t		dp,
3234 	struct xlog		*log)
3235 {
3236 	__le32			crc;
3237 
3238 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3239 	if (crc != rhead->h_crc) {
3240 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3241 			xfs_alert(log->l_mp,
3242 		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3243 					le32_to_cpu(rhead->h_crc),
3244 					le32_to_cpu(crc));
3245 			xfs_hex_dump(dp, 32);
3246 		}
3247 
3248 		/*
3249 		 * If we've detected a log record corruption, then we can't
3250 		 * recover past this point. Abort recovery if we are enforcing
3251 		 * CRC protection by punting an error back up the stack.
3252 		 */
3253 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3254 			return EFSCORRUPTED;
3255 	}
3256 
3257 	return 0;
3258 }
3259 
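/*
 * When a log record is written, the first four bytes of each basic block
 * in its body are overwritten with the record's cycle number (so torn
 * writes can be detected) and the displaced words are stashed in
 * h_cycle_data[] and, for v2 logs with bodies larger than
 * XLOG_HEADER_CYCLE_SIZE, in the extended headers.  xlog_unpack_data()
 * reverses that transformation before the record is processed.
 */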
3260 STATIC int
3261 xlog_unpack_data(
3262 	struct xlog_rec_header	*rhead,
3263 	xfs_caddr_t		dp,
3264 	struct xlog		*log)
3265 {
3266 	int			i, j, k;
3267 	int			error;
3268 
3269 	error = xlog_unpack_data_crc(rhead, dp, log);
3270 	if (error)
3271 		return error;
3272 
3273 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3274 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3275 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3276 		dp += BBSIZE;
3277 	}
3278 
3279 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3280 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3281 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3282 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3283 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3284 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3285 			dp += BBSIZE;
3286 		}
3287 	}
3288 
3289 	return 0;
3290 }
3291 
3292 STATIC int
3293 xlog_valid_rec_header(
3294 	struct xlog		*log,
3295 	struct xlog_rec_header	*rhead,
3296 	xfs_daddr_t		blkno)
3297 {
3298 	int			hlen;
3299 
3300 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3301 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3302 				XFS_ERRLEVEL_LOW, log->l_mp);
3303 		return XFS_ERROR(EFSCORRUPTED);
3304 	}
3305 	if (unlikely(!rhead->h_version ||
3306 		     (be32_to_cpu(rhead->h_version) & ~XLOG_VERSION_OKBITS))) {
3308 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3309 			__func__, be32_to_cpu(rhead->h_version));
3310 		return XFS_ERROR(EIO);
3311 	}
3312 
3313 	/* LR body must have data or it wouldn't have been written */
3314 	hlen = be32_to_cpu(rhead->h_len);
3315 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3316 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3317 				XFS_ERRLEVEL_LOW, log->l_mp);
3318 		return XFS_ERROR(EFSCORRUPTED);
3319 	}
3320 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3321 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3322 				XFS_ERRLEVEL_LOW, log->l_mp);
3323 		return XFS_ERROR(EFSCORRUPTED);
3324 	}
3325 	return 0;
3326 }
3327 
3328 /*
3329  * Read the log from tail to head and process the log records found.
3330  * Handle the two cases where the tail and head are in the same cycle
3331  * and where the active portion of the log wraps around the end of
3332  * the physical log separately.  The pass parameter is passed through
3333  * to the routines called to process the data and is not looked at
3334  * here.
3335  */
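/*
 * The two layouts handled below (illustrative):
 *
 *	no wrap:	|....tail==========head........|
 *	wrapped:	|====head..........tail========|
 *
 * where '=' marks the active region that is read and replayed.
 */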
3336 STATIC int
3337 xlog_do_recovery_pass(
3338 	struct xlog		*log,
3339 	xfs_daddr_t		head_blk,
3340 	xfs_daddr_t		tail_blk,
3341 	int			pass)
3342 {
3343 	xlog_rec_header_t	*rhead;
3344 	xfs_daddr_t		blk_no;
3345 	xfs_caddr_t		offset;
3346 	xfs_buf_t		*hbp, *dbp;
3347 	int			error = 0, h_size;
3348 	int			bblks, split_bblks;
3349 	int			hblks, split_hblks, wrapped_hblks;
3350 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3351 
3352 	ASSERT(head_blk != tail_blk);
3353 
3354 	/*
3355 	 * Read the header of the tail block and get the iclog buffer size from
3356 	 * h_size.  Use this to tell how many sectors make up the log header.
3357 	 */
3358 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3359 		/*
3360 		 * When using variable length iclogs, read first sector of
3361 		 * iclog header and extract the header size from it.  Get a
3362 		 * new hbp that is the correct size.
3363 		 */
3364 		hbp = xlog_get_bp(log, 1);
3365 		if (!hbp)
3366 			return ENOMEM;
3367 
3368 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3369 		if (error)
3370 			goto bread_err1;
3371 
3372 		rhead = (xlog_rec_header_t *)offset;
3373 		error = xlog_valid_rec_header(log, rhead, tail_blk);
3374 		if (error)
3375 			goto bread_err1;
3376 		h_size = be32_to_cpu(rhead->h_size);
3377 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3378 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3379 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3380 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3381 				hblks++;
3382 			xlog_put_bp(hbp);
3383 			hbp = xlog_get_bp(log, hblks);
3384 		} else {
3385 			hblks = 1;
3386 		}
3387 	} else {
3388 		ASSERT(log->l_sectBBsize == 1);
3389 		hblks = 1;
3390 		hbp = xlog_get_bp(log, 1);
3391 		h_size = XLOG_BIG_RECORD_BSIZE;
3392 	}
3393 
3394 	if (!hbp)
3395 		return ENOMEM;
3396 	dbp = xlog_get_bp(log, BTOBB(h_size));
3397 	if (!dbp) {
3398 		xlog_put_bp(hbp);
3399 		return ENOMEM;
3400 	}
3401 
3402 	memset(rhash, 0, sizeof(rhash));
3403 	if (tail_blk <= head_blk) {
3404 		for (blk_no = tail_blk; blk_no < head_blk; ) {
3405 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3406 			if (error)
3407 				goto bread_err2;
3408 
3409 			rhead = (xlog_rec_header_t *)offset;
3410 			error = xlog_valid_rec_header(log, rhead, blk_no);
3411 			if (error)
3412 				goto bread_err2;
3413 
3414 			/* blocks in data section */
3415 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3416 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3417 					   &offset);
3418 			if (error)
3419 				goto bread_err2;
3420 
3421 			error = xlog_unpack_data(rhead, offset, log);
3422 			if (error)
3423 				goto bread_err2;
3424 
3425 			error = xlog_recover_process_data(log,
3426 						rhash, rhead, offset, pass);
3427 			if (error)
3428 				goto bread_err2;
3429 			blk_no += bblks + hblks;
3430 		}
3431 	} else {
3432 		/*
3433 		 * Perform recovery around the end of the physical log.
3434 		 * When the head is not on the same cycle number as the tail,
3435 		 * we can't do a sequential recovery as above.
3436 		 */
3437 		blk_no = tail_blk;
3438 		while (blk_no < log->l_logBBsize) {
3439 			/*
3440 			 * Check for header wrapping around physical end-of-log
3441 			 */
3442 			offset = hbp->b_addr;
3443 			split_hblks = 0;
3444 			wrapped_hblks = 0;
3445 			if (blk_no + hblks <= log->l_logBBsize) {
3446 				/* Read header in one read */
3447 				error = xlog_bread(log, blk_no, hblks, hbp,
3448 						   &offset);
3449 				if (error)
3450 					goto bread_err2;
3451 			} else {
3452 				/* This LR is split across physical log end */
3453 				if (blk_no != log->l_logBBsize) {
3454 					/* some data before physical log end */
3455 					ASSERT(blk_no <= INT_MAX);
3456 					split_hblks = log->l_logBBsize - (int)blk_no;
3457 					ASSERT(split_hblks > 0);
3458 					error = xlog_bread(log, blk_no,
3459 							   split_hblks, hbp,
3460 							   &offset);
3461 					if (error)
3462 						goto bread_err2;
3463 				}
3464 
3465 				/*
3466 				 * Note: this black magic still works with
3467 				 * large sector sizes (non-512) only because:
3468 				 * - we increased the buffer size originally
3469 				 *   by 1 sector giving us enough extra space
3470 				 *   for the second read;
3471 				 * - the log start is guaranteed to be sector
3472 				 *   aligned;
3473 				 * - we read the log end (LR header start)
3474 				 *   _first_, then the log start (LR header end)
3475 				 *   - order is important.
3476 				 */
3477 				wrapped_hblks = hblks - split_hblks;
3478 				error = xlog_bread_offset(log, 0,
3479 						wrapped_hblks, hbp,
3480 						offset + BBTOB(split_hblks));
3481 				if (error)
3482 					goto bread_err2;
3483 			}
3484 			rhead = (xlog_rec_header_t *)offset;
3485 			error = xlog_valid_rec_header(log, rhead,
3486 						split_hblks ? blk_no : 0);
3487 			if (error)
3488 				goto bread_err2;
3489 
3490 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3491 			blk_no += hblks;
3492 
3493 			/* Read in data for log record */
3494 			if (blk_no + bblks <= log->l_logBBsize) {
3495 				error = xlog_bread(log, blk_no, bblks, dbp,
3496 						   &offset);
3497 				if (error)
3498 					goto bread_err2;
3499 			} else {
				/*
				 * This log record is split across the
				 * physical end of the log.
				 */
3502 				offset = dbp->b_addr;
3503 				split_bblks = 0;
3504 				if (blk_no != log->l_logBBsize) {
					/*
					 * Some data is before the physical
					 * end of the log.
					 */
3507 					ASSERT(!wrapped_hblks);
3508 					ASSERT(blk_no <= INT_MAX);
3509 					split_bblks =
3510 						log->l_logBBsize - (int)blk_no;
3511 					ASSERT(split_bblks > 0);
3512 					error = xlog_bread(log, blk_no,
3513 							split_bblks, dbp,
3514 							&offset);
3515 					if (error)
3516 						goto bread_err2;
3517 				}
3518 
3519 				/*
3520 				 * Note: this black magic still works with
3521 				 * large sector sizes (non-512) only because:
3522 				 * - we increased the buffer size originally
3523 				 *   by 1 sector giving us enough extra space
3524 				 *   for the second read;
3525 				 * - the log start is guaranteed to be sector
3526 				 *   aligned;
3527 				 * - we read the log end (LR header start)
3528 				 *   _first_, then the log start (LR header end)
3529 				 *   - order is important.
3530 				 */
3531 				error = xlog_bread_offset(log, 0,
3532 						bblks - split_bblks, dbp,
3533 						offset + BBTOB(split_bblks));
3534 				if (error)
3535 					goto bread_err2;
3536 			}
3537 
3538 			error = xlog_unpack_data(rhead, offset, log);
3539 			if (error)
3540 				goto bread_err2;
3541 
3542 			error = xlog_recover_process_data(log, rhash,
3543 							rhead, offset, pass);
3544 			if (error)
3545 				goto bread_err2;
3546 			blk_no += bblks;
3547 		}
3548 
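		/*
		 * The loop above stops once blk_no reaches or passes the
		 * physical end of the log (the final record may have wrapped
		 * into the first part), so fold blk_no back into the range
		 * [0, l_logBBsize) before continuing up to head_blk.
		 */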
3549 		ASSERT(blk_no >= log->l_logBBsize);
3550 		blk_no -= log->l_logBBsize;
3551 
3552 		/* read first part of physical log */
3553 		while (blk_no < head_blk) {
3554 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3555 			if (error)
3556 				goto bread_err2;
3557 
3558 			rhead = (xlog_rec_header_t *)offset;
3559 			error = xlog_valid_rec_header(log, rhead, blk_no);
3560 			if (error)
3561 				goto bread_err2;
3562 
3563 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3565 					   &offset);
3566 			if (error)
3567 				goto bread_err2;
3568 
3569 			error = xlog_unpack_data(rhead, offset, log);
3570 			if (error)
3571 				goto bread_err2;
3572 
3573 			error = xlog_recover_process_data(log, rhash,
3574 							rhead, offset, pass);
3575 			if (error)
3576 				goto bread_err2;
3577 			blk_no += bblks + hblks;
3578 		}
3579 	}
3580 
3581  bread_err2:
3582 	xlog_put_bp(dbp);
3583  bread_err1:
3584 	xlog_put_bp(hbp);
3585 	return error;
3586 }
3587 
3588 /*
3589  * Do the recovery of the log.  We actually do this in two phases.
3590  * The two passes are necessary in order to implement the function
3591  * of cancelling a record written into the log.  The first pass
3592  * determines those things which have been cancelled, and the
3593  * second pass replays log items normally except for those which
3594  * have been cancelled.  The handling of the replay and cancellations
3595  * takes place in the log item type specific routines.
3596  *
3597  * The table of items which have cancel records in the log is allocated
3598  * and freed at this level, since only here do we know when all of
3599  * the log recovery has been completed.
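 *
 * For example: if a transaction freed a metadata buffer and its blocks
 * were then reused for inodes before the crash, replaying the stale
 * buffer image in pass 2 would clobber the newer inode data.  The cancel
 * record (a buf log item carrying XFS_BLF_CANCEL) noted in pass 1
 * suppresses exactly that replay.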
3600  */
3601 STATIC int
3602 xlog_do_log_recovery(
3603 	struct xlog	*log,
3604 	xfs_daddr_t	head_blk,
3605 	xfs_daddr_t	tail_blk)
3606 {
3607 	int		error, i;
3608 
3609 	ASSERT(head_blk != tail_blk);
3610 
3611 	/*
3612 	 * First do a pass to find all of the cancelled buf log items.
3613 	 * Store them in the buf_cancel_table for use in the second pass.
3614 	 */
3615 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3616 						 sizeof(struct list_head),
3617 						 KM_SLEEP);
3618 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3619 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
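	/*
	 * The cancel table is a small hash of list heads; the buckets are
	 * selected by hashing the block number (XLOG_BUF_CANCEL_BUCKET in
	 * xfs_log_priv.h), so pass 2 can cheaply ask "was this buffer
	 * cancelled?" for every buf log item it meets.
	 */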
3620 
3621 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3622 				      XLOG_RECOVER_PASS1);
	if (error) {
3624 		kmem_free(log->l_buf_cancel_table);
3625 		log->l_buf_cancel_table = NULL;
3626 		return error;
3627 	}
3628 	/*
3629 	 * Then do a second pass to actually recover the items in the log.
3630 	 * When it is complete free the table of buf cancel items.
3631 	 */
3632 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3633 				      XLOG_RECOVER_PASS2);
#ifdef DEBUG
	if (!error) {
		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */
3642 
3643 	kmem_free(log->l_buf_cancel_table);
3644 	log->l_buf_cancel_table = NULL;
3645 
3646 	return error;
3647 }
3648 
3649 /*
3650  * Do the actual recovery
3651  */
3652 STATIC int
3653 xlog_do_recover(
3654 	struct xlog	*log,
3655 	xfs_daddr_t	head_blk,
3656 	xfs_daddr_t	tail_blk)
3657 {
3658 	int		error;
3659 	xfs_buf_t	*bp;
3660 	xfs_sb_t	*sbp;
3661 
3662 	/*
3663 	 * First replay the images in the log.
3664 	 */
3665 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3666 	if (error)
3667 		return error;
3668 
3669 	/*
3670 	 * If IO errors happened during recovery, bail out.
3671 	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp))
		return EIO;
3675 
	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the
	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
	 * to be the lsn of the last known good LR on disk.  If there are
	 * extent frees or iunlinks, they will have entries in the AIL, so we
	 * look at the AIL to determine how to set the tail_lsn.
	 */
3685 	xlog_assign_tail_lsn(log->l_mp);
3686 
3687 	/*
3688 	 * Now that we've finished replaying all buffer and inode
3689 	 * updates, re-read in the superblock and reverify it.
3690 	 */
3691 	bp = xfs_getsb(log->l_mp, 0);
3692 	XFS_BUF_UNDONE(bp);
3693 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
3694 	XFS_BUF_READ(bp);
3695 	XFS_BUF_UNASYNC(bp);
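	/*
	 * Attach the superblock verifier so the contents are validated
	 * when the read completes.
	 */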
3696 	bp->b_ops = &xfs_sb_buf_ops;
3697 	xfsbdstrat(log->l_mp, bp);
3698 	error = xfs_buf_iowait(bp);
3699 	if (error) {
3700 		xfs_buf_ioerror_alert(bp, __func__);
3701 		ASSERT(0);
3702 		xfs_buf_relse(bp);
3703 		return error;
3704 	}
3705 
3706 	/* Convert superblock from on-disk format */
3707 	sbp = &log->l_mp->m_sb;
3708 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3709 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3710 	ASSERT(xfs_sb_good_version(sbp));
3711 	xfs_buf_relse(bp);
3712 
3713 	/* We've re-read the superblock so re-initialize per-cpu counters */
3714 	xfs_icsb_reinit_counters(log->l_mp);
3715 
3716 	xlog_recover_check_summary(log);
3717 
3718 	/* Normal transactions can now occur */
3719 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3720 	return 0;
3721 }
3722 
3723 /*
3724  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3725  *
3726  * Return error or zero.
3727  */
3728 int
3729 xlog_recover(
3730 	struct xlog	*log)
3731 {
3732 	xfs_daddr_t	head_blk, tail_blk;
3733 	int		error;
3734 
3735 	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;
3738 
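	/*
	 * If the tail and head blocks match, the log is clean and there is
	 * nothing to replay.
	 */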
3739 	if (tail_blk != head_blk) {
3740 		/* There used to be a comment here:
3741 		 *
3742 		 * disallow recovery on read-only mounts.  note -- mount
3743 		 * checks for ENOSPC and turns it into an intelligent
3744 		 * error message.
3745 		 * ...but this is no longer true.  Now, unless you specify
3746 		 * NORECOVERY (in which case this function would never be
3747 		 * called), we just go ahead and recover.  We do this all
3748 		 * under the vfs layer, so we can get away with it unless
3749 		 * the device itself is read-only, in which case we fail.
3750 		 */
		error = xfs_dev_is_read_only(log->l_mp, "recovery");
		if (error)
			return error;
3754 
3755 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3756 				log->l_mp->m_logname ? log->l_mp->m_logname
3757 						     : "internal");
3758 
3759 		error = xlog_do_recover(log, head_blk, tail_blk);
3760 		log->l_flags |= XLOG_RECOVERY_NEEDED;
3761 	}
3762 	return error;
3763 }
3764 
3765 /*
3766  * In the first part of recovery we replay inodes and buffers and build
3767  * up the list of extent free items which need to be processed.  Here
3768  * we process the extent free items and clean up the on disk unlinked
3769  * inode lists.  This is separated from the first part of recovery so
3770  * that the root and real-time bitmap inodes can be read in from disk in
3771  * between the two stages.  This is necessary so that we can free space
3772  * in the real-time portion of the file system.
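 *
 * Concretely, xlog_recover_process_efis() completes any extent free
 * intents (EFIs) that were logged without a matching EFD, and
 * xlog_recover_process_iunlinks() walks the AGI unlinked buckets to
 * reap inodes that were unlinked but still in use at crash time.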
3773  */
3774 int
3775 xlog_recover_finish(
3776 	struct xlog	*log)
3777 {
3778 	/*
3779 	 * Now we're ready to do the transactions needed for the
3780 	 * rest of recovery.  Start with completing all the extent
3781 	 * free intent records and then process the unlinked inode
3782 	 * lists.  At this point, we essentially run in normal mode
3783 	 * except that we're still performing recovery actions
3784 	 * rather than accepting new requests.
3785 	 */
3786 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_efis(log);
3789 		if (error) {
3790 			xfs_alert(log->l_mp, "Failed to recover EFIs");
3791 			return error;
3792 		}
3793 		/*
3794 		 * Sync the log to get all the EFIs out of the AIL.
3795 		 * This isn't absolutely necessary, but it helps in
3796 		 * case the unlink transactions would have problems
3797 		 * pushing the EFIs out of the way.
3798 		 */
3799 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3800 
3801 		xlog_recover_process_iunlinks(log);
3802 
3803 		xlog_recover_check_summary(log);
3804 
3805 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3806 				log->l_mp->m_logname ? log->l_mp->m_logname
3807 						     : "internal");
3808 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3809 	} else {
3810 		xfs_info(log->l_mp, "Ending clean mount");
3811 	}
3812 	return 0;
3813 }
3814 
3815 
3816 #if defined(DEBUG)
/*
 * Read all of the agf and agi counters and sum the free block and inode
 * counts across all AGs.  Note that the totals are only accumulated for
 * debugging; they are no longer compared against the superblock counters
 * here, though a failed AGF or AGI read is still reported.
 */
3821 void
3822 xlog_recover_check_summary(
3823 	struct xlog	*log)
3824 {
3825 	xfs_mount_t	*mp;
3826 	xfs_agf_t	*agfp;
3827 	xfs_buf_t	*agfbp;
3828 	xfs_buf_t	*agibp;
3829 	xfs_agnumber_t	agno;
3830 	__uint64_t	freeblks;
3831 	__uint64_t	itotal;
3832 	__uint64_t	ifree;
3833 	int		error;
3834 
3835 	mp = log->l_mp;
3836 
3837 	freeblks = 0LL;
3838 	itotal = 0LL;
3839 	ifree = 0LL;
3840 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3841 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3842 		if (error) {
3843 			xfs_alert(mp, "%s agf read failed agno %d error %d",
3844 						__func__, agno, error);
3845 		} else {
3846 			agfp = XFS_BUF_TO_AGF(agfbp);
3847 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
3848 				    be32_to_cpu(agfp->agf_flcount);
3849 			xfs_buf_relse(agfbp);
3850 		}
3851 
3852 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3853 		if (error) {
3854 			xfs_alert(mp, "%s agi read failed agno %d error %d",
3855 						__func__, agno, error);
3856 		} else {
3857 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
3858 
3859 			itotal += be32_to_cpu(agi->agi_count);
3860 			ifree += be32_to_cpu(agi->agi_freecount);
3861 			xfs_buf_relse(agibp);
3862 		}
3863 	}
3864 }
3865 #endif /* DEBUG */
3866