xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision 161f4089)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_error.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_btree.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_inode_item.h"
36 #include "xfs_alloc.h"
37 #include "xfs_ialloc.h"
38 #include "xfs_log_priv.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_log_recover.h"
41 #include "xfs_extfree_item.h"
42 #include "xfs_trans_priv.h"
43 #include "xfs_quota.h"
44 #include "xfs_cksum.h"
45 #include "xfs_trace.h"
46 #include "xfs_icache.h"
47 #include "xfs_icreate_item.h"
48 
49 /* Need all the magic numbers and buffer ops structures from these headers */
50 #include "xfs_symlink.h"
51 #include "xfs_da_btree.h"
52 #include "xfs_dir2_format.h"
53 #include "xfs_dir2.h"
54 #include "xfs_attr_leaf.h"
55 #include "xfs_attr_remote.h"
56 
57 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
58 
59 STATIC int
60 xlog_find_zeroed(
61 	struct xlog	*,
62 	xfs_daddr_t	*);
63 STATIC int
64 xlog_clear_stale_blocks(
65 	struct xlog	*,
66 	xfs_lsn_t);
67 #if defined(DEBUG)
68 STATIC void
69 xlog_recover_check_summary(
70 	struct xlog *);
71 #else
72 #define	xlog_recover_check_summary(log)
73 #endif
74 
75 /*
76  * This structure is used during recovery to record the buf log items which
77  * have been canceled and should not be replayed.
78  */
79 struct xfs_buf_cancel {
80 	xfs_daddr_t		bc_blkno;
81 	uint			bc_len;
82 	int			bc_refcount;
83 	struct list_head	bc_list;
84 };
85 
86 /*
87  * Sector aligned buffer routines for buffer create/read/write/access
88  */
89 
90 /*
91  * Verify that the given count of basic blocks is a valid number of blocks
92  * to specify for an operation involving the given XFS log buffer.
93  * Returns nonzero if the count is valid, 0 otherwise.
94  */
95 
96 static inline int
97 xlog_buf_bbcount_valid(
98 	struct xlog	*log,
99 	int		bbcount)
100 {
101 	return bbcount > 0 && bbcount <= log->l_logBBsize;
102 }
103 
104 /*
105  * Allocate a buffer to hold log data.  The buffer needs to be able
106  * to map to a range of nbblks basic blocks at any valid (basic
107  * block) offset within the log.
108  */
109 STATIC xfs_buf_t *
110 xlog_get_bp(
111 	struct xlog	*log,
112 	int		nbblks)
113 {
114 	struct xfs_buf	*bp;
115 
116 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
117 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
118 			nbblks);
119 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
120 		return NULL;
121 	}
122 
123 	/*
124 	 * We do log I/O in units of log sectors (a power-of-2
125 	 * multiple of the basic block size), so we round up the
126 	 * requested size to accommodate the basic blocks required
127 	 * for complete log sectors.
128 	 *
129 	 * In addition, the buffer may be used for a non-sector-
130 	 * aligned block offset, in which case an I/O of the
131 	 * requested size could extend beyond the end of the
132 	 * buffer.  If the requested size is only 1 basic block it
133 	 * will never straddle a sector boundary, so this won't be
134 	 * an issue.  Nor will this be a problem if the log I/O is
135 	 * done in basic blocks (sector size 1).  But otherwise we
136 	 * extend the buffer by one extra log sector to ensure
137 	 * there's space to accommodate this possibility.
138 	 */
139 	if (nbblks > 1 && log->l_sectBBsize > 1)
140 		nbblks += log->l_sectBBsize;
141 	nbblks = round_up(nbblks, log->l_sectBBsize);
142 
143 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
144 	if (bp)
145 		xfs_buf_unlock(bp);
146 	return bp;
147 }
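
/*
 * Worked example of the sizing above (hypothetical geometry): with
 * l_sectBBsize = 4 and a request for nbblks = 3, the count is padded to
 * 3 + 4 = 7 and rounded up to 8, i.e. two full log sectors.  A 3-block
 * region starting at any basic block offset within a sector spans at
 * most two sectors, so the sector-aligned I/O later done on this buffer
 * always fits:
 *
 *	nbblks = 3;
 *	if (nbblks > 1 && log->l_sectBBsize > 1)
 *		nbblks += log->l_sectBBsize;		// 3 + 4 = 7
 *	nbblks = round_up(nbblks, log->l_sectBBsize);	// 8
 */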
148 
149 STATIC void
150 xlog_put_bp(
151 	xfs_buf_t	*bp)
152 {
153 	xfs_buf_free(bp);
154 }
155 
156 /*
157  * Return the address of the start of the given block number's data
158  * in a log buffer.  The buffer covers a log sector-aligned region.
159  */
160 STATIC xfs_caddr_t
161 xlog_align(
162 	struct xlog	*log,
163 	xfs_daddr_t	blk_no,
164 	int		nbblks,
165 	struct xfs_buf	*bp)
166 {
167 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
168 
169 	ASSERT(offset + nbblks <= bp->b_length);
170 	return bp->b_addr + BBTOB(offset);
171 }
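
/*
 * Example of the alignment math (hypothetical values): with
 * l_sectBBsize = 8, a read of blk_no = 13 is issued from the rounded
 * down block 8, so the caller's data starts 13 & 7 = 5 basic blocks
 * into the buffer and xlog_align() returns bp->b_addr + BBTOB(5),
 * i.e. 5 * 512 bytes past the start of the mapping.
 */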
172 
173 
174 /*
175  * nbblks should be uint, but oh well.  Keeping it int lets us catch a bogus 32-bit length.
176  */
177 STATIC int
178 xlog_bread_noalign(
179 	struct xlog	*log,
180 	xfs_daddr_t	blk_no,
181 	int		nbblks,
182 	struct xfs_buf	*bp)
183 {
184 	int		error;
185 
186 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
187 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
188 			nbblks);
189 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
190 		return EFSCORRUPTED;
191 	}
192 
193 	blk_no = round_down(blk_no, log->l_sectBBsize);
194 	nbblks = round_up(nbblks, log->l_sectBBsize);
195 
196 	ASSERT(nbblks > 0);
197 	ASSERT(nbblks <= bp->b_length);
198 
199 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
200 	XFS_BUF_READ(bp);
201 	bp->b_io_length = nbblks;
202 	bp->b_error = 0;
203 
204 	xfsbdstrat(log->l_mp, bp);
205 	error = xfs_buf_iowait(bp);
206 	if (error)
207 		xfs_buf_ioerror_alert(bp, __func__);
208 	return error;
209 }
210 
211 STATIC int
212 xlog_bread(
213 	struct xlog	*log,
214 	xfs_daddr_t	blk_no,
215 	int		nbblks,
216 	struct xfs_buf	*bp,
217 	xfs_caddr_t	*offset)
218 {
219 	int		error;
220 
221 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
222 	if (error)
223 		return error;
224 
225 	*offset = xlog_align(log, blk_no, nbblks, bp);
226 	return 0;
227 }
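
/*
 * A minimal sketch of how the helpers above are combined throughout
 * this file (hypothetical caller; 'log' and 'blk_no' assumed in scope):
 *
 *	struct xfs_buf	*bp;
 *	xfs_caddr_t	offset;
 *	uint		cycle;
 *	int		error;
 *
 *	bp = xlog_get_bp(log, 1);
 *	if (!bp)
 *		return ENOMEM;
 *	error = xlog_bread(log, blk_no, 1, bp, &offset);
 *	if (!error)
 *		cycle = xlog_get_cycle(offset);
 *	xlog_put_bp(bp);
 */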
228 
229 /*
230  * Read at an offset into the buffer. Returns with the buffer in its original
231  * state regardless of the result of the read.
232  */
233 STATIC int
234 xlog_bread_offset(
235 	struct xlog	*log,
236 	xfs_daddr_t	blk_no,		/* block to read from */
237 	int		nbblks,		/* blocks to read */
238 	struct xfs_buf	*bp,
239 	xfs_caddr_t	offset)
240 {
241 	xfs_caddr_t	orig_offset = bp->b_addr;
242 	int		orig_len = BBTOB(bp->b_length);
243 	int		error, error2;
244 
245 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
246 	if (error)
247 		return error;
248 
249 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
250 
251 	/* must reset buffer pointer even on error */
252 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
253 	if (error)
254 		return error;
255 	return error2;
256 }
257 
258 /*
259  * Write out the buffer at the given block for the given number of blocks.
260  * The buffer is kept locked across the write and is returned locked.
261  * This can only be used for synchronous log writes.
262  */
263 STATIC int
264 xlog_bwrite(
265 	struct xlog	*log,
266 	xfs_daddr_t	blk_no,
267 	int		nbblks,
268 	struct xfs_buf	*bp)
269 {
270 	int		error;
271 
272 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
273 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
274 			nbblks);
275 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
276 		return EFSCORRUPTED;
277 	}
278 
279 	blk_no = round_down(blk_no, log->l_sectBBsize);
280 	nbblks = round_up(nbblks, log->l_sectBBsize);
281 
282 	ASSERT(nbblks > 0);
283 	ASSERT(nbblks <= bp->b_length);
284 
285 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
286 	XFS_BUF_ZEROFLAGS(bp);
287 	xfs_buf_hold(bp);
288 	xfs_buf_lock(bp);
289 	bp->b_io_length = nbblks;
290 	bp->b_error = 0;
291 
292 	error = xfs_bwrite(bp);
293 	if (error)
294 		xfs_buf_ioerror_alert(bp, __func__);
295 	xfs_buf_relse(bp);
296 	return error;
297 }
298 
299 #ifdef DEBUG
300 /*
301  * dump debug superblock and log record information
302  */
303 STATIC void
304 xlog_header_check_dump(
305 	xfs_mount_t		*mp,
306 	xlog_rec_header_t	*head)
307 {
308 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
309 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
310 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
311 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
312 }
313 #else
314 #define xlog_header_check_dump(mp, head)
315 #endif
316 
317 /*
318  * check log record header for recovery
319  */
320 STATIC int
321 xlog_header_check_recover(
322 	xfs_mount_t		*mp,
323 	xlog_rec_header_t	*head)
324 {
325 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
326 
327 	/*
328 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
329 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
330 	 * a dirty log created in IRIX.
331 	 */
332 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
333 		xfs_warn(mp,
334 	"dirty log written in incompatible format - can't recover");
335 		xlog_header_check_dump(mp, head);
336 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
337 				 XFS_ERRLEVEL_HIGH, mp);
338 		return XFS_ERROR(EFSCORRUPTED);
339 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
340 		xfs_warn(mp,
341 	"dirty log entry has mismatched uuid - can't recover");
342 		xlog_header_check_dump(mp, head);
343 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
344 				 XFS_ERRLEVEL_HIGH, mp);
345 		return XFS_ERROR(EFSCORRUPTED);
346 	}
347 	return 0;
348 }
349 
350 /*
351  * check a log record header against this mount's superblock uuid
352  */
353 STATIC int
354 xlog_header_check_mount(
355 	xfs_mount_t		*mp,
356 	xlog_rec_header_t	*head)
357 {
358 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
359 
360 	if (uuid_is_nil(&head->h_fs_uuid)) {
361 		/*
362 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
363 		 * h_fs_uuid is nil, we assume this log was last mounted
364 		 * by IRIX and continue.
365 		 */
366 		xfs_warn(mp, "nil uuid in log - IRIX style log");
367 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
368 		xfs_warn(mp, "log has mismatched uuid - can't recover");
369 		xlog_header_check_dump(mp, head);
370 		XFS_ERROR_REPORT("xlog_header_check_mount",
371 				 XFS_ERRLEVEL_HIGH, mp);
372 		return XFS_ERROR(EFSCORRUPTED);
373 	}
374 	return 0;
375 }
376 
377 STATIC void
378 xlog_recover_iodone(
379 	struct xfs_buf	*bp)
380 {
381 	if (bp->b_error) {
382 		/*
383 		 * We're not going to bother about retrying
384 		 * this during recovery. One strike!
385 		 */
386 		xfs_buf_ioerror_alert(bp, __func__);
387 		xfs_force_shutdown(bp->b_target->bt_mount,
388 					SHUTDOWN_META_IO_ERROR);
389 	}
390 	bp->b_iodone = NULL;
391 	xfs_buf_ioend(bp, 0);
392 }
393 
394 /*
395  * This routine finds (to an approximation) the first block in the physical
396  * log which contains the given cycle.  It uses a binary search algorithm.
397  * Note that the algorithm cannot be perfect because the disk will not
398  * necessarily be perfect.
399  */
400 STATIC int
401 xlog_find_cycle_start(
402 	struct xlog	*log,
403 	struct xfs_buf	*bp,
404 	xfs_daddr_t	first_blk,
405 	xfs_daddr_t	*last_blk,
406 	uint		cycle)
407 {
408 	xfs_caddr_t	offset;
409 	xfs_daddr_t	mid_blk;
410 	xfs_daddr_t	end_blk;
411 	uint		mid_cycle;
412 	int		error;
413 
414 	end_blk = *last_blk;
415 	mid_blk = BLK_AVG(first_blk, end_blk);
416 	while (mid_blk != first_blk && mid_blk != end_blk) {
417 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
418 		if (error)
419 			return error;
420 		mid_cycle = xlog_get_cycle(offset);
421 		if (mid_cycle == cycle)
422 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
423 		else
424 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
425 		mid_blk = BLK_AVG(first_blk, end_blk);
426 	}
427 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
428 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
429 
430 	*last_blk = end_blk;
431 
432 	return 0;
433 }
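
/*
 * Example of the bisection above (hypothetical 8-block log, looking for
 * the first block of cycle 9):
 *
 *	blk:    0   1   2   3   4   5   6   7
 *	cycle: 10  10  10  10   9   9   9   9
 *
 * With first_blk = 0 and *last_blk = 7: mid = 3 (cycle 10, so first_blk
 * moves up), then mid = 5 (cycle 9, end_blk moves down), then mid = 4
 * (cycle 9, end_blk moves down), and the loop exits with *last_blk = 4,
 * the first block stamped with the target cycle.
 */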
434 
435 /*
436  * Check that a range of blocks does not contain stop_on_cycle_no.
437  * Fill in *new_blk with the block offset where such a block is
438  * found, or with -1 (an invalid block number) if there is no such
439  * block in the range.  The scan needs to occur from front to back
440  * and the pointer into the region must be updated since a later
441  * routine will need to perform another test.
442  */
443 STATIC int
444 xlog_find_verify_cycle(
445 	struct xlog	*log,
446 	xfs_daddr_t	start_blk,
447 	int		nbblks,
448 	uint		stop_on_cycle_no,
449 	xfs_daddr_t	*new_blk)
450 {
451 	xfs_daddr_t	i, j;
452 	uint		cycle;
453 	xfs_buf_t	*bp;
454 	xfs_daddr_t	bufblks;
455 	xfs_caddr_t	buf = NULL;
456 	int		error = 0;
457 
458 	/*
459 	 * Greedily allocate a buffer big enough to handle the full
460 	 * range of basic blocks we'll be examining.  If that fails,
461 	 * try a smaller size.  We need to be able to read at least
462 	 * a log sector, or we're out of luck.
463 	 */
464 	bufblks = 1 << ffs(nbblks);
465 	while (bufblks > log->l_logBBsize)
466 		bufblks >>= 1;
467 	while (!(bp = xlog_get_bp(log, bufblks))) {
468 		bufblks >>= 1;
469 		if (bufblks < log->l_sectBBsize)
470 			return ENOMEM;
471 	}
472 
473 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
474 		int	bcount;
475 
476 		bcount = min(bufblks, (start_blk + nbblks - i));
477 
478 		error = xlog_bread(log, i, bcount, bp, &buf);
479 		if (error)
480 			goto out;
481 
482 		for (j = 0; j < bcount; j++) {
483 			cycle = xlog_get_cycle(buf);
484 			if (cycle == stop_on_cycle_no) {
485 				*new_blk = i+j;
486 				goto out;
487 			}
488 
489 			buf += BBSIZE;
490 		}
491 	}
492 
493 	*new_blk = -1;
494 
495 out:
496 	xlog_put_bp(bp);
497 	return error;
498 }
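
/*
 * Sketch of the chunked scan above with hypothetical numbers: for
 * start_blk = 100, nbblks = 20 and bufblks = 16, the loop issues two
 * reads covering blocks 100..115 (bcount = 16) and 116..119 (bcount = 4)
 * and inspects xlog_get_cycle() on each of the 20 block headers; the
 * first block found stamped with stop_on_cycle_no is reported via
 * *new_blk.
 */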
499 
500 /*
501  * Potentially backup over partial log record write.
502  *
503  * In the typical case, last_blk is the number of the block directly after
504  * a good log record.  Therefore, we subtract one to get the block number
505  * of the last block in the given buffer.  extra_bblks contains the number
506  * of blocks we would have read on a previous read.  This happens when the
507  * last log record is split over the end of the physical log.
508  *
509  * extra_bblks is the number of blocks potentially verified on a previous
510  * call to this routine.
511  */
512 STATIC int
513 xlog_find_verify_log_record(
514 	struct xlog		*log,
515 	xfs_daddr_t		start_blk,
516 	xfs_daddr_t		*last_blk,
517 	int			extra_bblks)
518 {
519 	xfs_daddr_t		i;
520 	xfs_buf_t		*bp;
521 	xfs_caddr_t		offset = NULL;
522 	xlog_rec_header_t	*head = NULL;
523 	int			error = 0;
524 	int			smallmem = 0;
525 	int			num_blks = *last_blk - start_blk;
526 	int			xhdrs;
527 
528 	ASSERT(start_blk != 0 || *last_blk != start_blk);
529 
530 	if (!(bp = xlog_get_bp(log, num_blks))) {
531 		if (!(bp = xlog_get_bp(log, 1)))
532 			return ENOMEM;
533 		smallmem = 1;
534 	} else {
535 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
536 		if (error)
537 			goto out;
538 		offset += ((num_blks - 1) << BBSHIFT);
539 	}
540 
541 	for (i = (*last_blk) - 1; i >= 0; i--) {
542 		if (i < start_blk) {
543 			/* valid log record not found */
544 			xfs_warn(log->l_mp,
545 		"Log inconsistent (didn't find previous header)");
546 			ASSERT(0);
547 			error = XFS_ERROR(EIO);
548 			goto out;
549 		}
550 
551 		if (smallmem) {
552 			error = xlog_bread(log, i, 1, bp, &offset);
553 			if (error)
554 				goto out;
555 		}
556 
557 		head = (xlog_rec_header_t *)offset;
558 
559 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
560 			break;
561 
562 		if (!smallmem)
563 			offset -= BBSIZE;
564 	}
565 
566 	/*
567 	 * We hit the beginning of the physical log & still no header.  Return
568 	 * to caller.  If caller can handle a return of -1, then this routine
569 	 * will be called again for the end of the physical log.
570 	 */
571 	if (i == -1) {
572 		error = -1;
573 		goto out;
574 	}
575 
576 	/*
577 	 * We have the final block of the good log (the first block
578 	 * of the log record _before_ the head), so we check the uuid.
579 	 */
580 	if ((error = xlog_header_check_mount(log->l_mp, head)))
581 		goto out;
582 
583 	/*
584 	 * We may have found a log record header before we expected one.
585 	 * last_blk will be the 1st block # with a given cycle #.  We may end
586 	 * up reading an entire log record.  In this case, we don't want to
587 	 * reset last_blk.  Only when last_blk points in the middle of a log
588 	 * record do we update last_blk.
589 	 */
590 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
591 		uint	h_size = be32_to_cpu(head->h_size);
592 
593 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
594 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
595 			xhdrs++;
596 	} else {
597 		xhdrs = 1;
598 	}
599 
600 	if (*last_blk - i + extra_bblks !=
601 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
602 		*last_blk = i;
603 
604 out:
605 	xlog_put_bp(bp);
606 	return error;
607 }
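
/*
 * Example of the record-length check above (hypothetical v2 log): with
 * h_size = 64k and XLOG_HEADER_CYCLE_SIZE = 32k, each record carries
 * xhdrs = 2 header blocks, so a record whose body is h_len bytes long
 * occupies BTOBB(h_len) + 2 basic blocks.  If the distance from the
 * found header to *last_blk differs from that, *last_blk points into
 * the middle of the record and is pulled back to the header block i.
 */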
608 
609 /*
610  * Head is defined to be the point of the log where the next log write
611  * could go.  This means that incomplete LR writes at the end are
612  * eliminated when calculating the head.  We aren't guaranteed that previous
613  * LRs have complete transactions.  We only know that no block with a cycle
614  * number of (current cycle - 1) will be present in the log if we start
615  * writing from our current block number.
616  *
617  * last_blk contains the block number of the first block with a given
618  * cycle number.
619  *
620  * Return: zero if normal, non-zero if error.
621  */
622 STATIC int
623 xlog_find_head(
624 	struct xlog	*log,
625 	xfs_daddr_t	*return_head_blk)
626 {
627 	xfs_buf_t	*bp;
628 	xfs_caddr_t	offset;
629 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
630 	int		num_scan_bblks;
631 	uint		first_half_cycle, last_half_cycle;
632 	uint		stop_on_cycle;
633 	int		error, log_bbnum = log->l_logBBsize;
634 
635 	/* Is the end of the log device zeroed? */
636 	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
637 		*return_head_blk = first_blk;
638 
639 		/* Is the whole lot zeroed? */
640 		if (!first_blk) {
641 			/* Linux XFS shouldn't generate totally zeroed logs -
642 			 * mkfs etc write a dummy unmount record to a fresh
643 			 * log so we can store the uuid in there
644 			 */
645 			xfs_warn(log->l_mp, "totally zeroed log");
646 		}
647 
648 		return 0;
649 	} else if (error) {
650 		xfs_warn(log->l_mp, "empty log check failed");
651 		return error;
652 	}
653 
654 	first_blk = 0;			/* get cycle # of 1st block */
655 	bp = xlog_get_bp(log, 1);
656 	if (!bp)
657 		return ENOMEM;
658 
659 	error = xlog_bread(log, 0, 1, bp, &offset);
660 	if (error)
661 		goto bp_err;
662 
663 	first_half_cycle = xlog_get_cycle(offset);
664 
665 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
666 	error = xlog_bread(log, last_blk, 1, bp, &offset);
667 	if (error)
668 		goto bp_err;
669 
670 	last_half_cycle = xlog_get_cycle(offset);
671 	ASSERT(last_half_cycle != 0);
672 
673 	/*
674 	 * If the 1st half cycle number is equal to the last half cycle number,
675 	 * then the entire log is stamped with the same cycle number.  In this
676 	 * case, head_blk can't be set to zero (which makes sense).  The below
677 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
678 	 * we set it to log_bbnum which is an invalid block number, but this
679  * value makes the math correct.  If head_blk doesn't change through
680 	 * all the tests below, *head_blk is set to zero at the very end rather
681 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
682 	 * in a circular file.
683 	 */
684 	if (first_half_cycle == last_half_cycle) {
685 		/*
686 		 * In this case we believe that the entire log should have
687 		 * cycle number last_half_cycle.  We need to scan backwards
688 		 * from the end verifying that there are no holes still
689 		 * containing last_half_cycle - 1.  If we find such a hole,
690 		 * then the start of that hole will be the new head.  The
691 		 * simple case looks like
692 		 *        x | x ... | x - 1 | x
693 		 * Another case that fits this picture would be
694 		 *        x | x + 1 | x ... | x
695 		 * In this case the head really is somewhere at the end of the
696 		 * log, as one of the latest writes at the beginning was
697 		 * incomplete.
698 		 * One more case is
699 		 *        x | x + 1 | x ... | x - 1 | x
700 		 * This is really the combination of the above two cases, and
701 		 * the head has to end up at the start of the x-1 hole at the
702 		 * end of the log.
703 		 *
704 		 * In the 256k log case, we will read from the beginning to the
705 		 * end of the log and search for cycle numbers equal to x-1.
706 		 * We don't worry about the x+1 blocks that we encounter,
707 		 * because we know that they cannot be the head since the log
708 		 * started with x.
709 		 */
710 		head_blk = log_bbnum;
711 		stop_on_cycle = last_half_cycle - 1;
712 	} else {
713 		/*
714 		 * In this case we want to find the first block with cycle
715 		 * number matching last_half_cycle.  We expect the log to be
716 		 * some variation on
717 		 *        x + 1 ... | x ... | x
718 		 * The first block with cycle number x (last_half_cycle) will
719 		 * be where the new head belongs.  First we do a binary search
720 		 * for the first occurrence of last_half_cycle.  The binary
721 		 * search may not be totally accurate, so then we scan back
722 		 * from there looking for occurrences of last_half_cycle before
723 		 * us.  If that backwards scan wraps around the beginning of
724 		 * the log, then we look for occurrences of last_half_cycle - 1
725 		 * at the end of the log.  The cases we're looking for look
726 		 * like
727 		 *                               v binary search stopped here
728 		 *        x + 1 ... | x | x + 1 | x ... | x
729 		 *                   ^ but we want to locate this spot
730 		 * or
731 		 *        <---------> less than scan distance
732 		 *        x + 1 ... | x ... | x - 1 | x
733 		 *                           ^ we want to locate this spot
734 		 */
735 		stop_on_cycle = last_half_cycle;
736 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
737 						&head_blk, last_half_cycle)))
738 			goto bp_err;
739 	}
740 
741 	/*
742 	 * Now validate the answer.  Scan back some number of maximum possible
743 	 * blocks and make sure each one has the expected cycle number.  The
744 	 * maximum is determined by the total possible amount of buffering
745 	 * in the in-core log.  The following number can be made tighter if
746 	 * we actually look at the block size of the filesystem.
747 	 */
748 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
749 	if (head_blk >= num_scan_bblks) {
750 		/*
751 		 * We are guaranteed that the entire check can be performed
752 		 * in one buffer.
753 		 */
754 		start_blk = head_blk - num_scan_bblks;
755 		if ((error = xlog_find_verify_cycle(log,
756 						start_blk, num_scan_bblks,
757 						stop_on_cycle, &new_blk)))
758 			goto bp_err;
759 		if (new_blk != -1)
760 			head_blk = new_blk;
761 	} else {		/* need to read 2 parts of log */
762 		/*
763 		 * We are going to scan backwards in the log in two parts.
764 		 * First we scan the physical end of the log.  In this part
765 		 * of the log, we are looking for blocks with cycle number
766 		 * last_half_cycle - 1.
767 		 * If we find one, then we know that the log starts there, as
768 		 * we've found a hole that didn't get written in going around
769 		 * the end of the physical log.  The simple case for this is
770 		 *        x + 1 ... | x ... | x - 1 | x
771 		 *        <---------> less than scan distance
772 		 * If all of the blocks at the end of the log have cycle number
773 		 * last_half_cycle, then we check the blocks at the start of
774 		 * the log looking for occurrences of last_half_cycle.  If we
775 		 * find one, then our current estimate for the location of the
776 		 * first occurrence of last_half_cycle is wrong and we move
777 		 * back to the hole we've found.  This case looks like
778 		 *        x + 1 ... | x | x + 1 | x ...
779 		 *                               ^ binary search stopped here
780 		 * Another case we need to handle that only occurs in 256k
781 		 * logs is
782 		 *        x + 1 ... | x ... | x+1 | x ...
783 		 *                   ^ binary search stops here
784 		 * In a 256k log, the scan at the end of the log will see the
785 		 * x + 1 blocks.  We need to skip past those since that is
786 		 * certainly not the head of the log.  By searching for
787 		 * last_half_cycle-1 we accomplish that.
788 		 */
789 		ASSERT(head_blk <= INT_MAX &&
790 			(xfs_daddr_t) num_scan_bblks >= head_blk);
791 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
792 		if ((error = xlog_find_verify_cycle(log, start_blk,
793 					num_scan_bblks - (int)head_blk,
794 					(stop_on_cycle - 1), &new_blk)))
795 			goto bp_err;
796 		if (new_blk != -1) {
797 			head_blk = new_blk;
798 			goto validate_head;
799 		}
800 
801 		/*
802 		 * Scan beginning of log now.  The last part of the physical
803 		 * log is good.  This scan needs to verify that it doesn't find
804 		 * the last_half_cycle.
805 		 */
806 		start_blk = 0;
807 		ASSERT(head_blk <= INT_MAX);
808 		if ((error = xlog_find_verify_cycle(log,
809 					start_blk, (int)head_blk,
810 					stop_on_cycle, &new_blk)))
811 			goto bp_err;
812 		if (new_blk != -1)
813 			head_blk = new_blk;
814 	}
815 
816 validate_head:
817 	/*
818 	 * Now we need to make sure head_blk is not pointing to a block in
819 	 * the middle of a log record.
820 	 */
821 	num_scan_bblks = XLOG_REC_SHIFT(log);
822 	if (head_blk >= num_scan_bblks) {
823 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
824 
825 		/* start ptr at last block ptr before head_blk */
826 		if ((error = xlog_find_verify_log_record(log, start_blk,
827 							&head_blk, 0)) == -1) {
828 			error = XFS_ERROR(EIO);
829 			goto bp_err;
830 		} else if (error)
831 			goto bp_err;
832 	} else {
833 		start_blk = 0;
834 		ASSERT(head_blk <= INT_MAX);
835 		if ((error = xlog_find_verify_log_record(log, start_blk,
836 							&head_blk, 0)) == -1) {
837 			/* We hit the beginning of the log during our search */
838 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
839 			new_blk = log_bbnum;
840 			ASSERT(start_blk <= INT_MAX &&
841 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
842 			ASSERT(head_blk <= INT_MAX);
843 			if ((error = xlog_find_verify_log_record(log,
844 							start_blk, &new_blk,
845 							(int)head_blk)) == -1) {
846 				error = XFS_ERROR(EIO);
847 				goto bp_err;
848 			} else if (error)
849 				goto bp_err;
850 			if (new_blk != log_bbnum)
851 				head_blk = new_blk;
852 		} else if (error)
853 			goto bp_err;
854 	}
855 
856 	xlog_put_bp(bp);
857 	if (head_blk == log_bbnum)
858 		*return_head_blk = 0;
859 	else
860 		*return_head_blk = head_blk;
861 	/*
862 	 * When returning here, we have a good block number.  A bad block number
863 	 * would mean that during a previous crash there was no clean break
864 	 * from cycle number N to cycle number N-1.  In this case, we need
865 	 * to find the first block with cycle number N-1.
866 	 */
867 	return 0;
868 
869  bp_err:
870 	xlog_put_bp(bp);
871 
872 	if (error)
873 		xfs_warn(log->l_mp, "failed to find log head");
874 	return error;
875 }
876 
877 /*
878  * Find the sync block number or the tail of the log.
879  *
880  * This will be the block number of the last record to have its
881  * associated buffers synced to disk.  Every log record header has
882  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
883  * to get a sync block number.  The only concern is to figure out which
884  * log record header to believe.
885  *
886  * The following algorithm uses the log record header with the largest
887  * lsn.  The entire log record does not need to be valid.  We only care
888  * that the header is valid.
889  *
890  * We could speed up the search by using the current head_blk buffer, but it
891  * is not available.
892  */
893 STATIC int
894 xlog_find_tail(
895 	struct xlog		*log,
896 	xfs_daddr_t		*head_blk,
897 	xfs_daddr_t		*tail_blk)
898 {
899 	xlog_rec_header_t	*rhead;
900 	xlog_op_header_t	*op_head;
901 	xfs_caddr_t		offset = NULL;
902 	xfs_buf_t		*bp;
903 	int			error, i, found;
904 	xfs_daddr_t		umount_data_blk;
905 	xfs_daddr_t		after_umount_blk;
906 	xfs_lsn_t		tail_lsn;
907 	int			hblks;
908 
909 	found = 0;
910 
911 	/*
912 	 * Find previous log record
913 	 */
914 	if ((error = xlog_find_head(log, head_blk)))
915 		return error;
916 
917 	bp = xlog_get_bp(log, 1);
918 	if (!bp)
919 		return ENOMEM;
920 	if (*head_blk == 0) {				/* special case */
921 		error = xlog_bread(log, 0, 1, bp, &offset);
922 		if (error)
923 			goto done;
924 
925 		if (xlog_get_cycle(offset) == 0) {
926 			*tail_blk = 0;
927 			/* leave all other log inited values alone */
928 			goto done;
929 		}
930 	}
931 
932 	/*
933 	 * Search backwards looking for log record header block
934 	 */
935 	ASSERT(*head_blk < INT_MAX);
936 	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
937 		error = xlog_bread(log, i, 1, bp, &offset);
938 		if (error)
939 			goto done;
940 
941 		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
942 			found = 1;
943 			break;
944 		}
945 	}
946 	/*
947 	 * If we haven't found the log record header block, start looking
948 	 * again from the end of the physical log.  XXXmiken: There should be
949 	 * a check here to make sure we didn't search more than N blocks in
950 	 * the previous code.
951 	 */
952 	if (!found) {
953 		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
954 			error = xlog_bread(log, i, 1, bp, &offset);
955 			if (error)
956 				goto done;
957 
958 			if (*(__be32 *)offset ==
959 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
960 				found = 2;
961 				break;
962 			}
963 		}
964 	}
965 	if (!found) {
966 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
967 		xlog_put_bp(bp);
968 		ASSERT(0);
969 		return XFS_ERROR(EIO);
970 	}
971 
972 	/* find blk_no of tail of log */
973 	rhead = (xlog_rec_header_t *)offset;
974 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
975 
976 	/*
977 	 * Reset log values according to the state of the log when we
978 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
979 	 * one because the next write starts a new cycle rather than
980 	 * continuing the cycle of the last good log record.  At this
981 	 * point we have guaranteed that all partial log records have been
982 	 * accounted for.  Therefore, we know that the last good log record
983 	 * written was complete and ended exactly on the end boundary
984 	 * of the physical log.
985 	 */
986 	log->l_prev_block = i;
987 	log->l_curr_block = (int)*head_blk;
988 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
989 	if (found == 2)
990 		log->l_curr_cycle++;
991 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
992 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
993 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
994 					BBTOB(log->l_curr_block));
995 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
996 					BBTOB(log->l_curr_block));
997 
998 	/*
999 	 * Look for unmount record.  If we find it, then we know there
1000 	 * was a clean unmount.  Since 'i' could be the last block in
1001 	 * the physical log, we convert to a log block before comparing
1002 	 * to the head_blk.
1003 	 *
1004 	 * Save the current tail lsn to use to pass to
1005 	 * xlog_clear_stale_blocks() below.  We won't want to clear the
1006 	 * unmount record if there is one, so we pass the lsn of the
1007 	 * unmount record rather than the block after it.
1008 	 */
1009 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1010 		int	h_size = be32_to_cpu(rhead->h_size);
1011 		int	h_version = be32_to_cpu(rhead->h_version);
1012 
1013 		if ((h_version & XLOG_VERSION_2) &&
1014 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1015 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1016 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
1017 				hblks++;
1018 		} else {
1019 			hblks = 1;
1020 		}
1021 	} else {
1022 		hblks = 1;
1023 	}
1024 	after_umount_blk = (i + hblks + (int)
1025 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1026 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1027 	if (*head_blk == after_umount_blk &&
1028 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1029 		umount_data_blk = (i + hblks) % log->l_logBBsize;
1030 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1031 		if (error)
1032 			goto done;
1033 
1034 		op_head = (xlog_op_header_t *)offset;
1035 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1036 			/*
1037 			 * Set tail and last sync so that newly written
1038 			 * log records will point recovery to after the
1039 			 * current unmount record.
1040 			 */
1041 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1042 					log->l_curr_cycle, after_umount_blk);
1043 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1044 					log->l_curr_cycle, after_umount_blk);
1045 			*tail_blk = after_umount_blk;
1046 
1047 			/*
1048 			 * Note that the unmount was clean. If the unmount
1049 			 * was not clean, we need to know this to rebuild the
1050 			 * superblock counters from the perag headers if we
1051 			 * have a filesystem using non-persistent counters.
1052 			 */
1053 			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1054 		}
1055 	}
1056 
1057 	/*
1058 	 * Make sure that there are no blocks in front of the head
1059 	 * with the same cycle number as the head.  This can happen
1060 	 * because we allow multiple outstanding log writes concurrently,
1061 	 * and the later writes might make it out before earlier ones.
1062 	 *
1063 	 * We use the lsn from before modifying it so that we'll never
1064 	 * overwrite the unmount record after a clean unmount.
1065 	 *
1066 	 * Do this only if we are going to recover the filesystem
1067 	 *
1068 	 * NOTE: This used to say "if (!readonly)"
1069 	 * However on Linux, we can & do recover a read-only filesystem.
1070 	 * We only skip recovery if NORECOVERY is specified on mount,
1071 	 * in which case we would not be here.
1072 	 *
1073 	 * But... if the -device- itself is readonly, just skip this.
1074 	 * We can't recover this device anyway, so it won't matter.
1075 	 */
1076 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1077 		error = xlog_clear_stale_blocks(log, tail_lsn);
1078 
1079 done:
1080 	xlog_put_bp(bp);
1081 
1082 	if (error)
1083 		xfs_warn(log->l_mp, "failed to locate log tail");
1084 	return error;
1085 }
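
/*
 * Example of the unmount record math above (hypothetical layout): a
 * clean unmount writes a record header at block i followed by a body
 * that rounds to a single basic block, so with hblks = 1 we get
 * umount_data_blk = (i + 1) % l_logBBsize and after_umount_blk =
 * (i + 2) % l_logBBsize.  Finding *head_blk at after_umount_blk with
 * h_num_logops == 1 is what identifies the clean unmount.
 */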
1086 
1087 /*
1088  * Is the log zeroed at all?
1089  *
1090  * The last binary search should be changed to perform an X block read
1091  * once X becomes small enough.  You can then search linearly through
1092  * the X blocks.  This will cut down on the number of reads we need to do.
1093  *
1094  * If the log is partially zeroed, this routine will pass back the blkno
1095  * of the first block with cycle number 0.  It won't have a complete LR
1096  * preceding it.
1097  *
1098  * Return:
1099  *	0  => the log is completely written to
1100  *	-1 => use *blk_no as the first block of the log
1101  *	>0 => error has occurred
1102  */
1103 STATIC int
1104 xlog_find_zeroed(
1105 	struct xlog	*log,
1106 	xfs_daddr_t	*blk_no)
1107 {
1108 	xfs_buf_t	*bp;
1109 	xfs_caddr_t	offset;
1110 	uint	        first_cycle, last_cycle;
1111 	xfs_daddr_t	new_blk, last_blk, start_blk;
1112 	xfs_daddr_t     num_scan_bblks;
1113 	int	        error, log_bbnum = log->l_logBBsize;
1114 
1115 	*blk_no = 0;
1116 
1117 	/* check totally zeroed log */
1118 	bp = xlog_get_bp(log, 1);
1119 	if (!bp)
1120 		return ENOMEM;
1121 	error = xlog_bread(log, 0, 1, bp, &offset);
1122 	if (error)
1123 		goto bp_err;
1124 
1125 	first_cycle = xlog_get_cycle(offset);
1126 	if (first_cycle == 0) {		/* completely zeroed log */
1127 		*blk_no = 0;
1128 		xlog_put_bp(bp);
1129 		return -1;
1130 	}
1131 
1132 	/* check partially zeroed log */
1133 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1134 	if (error)
1135 		goto bp_err;
1136 
1137 	last_cycle = xlog_get_cycle(offset);
1138 	if (last_cycle != 0) {		/* log completely written to */
1139 		xlog_put_bp(bp);
1140 		return 0;
1141 	} else if (first_cycle != 1) {
1142 		/*
1143 		 * If the cycle of the last block is zero, the cycle of
1144 		 * the first block must be 1. If it's not, maybe we're
1145 		 * not looking at a log... Bail out.
1146 		 */
1147 		xfs_warn(log->l_mp,
1148 			"Log inconsistent or not a log (last==0, first!=1)");
1149 		error = XFS_ERROR(EINVAL);
1150 		goto bp_err;
1151 	}
1152 
1153 	/* we have a partially zeroed log */
1154 	last_blk = log_bbnum-1;
1155 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1156 		goto bp_err;
1157 
1158 	/*
1159 	 * Validate the answer.  Because there is no way to guarantee that
1160 	 * the entire log is made up of log records which are the same size,
1161 	 * we scan over the defined maximum blocks.  At this point, the maximum
1162 	 * is not chosen to mean anything special.   XXXmiken
1163 	 */
1164 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1165 	ASSERT(num_scan_bblks <= INT_MAX);
1166 
1167 	if (last_blk < num_scan_bblks)
1168 		num_scan_bblks = last_blk;
1169 	start_blk = last_blk - num_scan_bblks;
1170 
1171 	/*
1172 	 * We search for any instances of cycle number 0 that occur before
1173 	 * our current estimate of the head.  What we're trying to detect is
1174 	 *        1 ... | 0 | 1 | 0...
1175 	 *                       ^ binary search ends here
1176 	 */
1177 	if ((error = xlog_find_verify_cycle(log, start_blk,
1178 					 (int)num_scan_bblks, 0, &new_blk)))
1179 		goto bp_err;
1180 	if (new_blk != -1)
1181 		last_blk = new_blk;
1182 
1183 	/*
1184 	 * Potentially backup over partial log record write.  We don't need
1185 	 * to search the end of the log because we know it is zero.
1186 	 */
1187 	if ((error = xlog_find_verify_log_record(log, start_blk,
1188 				&last_blk, 0)) == -1) {
1189 		error = XFS_ERROR(EIO);
1190 		goto bp_err;
1191 	} else if (error)
1192 		goto bp_err;
1193 
1194 	*blk_no = last_blk;
1195 bp_err:
1196 	xlog_put_bp(bp);
1197 	if (error)
1198 		return error;
1199 	return -1;
1200 }
1201 
1202 /*
1203  * These are simple subroutines used by xlog_clear_stale_blocks() below
1204  * to initialize a buffer full of empty log record headers and write
1205  * them into the log.
1206  */
1207 STATIC void
1208 xlog_add_record(
1209 	struct xlog		*log,
1210 	xfs_caddr_t		buf,
1211 	int			cycle,
1212 	int			block,
1213 	int			tail_cycle,
1214 	int			tail_block)
1215 {
1216 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1217 
1218 	memset(buf, 0, BBSIZE);
1219 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1220 	recp->h_cycle = cpu_to_be32(cycle);
1221 	recp->h_version = cpu_to_be32(
1222 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1223 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1224 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1225 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1226 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1227 }
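
/*
 * For illustration, a record built by the routine above with the
 * hypothetical values cycle = 2, block = 50, tail_cycle = 1 and
 * tail_block = 470 is a single zeroed 512-byte header with:
 *
 *	h_magicno  = XLOG_HEADER_MAGIC_NUM
 *	h_cycle    = 2
 *	h_lsn      = xlog_assign_lsn(2, 50)
 *	h_tail_lsn = xlog_assign_lsn(1, 470)
 *
 * and h_len left at zero, i.e. an empty record that only restamps the
 * block's cycle number.
 */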
1228 
1229 STATIC int
1230 xlog_write_log_records(
1231 	struct xlog	*log,
1232 	int		cycle,
1233 	int		start_block,
1234 	int		blocks,
1235 	int		tail_cycle,
1236 	int		tail_block)
1237 {
1238 	xfs_caddr_t	offset;
1239 	xfs_buf_t	*bp;
1240 	int		balign, ealign;
1241 	int		sectbb = log->l_sectBBsize;
1242 	int		end_block = start_block + blocks;
1243 	int		bufblks;
1244 	int		error = 0;
1245 	int		i, j = 0;
1246 
1247 	/*
1248 	 * Greedily allocate a buffer big enough to handle the full
1249 	 * range of basic blocks to be written.  If that fails, try
1250 	 * a smaller size.  We need to be able to write at least a
1251 	 * log sector, or we're out of luck.
1252 	 */
1253 	bufblks = 1 << ffs(blocks);
1254 	while (bufblks > log->l_logBBsize)
1255 		bufblks >>= 1;
1256 	while (!(bp = xlog_get_bp(log, bufblks))) {
1257 		bufblks >>= 1;
1258 		if (bufblks < sectbb)
1259 			return ENOMEM;
1260 	}
1261 
1262 	/* We may need to do a read at the start to fill in part of
1263 	 * the buffer in the starting sector not covered by the first
1264 	 * write below.
1265 	 */
1266 	balign = round_down(start_block, sectbb);
1267 	if (balign != start_block) {
1268 		error = xlog_bread_noalign(log, start_block, 1, bp);
1269 		if (error)
1270 			goto out_put_bp;
1271 
1272 		j = start_block - balign;
1273 	}
1274 
1275 	for (i = start_block; i < end_block; i += bufblks) {
1276 		int		bcount, endcount;
1277 
1278 		bcount = min(bufblks, end_block - start_block);
1279 		endcount = bcount - j;
1280 
1281 		/* We may need to do a read at the end to fill in part of
1282 		 * the buffer in the final sector not covered by the write.
1283 		 * If this is the same sector as the above read, skip it.
1284 		 */
1285 		ealign = round_down(end_block, sectbb);
1286 		if (j == 0 && (start_block + endcount > ealign)) {
1287 			offset = bp->b_addr + BBTOB(ealign - start_block);
1288 			error = xlog_bread_offset(log, ealign, sectbb,
1289 							bp, offset);
1290 			if (error)
1291 				break;
1292 
1293 		}
1294 
1295 		offset = xlog_align(log, start_block, endcount, bp);
1296 		for (; j < endcount; j++) {
1297 			xlog_add_record(log, offset, cycle, i+j,
1298 					tail_cycle, tail_block);
1299 			offset += BBSIZE;
1300 		}
1301 		error = xlog_bwrite(log, start_block, endcount, bp);
1302 		if (error)
1303 			break;
1304 		start_block += endcount;
1305 		j = 0;
1306 	}
1307 
1308  out_put_bp:
1309 	xlog_put_bp(bp);
1310 	return error;
1311 }
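
/*
 * Worked example of the sector edge handling above (hypothetical
 * geometry): with sectbb = 8 and start_block = 13, balign = 8 differs
 * from start_block, so the first sector (blocks 8..15) is read into the
 * buffer and j = 5; the stamping loop then skips the five pre-existing
 * blocks 8..12, and the sector-rounded write pushes them back out
 * unchanged alongside the freshly stamped records.
 */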
1312 
1313 /*
1314  * This routine is called to blow away any incomplete log writes out
1315  * in front of the log head.  We do this so that we won't become confused
1316  * if we come up, write only a little bit more, and then crash again.
1317  * If we leave the partial log records out there, this situation could
1318  * cause us to think those partial writes are valid blocks since they
1319  * have the current cycle number.  We get rid of them by overwriting them
1320  * with empty log records with the old cycle number rather than the
1321  * current one.
1322  *
1323  * The tail lsn is passed in rather than taken from
1324  * the log so that we will not write over the unmount record after a
1325  * clean unmount in a 512 block log.  Doing so would leave the log without
1326  * any valid log records in it until a new one was written.  If we crashed
1327  * during that time we would not be able to recover.
1328  */
1329 STATIC int
1330 xlog_clear_stale_blocks(
1331 	struct xlog	*log,
1332 	xfs_lsn_t	tail_lsn)
1333 {
1334 	int		tail_cycle, head_cycle;
1335 	int		tail_block, head_block;
1336 	int		tail_distance, max_distance;
1337 	int		distance;
1338 	int		error;
1339 
1340 	tail_cycle = CYCLE_LSN(tail_lsn);
1341 	tail_block = BLOCK_LSN(tail_lsn);
1342 	head_cycle = log->l_curr_cycle;
1343 	head_block = log->l_curr_block;
1344 
1345 	/*
1346 	 * Figure out the distance between the new head of the log
1347 	 * and the tail.  We want to write over any blocks beyond the
1348 	 * head that we may have written just before the crash, but
1349 	 * we don't want to overwrite the tail of the log.
1350 	 */
1351 	if (head_cycle == tail_cycle) {
1352 		/*
1353 		 * The tail is behind the head in the physical log,
1354 		 * so the distance from the head to the tail is the
1355 		 * distance from the head to the end of the log plus
1356 		 * the distance from the beginning of the log to the
1357 		 * tail.
1358 		 */
1359 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1360 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1361 					 XFS_ERRLEVEL_LOW, log->l_mp);
1362 			return XFS_ERROR(EFSCORRUPTED);
1363 		}
1364 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1365 	} else {
1366 		/*
1367 		 * The head is behind the tail in the physical log,
1368 		 * so the distance from the head to the tail is just
1369 		 * the tail block minus the head block.
1370 		 */
1371 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1372 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1373 					 XFS_ERRLEVEL_LOW, log->l_mp);
1374 			return XFS_ERROR(EFSCORRUPTED);
1375 		}
1376 		tail_distance = tail_block - head_block;
1377 	}
1378 
1379 	/*
1380 	 * If the head is right up against the tail, we can't clear
1381 	 * anything.
1382 	 */
1383 	if (tail_distance <= 0) {
1384 		ASSERT(tail_distance == 0);
1385 		return 0;
1386 	}
1387 
1388 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1389 	/*
1390 	 * Take the smaller of the maximum amount of outstanding I/O
1391 	 * we could have and the distance to the tail to clear out.
1392 	 * We take the smaller so that we don't overwrite the tail and
1393 	 * we don't waste all day writing from the head to the tail
1394 	 * for no reason.
1395 	 */
1396 	max_distance = MIN(max_distance, tail_distance);
1397 
1398 	if ((head_block + max_distance) <= log->l_logBBsize) {
1399 		/*
1400 		 * We can stomp all the blocks we need to without
1401 		 * wrapping around the end of the log.  Just do it
1402 		 * in a single write.  Use the cycle number of the
1403 		 * current cycle minus one so that the log will look like:
1404 		 *     n ... | n - 1 ...
1405 		 */
1406 		error = xlog_write_log_records(log, (head_cycle - 1),
1407 				head_block, max_distance, tail_cycle,
1408 				tail_block);
1409 		if (error)
1410 			return error;
1411 	} else {
1412 		/*
1413 		 * We need to wrap around the end of the physical log in
1414 		 * order to clear all the blocks.  Do it in two separate
1415 		 * I/Os.  The first write should be from the head to the
1416 		 * end of the physical log, and it should use the current
1417 		 * cycle number minus one just like above.
1418 		 */
1419 		distance = log->l_logBBsize - head_block;
1420 		error = xlog_write_log_records(log, (head_cycle - 1),
1421 				head_block, distance, tail_cycle,
1422 				tail_block);
1423 
1424 		if (error)
1425 			return error;
1426 
1427 		/*
1428 		 * Now write the blocks at the start of the physical log.
1429 		 * This writes the remainder of the blocks we want to clear.
1430 		 * It uses the current cycle number since we're now on the
1431 		 * same cycle as the head so that we get:
1432 		 *    n ... n ... | n - 1 ...
1433 		 *    ^^^^^ blocks we're writing
1434 		 */
1435 		distance = max_distance - (log->l_logBBsize - head_block);
1436 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1437 				tail_cycle, tail_block);
1438 		if (error)
1439 			return error;
1440 	}
1441 
1442 	return 0;
1443 }
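
/*
 * Example of the wrapped case above (hypothetical numbers): for a
 * 1000-block log with head_cycle = 5, head_block = 900, tail_cycle = 5
 * and tail_block = 300, tail_distance = 300 + (1000 - 900) = 400.
 * Assuming the in-core I/O limit is larger, max_distance = 400 and
 * head_block + max_distance = 1300 wraps, so two writes are issued:
 * blocks 900..999 stamped with cycle 4 (head_cycle - 1) and blocks
 * 0..299 stamped with cycle 5, giving the "n ... n ... | n - 1 ..."
 * layout shown above.
 */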
1444 
1445 /******************************************************************************
1446  *
1447  *		Log recover routines
1448  *
1449  ******************************************************************************
1450  */
1451 
1452 STATIC xlog_recover_t *
1453 xlog_recover_find_tid(
1454 	struct hlist_head	*head,
1455 	xlog_tid_t		tid)
1456 {
1457 	xlog_recover_t		*trans;
1458 
1459 	hlist_for_each_entry(trans, head, r_list) {
1460 		if (trans->r_log_tid == tid)
1461 			return trans;
1462 	}
1463 	return NULL;
1464 }
1465 
1466 STATIC void
1467 xlog_recover_new_tid(
1468 	struct hlist_head	*head,
1469 	xlog_tid_t		tid,
1470 	xfs_lsn_t		lsn)
1471 {
1472 	xlog_recover_t		*trans;
1473 
1474 	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1475 	trans->r_log_tid   = tid;
1476 	trans->r_lsn	   = lsn;
1477 	INIT_LIST_HEAD(&trans->r_itemq);
1478 
1479 	INIT_HLIST_NODE(&trans->r_list);
1480 	hlist_add_head(&trans->r_list, head);
1481 }
1482 
1483 STATIC void
1484 xlog_recover_add_item(
1485 	struct list_head	*head)
1486 {
1487 	xlog_recover_item_t	*item;
1488 
1489 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1490 	INIT_LIST_HEAD(&item->ri_list);
1491 	list_add_tail(&item->ri_list, head);
1492 }
1493 
1494 STATIC int
1495 xlog_recover_add_to_cont_trans(
1496 	struct xlog		*log,
1497 	struct xlog_recover	*trans,
1498 	xfs_caddr_t		dp,
1499 	int			len)
1500 {
1501 	xlog_recover_item_t	*item;
1502 	xfs_caddr_t		ptr, old_ptr;
1503 	int			old_len;
1504 
1505 	if (list_empty(&trans->r_itemq)) {
1506 		/* finish copying rest of trans header */
1507 		xlog_recover_add_item(&trans->r_itemq);
1508 		ptr = (xfs_caddr_t) &trans->r_theader +
1509 				sizeof(xfs_trans_header_t) - len;
1510 		memcpy(ptr, dp, len); /* d, s, l */
1511 		return 0;
1512 	}
1513 	/* take the tail entry */
1514 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1515 
1516 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1517 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
1518 
1519 	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1520 	memcpy(&ptr[old_len], dp, len); /* d, s, l */
1521 	item->ri_buf[item->ri_cnt-1].i_len += len;
1522 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1523 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1524 	return 0;
1525 }
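
/*
 * For example (hypothetical op split): a 6000-byte region written as a
 * 4096-byte op followed by a 1904-byte continuation op arrives here on
 * the second op; the tail item's last iovec is kmem_realloc()ed from
 * 4096 to 6000 bytes and the new bytes appended, leaving one logically
 * contiguous region for replay.
 */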
1526 
1527 /*
1528  * The next region to add is the start of a new region.  It could be
1529  * a whole region or it could be the first part of a new region.  Because
1530  * of this, the assumption here is that the type and size fields of all
1531  * format structures fit into the first 32 bits of the structure.
1532  *
1533  * This works because all regions must be 32 bit aligned.  Therefore, we
1534  * either have both fields or we have neither field.  In the case we have
1535  * neither field, the data part of the region is zero length.  We only have
1536  * a log_op_header and can throw away the header since a new one will appear
1537  * later.  If we have at least 4 bytes, then we can determine how many regions
1538  * will appear in the current log item.
1539  */
1540 STATIC int
1541 xlog_recover_add_to_trans(
1542 	struct xlog		*log,
1543 	struct xlog_recover	*trans,
1544 	xfs_caddr_t		dp,
1545 	int			len)
1546 {
1547 	xfs_inode_log_format_t	*in_f;			/* any will do */
1548 	xlog_recover_item_t	*item;
1549 	xfs_caddr_t		ptr;
1550 
1551 	if (!len)
1552 		return 0;
1553 	if (list_empty(&trans->r_itemq)) {
1554 		/* we need to catch log corruptions here */
1555 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1556 			xfs_warn(log->l_mp, "%s: bad header magic number",
1557 				__func__);
1558 			ASSERT(0);
1559 			return XFS_ERROR(EIO);
1560 		}
1561 		if (len == sizeof(xfs_trans_header_t))
1562 			xlog_recover_add_item(&trans->r_itemq);
1563 		memcpy(&trans->r_theader, dp, len); /* d, s, l */
1564 		return 0;
1565 	}
1566 
1567 	ptr = kmem_alloc(len, KM_SLEEP);
1568 	memcpy(ptr, dp, len);
1569 	in_f = (xfs_inode_log_format_t *)ptr;
1570 
1571 	/* take the tail entry */
1572 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1573 	if (item->ri_total != 0 &&
1574 	     item->ri_total == item->ri_cnt) {
1575 		/* tail item is in use, get a new one */
1576 		xlog_recover_add_item(&trans->r_itemq);
1577 		item = list_entry(trans->r_itemq.prev,
1578 					xlog_recover_item_t, ri_list);
1579 	}
1580 
1581 	if (item->ri_total == 0) {		/* first region to be added */
1582 		if (in_f->ilf_size == 0 ||
1583 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1584 			xfs_warn(log->l_mp,
1585 		"bad number of regions (%d) in inode log format",
1586 				  in_f->ilf_size);
1587 			ASSERT(0);
1588 			kmem_free(ptr);
1589 			return XFS_ERROR(EIO);
1590 		}
1591 
1592 		item->ri_total = in_f->ilf_size;
1593 		item->ri_buf =
1594 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1595 				    KM_SLEEP);
1596 	}
1597 	ASSERT(item->ri_total > item->ri_cnt);
1598 	/* Description region is ri_buf[0] */
1599 	item->ri_buf[item->ri_cnt].i_addr = ptr;
1600 	item->ri_buf[item->ri_cnt].i_len  = len;
1601 	item->ri_cnt++;
1602 	trace_xfs_log_recover_item_add(log, trans, item, 0);
1603 	return 0;
1604 }
1605 
1606 /*
1607  * Sort the log items in the transaction.
1608  *
1609  * The ordering constraints are defined by the inode allocation and unlink
1610  * behaviour. The rules are:
1611  *
1612  *	1. Every item is only logged once in a given transaction. Hence it
1613  *	   represents the last logged state of the item. Ordering is therefore
1614  *	   dependent on the order in which operations need to be performed, so
1615  *	   that required initial conditions are always met.
1616  *
1617  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
1618  *	   there's nothing to replay from them so we can simply cull them
1619  *	   from the transaction. However, we can't do that until after we've
1620  *	   replayed all the other items because they may be dependent on the
1621  *	   cancelled buffer and replaying the cancelled buffer can remove it
1622  *	   from the cancelled buffer table. Hence they have to be done last.
1623  *
1624  *	3. Inode allocation buffers must be replayed before inode items that
1625  *	   read the buffer and replay changes into it. For filesystems using the
1626  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1627  *	   treated the same as inode allocation buffers as they create and
1628  *	   initialise the buffers directly.
1629  *
1630  *	4. Inode unlink buffers must be replayed after inode items are replayed.
1631  *	   This ensures that inodes are completely flushed to the inode buffer
1632  *	   in a "free" state before we remove the unlinked inode list pointer.
1633  *
1634  * Hence the ordering needs to be inode allocation buffers first, inode items
1635  * second, inode unlink buffers third and cancelled buffers last.
1636  *
1637  * But there's a problem with that - we can't tell an inode allocation buffer
1638  * apart from a regular buffer, so we can't separate them. We can, however,
1639  * tell an inode unlink buffer from the others, and so we can separate them out
1640  * from all the other buffers and move them to last.
1641  *
1642  * Hence, 4 lists, in order from head to tail:
1643  *	- buffer_list for all buffers except cancelled/inode unlink buffers
1644  *	- item_list for all non-buffer items
1645  *	- inode_buffer_list for inode unlink buffers
1646  *	- cancel_list for the cancelled buffers
1647  *
1648  * Note that we add objects to the tail of the lists so that first-to-last
1649  * ordering is preserved within the lists. Adding objects to the head of the
1650  * list means when we traverse from the head we walk them in last-to-first
1651  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1652  * but for all other items there may be specific ordering that we need to
1653  * preserve.
1654  */
1655 STATIC int
1656 xlog_recover_reorder_trans(
1657 	struct xlog		*log,
1658 	struct xlog_recover	*trans,
1659 	int			pass)
1660 {
1661 	xlog_recover_item_t	*item, *n;
1662 	LIST_HEAD(sort_list);
1663 	LIST_HEAD(cancel_list);
1664 	LIST_HEAD(buffer_list);
1665 	LIST_HEAD(inode_buffer_list);
1666 	LIST_HEAD(inode_list);
1667 
1668 	list_splice_init(&trans->r_itemq, &sort_list);
1669 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1670 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1671 
1672 		switch (ITEM_TYPE(item)) {
1673 		case XFS_LI_ICREATE:
1674 			list_move_tail(&item->ri_list, &buffer_list);
1675 			break;
1676 		case XFS_LI_BUF:
1677 			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1678 				trace_xfs_log_recover_item_reorder_head(log,
1679 							trans, item, pass);
1680 				list_move(&item->ri_list, &cancel_list);
1681 				break;
1682 			}
1683 			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1684 				list_move(&item->ri_list, &inode_buffer_list);
1685 				break;
1686 			}
1687 			list_move_tail(&item->ri_list, &buffer_list);
1688 			break;
1689 		case XFS_LI_INODE:
1690 		case XFS_LI_DQUOT:
1691 		case XFS_LI_QUOTAOFF:
1692 		case XFS_LI_EFD:
1693 		case XFS_LI_EFI:
1694 			trace_xfs_log_recover_item_reorder_tail(log,
1695 							trans, item, pass);
1696 			list_move_tail(&item->ri_list, &inode_list);
1697 			break;
1698 		default:
1699 			xfs_warn(log->l_mp,
1700 				"%s: unrecognized type of log operation",
1701 				__func__);
1702 			ASSERT(0);
1703 			return XFS_ERROR(EIO);
1704 		}
1705 	}
1706 	ASSERT(list_empty(&sort_list));
1707 	if (!list_empty(&buffer_list))
1708 		list_splice(&buffer_list, &trans->r_itemq);
1709 	if (!list_empty(&inode_list))
1710 		list_splice_tail(&inode_list, &trans->r_itemq);
1711 	if (!list_empty(&inode_buffer_list))
1712 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1713 	if (!list_empty(&cancel_list))
1714 		list_splice_tail(&cancel_list, &trans->r_itemq);
1715 	return 0;
1716 }
1717 
1718 /*
1719  * Build up the table of buf cancel records so that we don't replay
1720  * cancelled data in the second pass.  For buffer records that are
1721  * not cancel records, there is nothing to do here so we just return.
1722  *
1723  * If we get a cancel record which is already in the table, this indicates
1724  * that the buffer was cancelled multiple times.  In order to ensure
1725  * that during pass 2 we keep the record in the table until we reach its
1726  * last occurrence in the log, we keep a reference count in the cancel
1727  * record in the table to tell us how many times we expect to see this
1728  * record during the second pass.
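 *
 * For example, a buffer that was cancelled in two separate transactions
 * in the log ends pass 1 with bc_refcount == 2; pass 2 then decrements
 * the count at each cancel record it sees and frees the entry at the
 * last one (see xlog_check_buffer_cancelled() below).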
1729  */
1730 STATIC int
1731 xlog_recover_buffer_pass1(
1732 	struct xlog			*log,
1733 	struct xlog_recover_item	*item)
1734 {
1735 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1736 	struct list_head	*bucket;
1737 	struct xfs_buf_cancel	*bcp;
1738 
1739 	/*
1740 	 * If this isn't a cancel buffer item, then just return.
1741 	 */
1742 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1743 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1744 		return 0;
1745 	}
1746 
1747 	/*
1748 	 * Insert an xfs_buf_cancel record into the hash table of cancelled buffers.
1749 	 * If there is already an identical record, bump its reference count.
1750 	 */
1751 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1752 	list_for_each_entry(bcp, bucket, bc_list) {
1753 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1754 		    bcp->bc_len == buf_f->blf_len) {
1755 			bcp->bc_refcount++;
1756 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1757 			return 0;
1758 		}
1759 	}
1760 
1761 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1762 	bcp->bc_blkno = buf_f->blf_blkno;
1763 	bcp->bc_len = buf_f->blf_len;
1764 	bcp->bc_refcount = 1;
1765 	list_add_tail(&bcp->bc_list, bucket);
1766 
1767 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1768 	return 0;
1769 }
1770 
1771 /*
1772  * Check to see whether the buffer being recovered has a corresponding
1773  * entry in the buffer cancel record table. If it does, return the cancel
1774  * buffer structure to the caller.
1775  */
1776 STATIC struct xfs_buf_cancel *
1777 xlog_peek_buffer_cancelled(
1778 	struct xlog		*log,
1779 	xfs_daddr_t		blkno,
1780 	uint			len,
1781 	ushort			flags)
1782 {
1783 	struct list_head	*bucket;
1784 	struct xfs_buf_cancel	*bcp;
1785 
1786 	if (!log->l_buf_cancel_table) {
1787 		/* empty table means no cancelled buffers in the log */
1788 		ASSERT(!(flags & XFS_BLF_CANCEL));
1789 		return NULL;
1790 	}
1791 
1792 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1793 	list_for_each_entry(bcp, bucket, bc_list) {
1794 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1795 			return bcp;
1796 	}
1797 
1798 	/*
1799 	 * We didn't find a corresponding entry in the table, so return NULL so
1800 	 * that the buffer is NOT cancelled.
1801 	 */
1802 	ASSERT(!(flags & XFS_BLF_CANCEL));
1803 	return NULL;
1804 }
1805 
1806 /*
1807  * If the buffer is being cancelled then return 1 so that it will be cancelled,
1808  * otherwise return 0.  If the buffer is actually a buffer cancel item
1809  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
1810  * table and remove it from the table if this is the last reference.
1811  *
1812  * We remove the cancel record from the table when we encounter its last
1813  * occurrence in the log so that if the same buffer is re-used again after its
1814  * last cancellation we actually replay the changes made at that point.
1815  */
1816 STATIC int
1817 xlog_check_buffer_cancelled(
1818 	struct xlog		*log,
1819 	xfs_daddr_t		blkno,
1820 	uint			len,
1821 	ushort			flags)
1822 {
1823 	struct xfs_buf_cancel	*bcp;
1824 
1825 	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
1826 	if (!bcp)
1827 		return 0;
1828 
1829 	/*
1830 	 * We've got a match, so return 1 so that the recovery of this buffer
1831 	 * is cancelled.  If this buffer is actually a buffer cancel log
1832 	 * item, then decrement the refcount on the one in the table and
1833 	 * remove it if this is the last reference.
1834 	 */
1835 	if (flags & XFS_BLF_CANCEL) {
1836 		if (--bcp->bc_refcount == 0) {
1837 			list_del(&bcp->bc_list);
1838 			kmem_free(bcp);
1839 		}
1840 	}
1841 	return 1;
1842 }
1843 
1844 /*
1845  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1846  * data which should be recovered is that which corresponds to the
1847  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1848  * data for the inodes is always logged through the inodes themselves rather
1849  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1850  *
1851  * The only time when buffers full of inodes are fully recovered is when the
1852  * buffer is full of newly allocated inodes.  In this case the buffer will
1853  * not be marked as an inode buffer and so will be sent to
1854  * xlog_recover_do_reg_buffer() below during recovery.
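 *
 * Note that the logged regions are described by blf_data_map in
 * XFS_BLF_CHUNK sized units, so a run of nbits bits starting at bit
 * covers bytes [bit << XFS_BLF_SHIFT, (bit + nbits) << XFS_BLF_SHIFT)
 * of the buffer.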
1855  */
1856 STATIC int
1857 xlog_recover_do_inode_buffer(
1858 	struct xfs_mount	*mp,
1859 	xlog_recover_item_t	*item,
1860 	struct xfs_buf		*bp,
1861 	xfs_buf_log_format_t	*buf_f)
1862 {
1863 	int			i;
1864 	int			item_index = 0;
1865 	int			bit = 0;
1866 	int			nbits = 0;
1867 	int			reg_buf_offset = 0;
1868 	int			reg_buf_bytes = 0;
1869 	int			next_unlinked_offset;
1870 	int			inodes_per_buf;
1871 	xfs_agino_t		*logged_nextp;
1872 	xfs_agino_t		*buffer_nextp;
1873 
1874 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1875 
1876 	/*
1877 	 * Post recovery validation only works properly on CRC enabled
1878 	 * filesystems.
1879 	 */
1880 	if (xfs_sb_version_hascrc(&mp->m_sb))
1881 		bp->b_ops = &xfs_inode_buf_ops;
1882 
1883 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1884 	for (i = 0; i < inodes_per_buf; i++) {
1885 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1886 			offsetof(xfs_dinode_t, di_next_unlinked);
1887 
1888 		while (next_unlinked_offset >=
1889 		       (reg_buf_offset + reg_buf_bytes)) {
1890 			/*
1891 			 * The next di_next_unlinked field is beyond
1892 			 * the current logged region.  Find the next
1893 			 * logged region that contains or is beyond
1894 			 * the current di_next_unlinked field.
1895 			 */
1896 			bit += nbits;
1897 			bit = xfs_next_bit(buf_f->blf_data_map,
1898 					   buf_f->blf_map_size, bit);
1899 
1900 			/*
1901 			 * If there are no more logged regions in the
1902 			 * buffer, then we're done.
1903 			 */
1904 			if (bit == -1)
1905 				return 0;
1906 
1907 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1908 						buf_f->blf_map_size, bit);
1909 			ASSERT(nbits > 0);
1910 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1911 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1912 			item_index++;
1913 		}
1914 
1915 		/*
1916 		 * If the current logged region starts after the current
1917 		 * di_next_unlinked field, then move on to the next
1918 		 * di_next_unlinked field.
1919 		 */
1920 		if (next_unlinked_offset < reg_buf_offset)
1921 			continue;
1922 
1923 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1924 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1925 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1926 							BBTOB(bp->b_io_length));
1927 
1928 		/*
1929 		 * The current logged region contains a copy of the
1930 		 * current di_next_unlinked field.  Extract its value
1931 		 * and copy it to the buffer copy.
1932 		 */
1933 		logged_nextp = item->ri_buf[item_index].i_addr +
1934 				next_unlinked_offset - reg_buf_offset;
1935 		if (unlikely(*logged_nextp == 0)) {
1936 			xfs_alert(mp,
1937 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1938 		"Trying to replay bad (0) inode di_next_unlinked field.",
1939 				item, bp);
1940 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1941 					 XFS_ERRLEVEL_LOW, mp);
1942 			return XFS_ERROR(EFSCORRUPTED);
1943 		}
1944 
1945 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1946 					      next_unlinked_offset);
1947 		*buffer_nextp = *logged_nextp;
1948 
1949 		/*
1950 		 * If necessary, recalculate the CRC in the on-disk inode. We
1951 		 * have to leave the inode in a consistent state for whoever
1952 		 * reads it next....
1953 		 */
1954 		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1955 				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1956 
1957 	}
1958 
1959 	return 0;
1960 }
1961 
1962 /*
1963  * V5 filesystems know the age of the buffer on disk being recovered. We can
1964  * have newer objects on disk than we are replaying, and so for these cases we
1965  * don't want to replay the current change as that will make the buffer contents
1966  * temporarily invalid on disk.
1967  *
1968  * The magic number might not match the buffer type we are going to recover
1969  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
1970  * extract the LSN of the existing object in the buffer based on its current
1971  * magic number.  If we don't recognise the magic number in the buffer, then
1972  * return an LSN of -1 so that the caller knows it was an unrecognised block and
1973  * so can recover the buffer.
1974  *
1975  * Note: we cannot rely solely on magic number matches to determine that the
1976  * buffer has a valid LSN - we also need to verify that it belongs to this
1977  * filesystem, so we need to extract the object's LSN and compare it to that
1978  * which we read from the superblock. If the UUIDs don't match, then we've got a
1979  * stale metadata block from an old filesystem instance that we need to recover
1980  * over the top of.
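 *
 * The caller compares the returned LSN against the LSN of the transaction
 * being replayed and skips the replay if the on-disk object is as new or
 * newer, e.g. as xlog_recover_buffer_pass2() below does:
 *
 *	lsn = xlog_recover_get_buf_lsn(mp, bp);
 *	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
 *		goto out_release;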
1981  */
1982 static xfs_lsn_t
1983 xlog_recover_get_buf_lsn(
1984 	struct xfs_mount	*mp,
1985 	struct xfs_buf		*bp)
1986 {
1987 	__uint32_t		magic32;
1988 	__uint16_t		magic16;
1989 	__uint16_t		magicda;
1990 	void			*blk = bp->b_addr;
1991 	uuid_t			*uuid;
1992 	xfs_lsn_t		lsn = -1;
1993 
1994 	/* v4 filesystems always recover immediately */
1995 	if (!xfs_sb_version_hascrc(&mp->m_sb))
1996 		goto recover_immediately;
1997 
1998 	magic32 = be32_to_cpu(*(__be32 *)blk);
1999 	switch (magic32) {
2000 	case XFS_ABTB_CRC_MAGIC:
2001 	case XFS_ABTC_CRC_MAGIC:
2002 	case XFS_ABTB_MAGIC:
2003 	case XFS_ABTC_MAGIC:
2004 	case XFS_IBT_CRC_MAGIC:
2005 	case XFS_IBT_MAGIC: {
2006 		struct xfs_btree_block *btb = blk;
2007 
2008 		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2009 		uuid = &btb->bb_u.s.bb_uuid;
2010 		break;
2011 	}
2012 	case XFS_BMAP_CRC_MAGIC:
2013 	case XFS_BMAP_MAGIC: {
2014 		struct xfs_btree_block *btb = blk;
2015 
2016 		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2017 		uuid = &btb->bb_u.l.bb_uuid;
2018 		break;
2019 	}
2020 	case XFS_AGF_MAGIC:
2021 		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2022 		uuid = &((struct xfs_agf *)blk)->agf_uuid;
2023 		break;
2024 	case XFS_AGFL_MAGIC:
2025 		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2026 		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2027 		break;
2028 	case XFS_AGI_MAGIC:
2029 		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2030 		uuid = &((struct xfs_agi *)blk)->agi_uuid;
2031 		break;
2032 	case XFS_SYMLINK_MAGIC:
2033 		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2034 		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2035 		break;
2036 	case XFS_DIR3_BLOCK_MAGIC:
2037 	case XFS_DIR3_DATA_MAGIC:
2038 	case XFS_DIR3_FREE_MAGIC:
2039 		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2040 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2041 		break;
2042 	case XFS_ATTR3_RMT_MAGIC:
2043 		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2044 		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2045 		break;
2046 	case XFS_SB_MAGIC:
2047 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2048 		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2049 		break;
2050 	default:
2051 		break;
2052 	}
2053 
2054 	if (lsn != (xfs_lsn_t)-1) {
2055 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2056 			goto recover_immediately;
2057 		return lsn;
2058 	}
2059 
2060 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2061 	switch (magicda) {
2062 	case XFS_DIR3_LEAF1_MAGIC:
2063 	case XFS_DIR3_LEAFN_MAGIC:
2064 	case XFS_DA3_NODE_MAGIC:
2065 		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2066 		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2067 		break;
2068 	default:
2069 		break;
2070 	}
2071 
2072 	if (lsn != (xfs_lsn_t)-1) {
2073 		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2074 			goto recover_immediately;
2075 		return lsn;
2076 	}
2077 
2078 	/*
2079 	 * We do individual object checks on dquot and inode buffers as they
2080 	 * have their own individual LSN records. Also, we could have a stale
2081 	 * buffer here, so we have to at least recognise these buffer types.
2082 	 *
2083 	 * A noted complexity here is inode unlinked list processing - it logs
2084 	 * the inode directly in the buffer, but we don't know which inodes have
2085 	 * been modified, and there is no global buffer LSN. Hence we need to
2086 	 * recover all inode buffer types immediately. This problem will be
2087 	 * fixed by logical logging of the unlinked list modifications.
2088 	 */
2089 	magic16 = be16_to_cpu(*(__be16 *)blk);
2090 	switch (magic16) {
2091 	case XFS_DQUOT_MAGIC:
2092 	case XFS_DINODE_MAGIC:
2093 		goto recover_immediately;
2094 	default:
2095 		break;
2096 	}
2097 
2098 	/* unknown buffer contents, recover immediately */
2099 
2100 recover_immediately:
2101 	return (xfs_lsn_t)-1;
2102 
2103 }
2104 
2105 /*
2106  * Validate the recovered buffer is of the correct type and attach the
2107  * appropriate buffer operations to them for writeback. Magic numbers are in a
2108  * few places:
2109  *	the first 16 bits of the buffer (inode buffer, dquot buffer),
2110  *	the first 32 bits of the buffer (most blocks),
2111  *	inside a struct xfs_da_blkinfo at the start of the buffer.
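 *
 * For example, a buffer logged with type XFS_BLFT_AGF_BUF is expected to
 * carry XFS_AGF_MAGIC in its first 32 bits; on a match it gets
 * &xfs_agf_buf_ops attached for write verification.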
2112  */
2113 static void
2114 xlog_recover_validate_buf_type(
2115 	struct xfs_mount	*mp,
2116 	struct xfs_buf		*bp,
2117 	xfs_buf_log_format_t	*buf_f)
2118 {
2119 	struct xfs_da_blkinfo	*info = bp->b_addr;
2120 	__uint32_t		magic32;
2121 	__uint16_t		magic16;
2122 	__uint16_t		magicda;
2123 
2124 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2125 	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2126 	magicda = be16_to_cpu(info->magic);
2127 	switch (xfs_blft_from_flags(buf_f)) {
2128 	case XFS_BLFT_BTREE_BUF:
2129 		switch (magic32) {
2130 		case XFS_ABTB_CRC_MAGIC:
2131 		case XFS_ABTC_CRC_MAGIC:
2132 		case XFS_ABTB_MAGIC:
2133 		case XFS_ABTC_MAGIC:
2134 			bp->b_ops = &xfs_allocbt_buf_ops;
2135 			break;
2136 		case XFS_IBT_CRC_MAGIC:
2137 		case XFS_IBT_MAGIC:
2138 			bp->b_ops = &xfs_inobt_buf_ops;
2139 			break;
2140 		case XFS_BMAP_CRC_MAGIC:
2141 		case XFS_BMAP_MAGIC:
2142 			bp->b_ops = &xfs_bmbt_buf_ops;
2143 			break;
2144 		default:
2145 			xfs_warn(mp, "Bad btree block magic!");
2146 			ASSERT(0);
2147 			break;
2148 		}
2149 		break;
2150 	case XFS_BLFT_AGF_BUF:
2151 		if (magic32 != XFS_AGF_MAGIC) {
2152 			xfs_warn(mp, "Bad AGF block magic!");
2153 			ASSERT(0);
2154 			break;
2155 		}
2156 		bp->b_ops = &xfs_agf_buf_ops;
2157 		break;
2158 	case XFS_BLFT_AGFL_BUF:
2159 		if (!xfs_sb_version_hascrc(&mp->m_sb))
2160 			break;
2161 		if (magic32 != XFS_AGFL_MAGIC) {
2162 			xfs_warn(mp, "Bad AGFL block magic!");
2163 			ASSERT(0);
2164 			break;
2165 		}
2166 		bp->b_ops = &xfs_agfl_buf_ops;
2167 		break;
2168 	case XFS_BLFT_AGI_BUF:
2169 		if (magic32 != XFS_AGI_MAGIC) {
2170 			xfs_warn(mp, "Bad AGI block magic!");
2171 			ASSERT(0);
2172 			break;
2173 		}
2174 		bp->b_ops = &xfs_agi_buf_ops;
2175 		break;
2176 	case XFS_BLFT_UDQUOT_BUF:
2177 	case XFS_BLFT_PDQUOT_BUF:
2178 	case XFS_BLFT_GDQUOT_BUF:
2179 #ifdef CONFIG_XFS_QUOTA
2180 		if (magic16 != XFS_DQUOT_MAGIC) {
2181 			xfs_warn(mp, "Bad DQUOT block magic!");
2182 			ASSERT(0);
2183 			break;
2184 		}
2185 		bp->b_ops = &xfs_dquot_buf_ops;
2186 #else
2187 		xfs_alert(mp,
2188 	"Trying to recover dquots without QUOTA support built in!");
2189 		ASSERT(0);
2190 #endif
2191 		break;
2192 	case XFS_BLFT_DINO_BUF:
2193 		/*
2194 		 * we get here with inode allocation buffers, not buffers that
2195 		 * track unlinked list changes.
2196 		 */
2197 		if (magic16 != XFS_DINODE_MAGIC) {
2198 			xfs_warn(mp, "Bad INODE block magic!");
2199 			ASSERT(0);
2200 			break;
2201 		}
2202 		bp->b_ops = &xfs_inode_buf_ops;
2203 		break;
2204 	case XFS_BLFT_SYMLINK_BUF:
2205 		if (magic32 != XFS_SYMLINK_MAGIC) {
2206 			xfs_warn(mp, "Bad symlink block magic!");
2207 			ASSERT(0);
2208 			break;
2209 		}
2210 		bp->b_ops = &xfs_symlink_buf_ops;
2211 		break;
2212 	case XFS_BLFT_DIR_BLOCK_BUF:
2213 		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2214 		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
2215 			xfs_warn(mp, "Bad dir block magic!");
2216 			ASSERT(0);
2217 			break;
2218 		}
2219 		bp->b_ops = &xfs_dir3_block_buf_ops;
2220 		break;
2221 	case XFS_BLFT_DIR_DATA_BUF:
2222 		if (magic32 != XFS_DIR2_DATA_MAGIC &&
2223 		    magic32 != XFS_DIR3_DATA_MAGIC) {
2224 			xfs_warn(mp, "Bad dir data magic!");
2225 			ASSERT(0);
2226 			break;
2227 		}
2228 		bp->b_ops = &xfs_dir3_data_buf_ops;
2229 		break;
2230 	case XFS_BLFT_DIR_FREE_BUF:
2231 		if (magic32 != XFS_DIR2_FREE_MAGIC &&
2232 		    magic32 != XFS_DIR3_FREE_MAGIC) {
2233 			xfs_warn(mp, "Bad dir3 free magic!");
2234 			ASSERT(0);
2235 			break;
2236 		}
2237 		bp->b_ops = &xfs_dir3_free_buf_ops;
2238 		break;
2239 	case XFS_BLFT_DIR_LEAF1_BUF:
2240 		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2241 		    magicda != XFS_DIR3_LEAF1_MAGIC) {
2242 			xfs_warn(mp, "Bad dir leaf1 magic!");
2243 			ASSERT(0);
2244 			break;
2245 		}
2246 		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2247 		break;
2248 	case XFS_BLFT_DIR_LEAFN_BUF:
2249 		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2250 		    magicda != XFS_DIR3_LEAFN_MAGIC) {
2251 			xfs_warn(mp, "Bad dir leafn magic!");
2252 			ASSERT(0);
2253 			break;
2254 		}
2255 		bp->b_ops = &xfs_dir3_leafn_buf_ops;
2256 		break;
2257 	case XFS_BLFT_DA_NODE_BUF:
2258 		if (magicda != XFS_DA_NODE_MAGIC &&
2259 		    magicda != XFS_DA3_NODE_MAGIC) {
2260 			xfs_warn(mp, "Bad da node magic!");
2261 			ASSERT(0);
2262 			break;
2263 		}
2264 		bp->b_ops = &xfs_da3_node_buf_ops;
2265 		break;
2266 	case XFS_BLFT_ATTR_LEAF_BUF:
2267 		if (magicda != XFS_ATTR_LEAF_MAGIC &&
2268 		    magicda != XFS_ATTR3_LEAF_MAGIC) {
2269 			xfs_warn(mp, "Bad attr leaf magic!");
2270 			ASSERT(0);
2271 			break;
2272 		}
2273 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
2274 		break;
2275 	case XFS_BLFT_ATTR_RMT_BUF:
2276 		if (!xfs_sb_version_hascrc(&mp->m_sb))
2277 			break;
2278 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2279 			xfs_warn(mp, "Bad attr remote magic!");
2280 			ASSERT(0);
2281 			break;
2282 		}
2283 		bp->b_ops = &xfs_attr3_rmt_buf_ops;
2284 		break;
2285 	case XFS_BLFT_SB_BUF:
2286 		if (magic32 != XFS_SB_MAGIC) {
2287 			xfs_warn(mp, "Bad SB block magic!");
2288 			ASSERT(0);
2289 			break;
2290 		}
2291 		bp->b_ops = &xfs_sb_buf_ops;
2292 		break;
2293 	default:
2294 		xfs_warn(mp, "Unknown buffer type %d!",
2295 			 xfs_blft_from_flags(buf_f));
2296 		break;
2297 	}
2298 }
2299 
2300 /*
2301  * Perform a 'normal' buffer recovery.  Each logged region of the
2302  * buffer should be copied over the corresponding region in the
2303  * given buffer.  The bitmap in the buf log format structure indicates
2304  * where to place the logged data.
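 *
 * For example, the region of the item covering chunks [bit, bit + nbits)
 * is copied from item->ri_buf[i].i_addr to the buffer at byte offset
 * bit << XFS_BLF_SHIFT.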
2305  */
2306 STATIC void
2307 xlog_recover_do_reg_buffer(
2308 	struct xfs_mount	*mp,
2309 	xlog_recover_item_t	*item,
2310 	struct xfs_buf		*bp,
2311 	xfs_buf_log_format_t	*buf_f)
2312 {
2313 	int			i;
2314 	int			bit;
2315 	int			nbits;
2316 	int                     error;
2317 
2318 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2319 
2320 	bit = 0;
2321 	i = 1;  /* 0 is the buf format structure */
2322 	while (1) {
2323 		bit = xfs_next_bit(buf_f->blf_data_map,
2324 				   buf_f->blf_map_size, bit);
2325 		if (bit == -1)
2326 			break;
2327 		nbits = xfs_contig_bits(buf_f->blf_data_map,
2328 					buf_f->blf_map_size, bit);
2329 		ASSERT(nbits > 0);
2330 		ASSERT(item->ri_buf[i].i_addr != NULL);
2331 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2332 		ASSERT(BBTOB(bp->b_io_length) >=
2333 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2334 
2335 		/*
2336 		 * The dirty regions logged in the buffer, even though
2337 		 * contiguous, may span multiple chunks. This is because the
2338 		 * dirty region may span a physical page boundary in a buffer
2339 		 * and hence be split into two separate vectors for writing into
2340 		 * the log. Hence we need to trim nbits back to the length of
2341 		 * the current region being copied out of the log.
2342 		 */
2343 		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2344 			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2345 
2346 		/*
2347 		 * Do a sanity check if this is a dquot buffer. Just checking
2348 		 * the first dquot in the buffer should do. XXX This is
2349 		 * probably a good thing to do for other buf types also.
2350 		 */
2351 		error = 0;
2352 		if (buf_f->blf_flags &
2353 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2354 			if (item->ri_buf[i].i_addr == NULL) {
2355 				xfs_alert(mp,
2356 					"XFS: NULL dquot in %s.", __func__);
2357 				goto next;
2358 			}
2359 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2360 				xfs_alert(mp,
2361 					"XFS: dquot too small (%d) in %s.",
2362 					item->ri_buf[i].i_len, __func__);
2363 				goto next;
2364 			}
2365 			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
2366 					       -1, 0, XFS_QMOPT_DOWARN,
2367 					       "dquot_buf_recover");
2368 			if (error)
2369 				goto next;
2370 		}
2371 
2372 		memcpy(xfs_buf_offset(bp,
2373 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
2374 			item->ri_buf[i].i_addr,		/* source */
2375 			nbits<<XFS_BLF_SHIFT);		/* length */
2376  next:
2377 		i++;
2378 		bit += nbits;
2379 	}
2380 
2381 	/* Shouldn't be any more regions */
2382 	ASSERT(i == item->ri_total);
2383 
2384 	/*
2385 	 * We can only do post recovery validation on items on CRC enabled
2386 	 * filesystems as we need to know when the buffer was written to be able
2387 	 * to determine if we should have replayed the item. If we replay old
2388 	 * metadata over a newer buffer, then it will enter a temporarily
2389 	 * inconsistent state resulting in verification failures. Hence for now
2390 	 * just avoid the verification stage for non-CRC filesystems.
2391 	 */
2392 	if (xfs_sb_version_hascrc(&mp->m_sb))
2393 		xlog_recover_validate_buf_type(mp, bp, buf_f);
2394 }
2395 
2396 /*
2397  * Do some primitive error checking on ondisk dquot data structures.
2398  */
2399 int
2400 xfs_qm_dqcheck(
2401 	struct xfs_mount *mp,
2402 	xfs_disk_dquot_t *ddq,
2403 	xfs_dqid_t	 id,
2404 	uint		 type,	  /* used only when IO_dorepair is true */
2405 	uint		 flags,
2406 	char		 *str)
2407 {
2408 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
2409 	int		errs = 0;
2410 
2411 	/*
2412 	 * We can encounter an uninitialized dquot buffer for 2 reasons:
2413 	 * 1. If we crash while deleting the quotainode(s), and those blks got
2414 	 *    used for user data. This is because we take the path of regular
2415 	 *    file deletion; however, the size field of quotainodes is never
2416 	 *    updated, so all the tricks that we play in itruncate_finish
2417 	 *    don't quite matter.
2418 	 *
2419 	 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
2420 	 *    But the allocation will be replayed so we'll end up with an
2421 	 *    uninitialized quota block.
2422 	 *
2423 	 * This is all fine; things are still consistent, and we haven't lost
2424 	 * any quota information. Just don't complain about bad dquot blks.
2425 	 */
2426 	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2427 		if (flags & XFS_QMOPT_DOWARN)
2428 			xfs_alert(mp,
2429 			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2430 			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2431 		errs++;
2432 	}
2433 	if (ddq->d_version != XFS_DQUOT_VERSION) {
2434 		if (flags & XFS_QMOPT_DOWARN)
2435 			xfs_alert(mp,
2436 			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2437 			str, id, ddq->d_version, XFS_DQUOT_VERSION);
2438 		errs++;
2439 	}
2440 
2441 	if (ddq->d_flags != XFS_DQ_USER &&
2442 	    ddq->d_flags != XFS_DQ_PROJ &&
2443 	    ddq->d_flags != XFS_DQ_GROUP) {
2444 		if (flags & XFS_QMOPT_DOWARN)
2445 			xfs_alert(mp,
2446 			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2447 			str, id, ddq->d_flags);
2448 		errs++;
2449 	}
2450 
2451 	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2452 		if (flags & XFS_QMOPT_DOWARN)
2453 			xfs_alert(mp,
2454 			"%s : ondisk-dquot 0x%p, ID mismatch: "
2455 			"0x%x expected, found id 0x%x",
2456 			str, ddq, id, be32_to_cpu(ddq->d_id));
2457 		errs++;
2458 	}
2459 
2460 	if (!errs && ddq->d_id) {
2461 		if (ddq->d_blk_softlimit &&
2462 		    be64_to_cpu(ddq->d_bcount) >
2463 				be64_to_cpu(ddq->d_blk_softlimit)) {
2464 			if (!ddq->d_btimer) {
2465 				if (flags & XFS_QMOPT_DOWARN)
2466 					xfs_alert(mp,
2467 			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2468 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2469 				errs++;
2470 			}
2471 		}
2472 		if (ddq->d_ino_softlimit &&
2473 		    be64_to_cpu(ddq->d_icount) >
2474 				be64_to_cpu(ddq->d_ino_softlimit)) {
2475 			if (!ddq->d_itimer) {
2476 				if (flags & XFS_QMOPT_DOWARN)
2477 					xfs_alert(mp,
2478 			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2479 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2480 				errs++;
2481 			}
2482 		}
2483 		if (ddq->d_rtb_softlimit &&
2484 		    be64_to_cpu(ddq->d_rtbcount) >
2485 				be64_to_cpu(ddq->d_rtb_softlimit)) {
2486 			if (!ddq->d_rtbtimer) {
2487 				if (flags & XFS_QMOPT_DOWARN)
2488 					xfs_alert(mp,
2489 			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2490 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2491 				errs++;
2492 			}
2493 		}
2494 	}
2495 
2496 	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2497 		return errs;
2498 
2499 	if (flags & XFS_QMOPT_DOWARN)
2500 		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2501 
2502 	/*
2503 	 * Typically, a repair is only requested by quotacheck.
2504 	 */
2505 	ASSERT(id != -1);
2506 	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2507 	memset(d, 0, sizeof(xfs_dqblk_t));
2508 
2509 	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2510 	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2511 	d->dd_diskdq.d_flags = type;
2512 	d->dd_diskdq.d_id = cpu_to_be32(id);
2513 
2514 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2515 		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2516 		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2517 				 XFS_DQUOT_CRC_OFF);
2518 	}
2519 
2520 	return errs;
2521 }
2522 
2523 /*
2524  * Perform a dquot buffer recovery.
2525  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2526  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2527  * Else, treat it as a regular buffer and do recovery.
2528  */
2529 STATIC void
2530 xlog_recover_do_dquot_buffer(
2531 	struct xfs_mount		*mp,
2532 	struct xlog			*log,
2533 	struct xlog_recover_item	*item,
2534 	struct xfs_buf			*bp,
2535 	struct xfs_buf_log_format	*buf_f)
2536 {
2537 	uint			type;
2538 
2539 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2540 
2541 	/*
2542 	 * Filesystems are required to send in quota flags at mount time.
2543 	 */
2544 	if (mp->m_qflags == 0) {
2545 		return;
2546 	}
2547 
2548 	type = 0;
2549 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2550 		type |= XFS_DQ_USER;
2551 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2552 		type |= XFS_DQ_PROJ;
2553 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2554 		type |= XFS_DQ_GROUP;
2555 	/*
2556 	 * This type of quota was turned off, so ignore this buffer
2557 	 */
2558 	if (log->l_quotaoffs_flag & type)
2559 		return;
2560 
2561 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2562 }
2563 
2564 /*
2565  * This routine replays a modification made to a buffer at runtime.
2566  * There are actually two types of buffer, regular and inode, which
2567  * are handled differently.  Inode buffers are special in that we only
2568  * recover a specific set of data from them, namely
2569  * the inode di_next_unlinked fields.  This is because all other inode
2570  * data is actually logged via inode records and any data we replay
2571  * here which overlaps that may be stale.
2572  *
2573  * When meta-data buffers are freed at run time we log a buffer item
2574  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2575  * of the buffer in the log should not be replayed at recovery time.
2576  * This is so that if the blocks covered by the buffer are reused for
2577  * file data before we crash we don't end up replaying old, freed
2578  * meta-data into a user's file.
2579  *
2580  * To handle the cancellation of buffer log items, we make two passes
2581  * over the log during recovery.  During the first we build a table of
2582  * those buffers which have been cancelled, and during the second we
2583  * only replay those buffers which do not have corresponding cancel
2584  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2585  * for more details on the implementation of the table of cancel records.
2586  */
2587 STATIC int
2588 xlog_recover_buffer_pass2(
2589 	struct xlog			*log,
2590 	struct list_head		*buffer_list,
2591 	struct xlog_recover_item	*item,
2592 	xfs_lsn_t			current_lsn)
2593 {
2594 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2595 	xfs_mount_t		*mp = log->l_mp;
2596 	xfs_buf_t		*bp;
2597 	int			error;
2598 	uint			buf_flags;
2599 	xfs_lsn_t		lsn;
2600 
2601 	/*
2602 	 * In this pass we want to recover only those buffers which have
2603 	 * not been cancelled and are not cancellation buffers themselves.
2604 	 */
2605 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2606 			buf_f->blf_len, buf_f->blf_flags)) {
2607 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2608 		return 0;
2609 	}
2610 
2611 	trace_xfs_log_recover_buf_recover(log, buf_f);
2612 
2613 	buf_flags = 0;
2614 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2615 		buf_flags |= XBF_UNMAPPED;
2616 
2617 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2618 			  buf_flags, NULL);
2619 	if (!bp)
2620 		return XFS_ERROR(ENOMEM);
2621 	error = bp->b_error;
2622 	if (error) {
2623 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2624 		goto out_release;
2625 	}
2626 
2627 	/*
2628 	 * Recover the buffer only if we get an LSN from it and it's less than
2629 	 * the LSN of the transaction we are replaying.
2630 	 */
2631 	lsn = xlog_recover_get_buf_lsn(mp, bp);
2632 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
2633 		goto out_release;
2634 
2635 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2636 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2637 	} else if (buf_f->blf_flags &
2638 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2639 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2640 	} else {
2641 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2642 	}
2643 	if (error)
2644 		goto out_release;
2645 
2646 	/*
2647 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2648 	 * slower when taking into account all the buffers to be flushed.
2649 	 *
2650 	 * Also make sure that only inode buffers with good sizes stay in
2651 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2652 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2653 	 * buffers in the log can be a different size if the log was generated
2654 	 * by an older kernel using unclustered inode buffers or a newer kernel
2655 	 * running with a different inode cluster size.  Regardless, if
2656 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2657 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2658 	 * the buffer out of the buffer cache so that the buffer won't
2659 	 * overlap with future reads of those inodes.
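	 *
	 * For example (illustrative numbers), with a 4k filesystem block
	 * size and an 8k inode cluster size, an inode buffer that is not
	 * exactly 8k here is marked stale and written out immediately
	 * rather than being left in the buffer cache.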
2660 	 */
2661 	if (XFS_DINODE_MAGIC ==
2662 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2663 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2664 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2665 		xfs_buf_stale(bp);
2666 		error = xfs_bwrite(bp);
2667 	} else {
2668 		ASSERT(bp->b_target->bt_mount == mp);
2669 		bp->b_iodone = xlog_recover_iodone;
2670 		xfs_buf_delwri_queue(bp, buffer_list);
2671 	}
2672 
2673 out_release:
2674 	xfs_buf_relse(bp);
2675 	return error;
2676 }
2677 
2678 /*
2679  * Inode fork owner changes
2680  *
2681  * If we have been told that we have to reparent the inode fork, it's because an
2682  * extent swap operation on a CRC enabled filesystem has been done and we are
2683  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2684  * owners of it.
2685  *
2686  * The complexity here is that we don't have an inode context to work with, so
2687  * after we've replayed the inode we need to instantiate one.  This is where the
2688  * fun begins.
2689  *
2690  * We are in the middle of log recovery, so we can't run transactions. That
2691  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2692  * that will result in the corresponding iput() running the inode through
2693  * xfs_inactive(). If we've just replayed an inode core that changes the link
2694  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2695  * transactions (bad!).
2696  *
2697  * So, to avoid this, we instantiate an inode directly from the inode core we've
2698  * just recovered. We have the buffer still locked, and all we really need to
2699  * instantiate is the inode core and the forks being modified. We can do this
2700  * manually, then run the inode btree owner change, and then tear down the
2701  * xfs_inode without having to run any transactions at all.
2702  *
2703  * Also, because we don't have a transaction context available here but need
2704  * to gather all the buffers we modify for writeback, we pass the buffer_list
2705  * to the operation for it to use.
2706  */
2707 
2708 STATIC int
2709 xfs_recover_inode_owner_change(
2710 	struct xfs_mount	*mp,
2711 	struct xfs_dinode	*dip,
2712 	struct xfs_inode_log_format *in_f,
2713 	struct list_head	*buffer_list)
2714 {
2715 	struct xfs_inode	*ip;
2716 	int			error;
2717 
2718 	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2719 
2720 	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2721 	if (!ip)
2722 		return ENOMEM;
2723 
2724 	/* instantiate the inode */
2725 	xfs_dinode_from_disk(&ip->i_d, dip);
2726 	ASSERT(ip->i_d.di_version >= 3);
2727 
2728 	error = xfs_iformat_fork(ip, dip);
2729 	if (error)
2730 		goto out_free_ip;
2731 
2733 	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2734 		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2735 		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2736 					      ip->i_ino, buffer_list);
2737 		if (error)
2738 			goto out_free_ip;
2739 	}
2740 
2741 	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2742 		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2743 		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2744 					      ip->i_ino, buffer_list);
2745 		if (error)
2746 			goto out_free_ip;
2747 	}
2748 
2749 out_free_ip:
2750 	xfs_inode_free(ip);
2751 	return error;
2752 }
2753 
2754 STATIC int
2755 xlog_recover_inode_pass2(
2756 	struct xlog			*log,
2757 	struct list_head		*buffer_list,
2758 	struct xlog_recover_item	*item,
2759 	xfs_lsn_t			current_lsn)
2760 {
2761 	xfs_inode_log_format_t	*in_f;
2762 	xfs_mount_t		*mp = log->l_mp;
2763 	xfs_buf_t		*bp;
2764 	xfs_dinode_t		*dip;
2765 	int			len;
2766 	xfs_caddr_t		src;
2767 	xfs_caddr_t		dest;
2768 	int			error;
2769 	int			attr_index;
2770 	uint			fields;
2771 	xfs_icdinode_t		*dicp;
2772 	uint			isize;
2773 	int			need_free = 0;
2774 
2775 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2776 		in_f = item->ri_buf[0].i_addr;
2777 	} else {
2778 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2779 		need_free = 1;
2780 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2781 		if (error)
2782 			goto error;
2783 	}
2784 
2785 	/*
2786 	 * Inode buffers can be freed; look out for that case
2787 	 * and do not replay the inode.
2788 	 */
2789 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2790 					in_f->ilf_len, 0)) {
2791 		error = 0;
2792 		trace_xfs_log_recover_inode_cancel(log, in_f);
2793 		goto error;
2794 	}
2795 	trace_xfs_log_recover_inode_recover(log, in_f);
2796 
2797 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2798 			  &xfs_inode_buf_ops);
2799 	if (!bp) {
2800 		error = ENOMEM;
2801 		goto error;
2802 	}
2803 	error = bp->b_error;
2804 	if (error) {
2805 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2806 		goto out_release;
2807 	}
2808 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2809 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2810 
2811 	/*
2812 	 * Make sure the place we're flushing out to really looks
2813 	 * like an inode!
2814 	 */
2815 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2816 		xfs_alert(mp,
2817 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2818 			__func__, dip, bp, in_f->ilf_ino);
2819 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2820 				 XFS_ERRLEVEL_LOW, mp);
2821 		error = EFSCORRUPTED;
2822 		goto out_release;
2823 	}
2824 	dicp = item->ri_buf[1].i_addr;
2825 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2826 		xfs_alert(mp,
2827 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2828 			__func__, item, in_f->ilf_ino);
2829 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2830 				 XFS_ERRLEVEL_LOW, mp);
2831 		error = EFSCORRUPTED;
2832 		goto out_release;
2833 	}
2834 
2835 	/*
2836 	 * If the inode has an LSN in it, recover the inode only if it's less
2837 	 * than the LSN of the transaction we are replaying. Note: we still
2838 	 * need to replay an owner change even though the inode is more recent
2839 	 * than the transaction as there is no guarantee that all the btree
2840 	 * blocks are more recent than this transaction, too.
2841 	 */
2842 	if (dip->di_version >= 3) {
2843 		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);
2844 
2845 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2846 			trace_xfs_log_recover_inode_skip(log, in_f);
2847 			error = 0;
2848 			goto out_owner_change;
2849 		}
2850 	}
2851 
2852 	/*
2853 	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2854 	 * are transactional and if ordering is necessary we can determine that
2855 	 * more accurately by the LSN field in the V3 inode core. Don't trust
2856 	 * the inode versions as we might be changing them here - use the
2857 	 * superblock flag to determine whether we need to look at di_flushiter
2858 	 * to skip replay when the on-disk inode is newer than the log one.
2859 	 */
2860 	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2861 	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2862 		/*
2863 		 * Deal with the wrap case: di_flushiter wraps from DI_MAX_FLUSH
2864 		 * back to zero, so a small logged value is actually the newer one.
2865 		 */
2866 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2867 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2868 			/* do nothing */
2869 		} else {
2870 			trace_xfs_log_recover_inode_skip(log, in_f);
2871 			error = 0;
2872 			goto out_release;
2873 		}
2874 	}
2875 
2876 	/* Take the opportunity to reset the flush iteration count */
2877 	dicp->di_flushiter = 0;
2878 
2879 	if (unlikely(S_ISREG(dicp->di_mode))) {
2880 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2881 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2882 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2883 					 XFS_ERRLEVEL_LOW, mp, dicp);
2884 			xfs_alert(mp,
2885 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2886 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2887 				__func__, item, dip, bp, in_f->ilf_ino);
2888 			error = EFSCORRUPTED;
2889 			goto out_release;
2890 		}
2891 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2892 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2893 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2894 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2895 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2896 					     XFS_ERRLEVEL_LOW, mp, dicp);
2897 			xfs_alert(mp,
2898 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2899 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2900 				__func__, item, dip, bp, in_f->ilf_ino);
2901 			error = EFSCORRUPTED;
2902 			goto out_release;
2903 		}
2904 	}
2905 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2906 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2907 				     XFS_ERRLEVEL_LOW, mp, dicp);
2908 		xfs_alert(mp,
2909 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2910 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2911 			__func__, item, dip, bp, in_f->ilf_ino,
2912 			dicp->di_nextents + dicp->di_anextents,
2913 			dicp->di_nblocks);
2914 		error = EFSCORRUPTED;
2915 		goto out_release;
2916 	}
2917 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2918 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2919 				     XFS_ERRLEVEL_LOW, mp, dicp);
2920 		xfs_alert(mp,
2921 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2922 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2923 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2924 		error = EFSCORRUPTED;
2925 		goto out_release;
2926 	}
2927 	isize = xfs_icdinode_size(dicp->di_version);
2928 	if (unlikely(item->ri_buf[1].i_len > isize)) {
2929 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2930 				     XFS_ERRLEVEL_LOW, mp, dicp);
2931 		xfs_alert(mp,
2932 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2933 			__func__, item->ri_buf[1].i_len, item);
2934 		error = EFSCORRUPTED;
2935 		goto out_release;
2936 	}
2937 
2938 	/* The core is in in-core format */
2939 	xfs_dinode_to_disk(dip, dicp);
2940 
2941 	/* the rest is in on-disk format */
2942 	if (item->ri_buf[1].i_len > isize) {
2943 		memcpy((char *)dip + isize,
2944 			item->ri_buf[1].i_addr + isize,
2945 			item->ri_buf[1].i_len - isize);
2946 	}
2947 
2948 	fields = in_f->ilf_fields;
2949 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2950 	case XFS_ILOG_DEV:
2951 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2952 		break;
2953 	case XFS_ILOG_UUID:
2954 		memcpy(XFS_DFORK_DPTR(dip),
2955 		       &in_f->ilf_u.ilfu_uuid,
2956 		       sizeof(uuid_t));
2957 		break;
2958 	}
2959 
2960 	if (in_f->ilf_size == 2)
2961 		goto out_owner_change;
2962 	len = item->ri_buf[2].i_len;
2963 	src = item->ri_buf[2].i_addr;
2964 	ASSERT(in_f->ilf_size <= 4);
2965 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2966 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2967 	       (len == in_f->ilf_dsize));
2968 
2969 	switch (fields & XFS_ILOG_DFORK) {
2970 	case XFS_ILOG_DDATA:
2971 	case XFS_ILOG_DEXT:
2972 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2973 		break;
2974 
2975 	case XFS_ILOG_DBROOT:
2976 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2977 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2978 				 XFS_DFORK_DSIZE(dip, mp));
2979 		break;
2980 
2981 	default:
2982 		/*
2983 		 * There are no data fork flags set.
2984 		 */
2985 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2986 		break;
2987 	}
2988 
2989 	/*
2990 	 * If we logged any attribute data, recover it.  There may or
2991 	 * may not have been any other non-core data logged in this
2992 	 * transaction.
2993 	 */
2994 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2995 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2996 			attr_index = 3;
2997 		} else {
2998 			attr_index = 2;
2999 		}
3000 		len = item->ri_buf[attr_index].i_len;
3001 		src = item->ri_buf[attr_index].i_addr;
3002 		ASSERT(len == in_f->ilf_asize);
3003 
3004 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3005 		case XFS_ILOG_ADATA:
3006 		case XFS_ILOG_AEXT:
3007 			dest = XFS_DFORK_APTR(dip);
3008 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3009 			memcpy(dest, src, len);
3010 			break;
3011 
3012 		case XFS_ILOG_ABROOT:
3013 			dest = XFS_DFORK_APTR(dip);
3014 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3015 					 len, (xfs_bmdr_block_t*)dest,
3016 					 XFS_DFORK_ASIZE(dip, mp));
3017 			break;
3018 
3019 		default:
3020 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3021 			ASSERT(0);
3022 			error = EIO;
3023 			goto out_release;
3024 		}
3025 	}
3026 
3027 out_owner_change:
3028 	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3029 		error = xfs_recover_inode_owner_change(mp, dip, in_f,
3030 						       buffer_list);
3031 	/* re-generate the checksum. */
3032 	xfs_dinode_calc_crc(log->l_mp, dip);
3033 
3034 	ASSERT(bp->b_target->bt_mount == mp);
3035 	bp->b_iodone = xlog_recover_iodone;
3036 	xfs_buf_delwri_queue(bp, buffer_list);
3037 
3038 out_release:
3039 	xfs_buf_relse(bp);
3040 error:
3041 	if (need_free)
3042 		kmem_free(in_f);
3043 	return XFS_ERROR(error);
3044 }
3045 
3046 /*
3047  * Recover QUOTAOFF records. We simply make a note of it in the xlog
3048  * structure, so that we know not to do any dquot item or dquot buffer
3049  * recovery of that type.
3050  */
3051 STATIC int
3052 xlog_recover_quotaoff_pass1(
3053 	struct xlog			*log,
3054 	struct xlog_recover_item	*item)
3055 {
3056 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
3057 	ASSERT(qoff_f);
3058 
3059 	/*
3060 	 * The logitem format's flag tells us if this was user quotaoff,
3061 	 * group/project quotaoff or both.
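	 *
	 * For example, a quotaoff of both user and group quota sets
	 * XFS_DQ_USER and XFS_DQ_GROUP in l_quotaoffs_flag, and pass 2
	 * then skips dquot items and dquot buffers of those types.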
3062 	 */
3063 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3064 		log->l_quotaoffs_flag |= XFS_DQ_USER;
3065 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3066 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3067 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3068 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3069 
3070 	return 0;
3071 }
3072 
3073 /*
3074  * Recover a dquot record
3075  */
3076 STATIC int
3077 xlog_recover_dquot_pass2(
3078 	struct xlog			*log,
3079 	struct list_head		*buffer_list,
3080 	struct xlog_recover_item	*item,
3081 	xfs_lsn_t			current_lsn)
3082 {
3083 	xfs_mount_t		*mp = log->l_mp;
3084 	xfs_buf_t		*bp;
3085 	struct xfs_disk_dquot	*ddq, *recddq;
3086 	int			error;
3087 	xfs_dq_logformat_t	*dq_f;
3088 	uint			type;
3089 
3091 	/*
3092 	 * Filesystems are required to send in quota flags at mount time.
3093 	 */
3094 	if (mp->m_qflags == 0)
3095 		return 0;
3096 
3097 	recddq = item->ri_buf[1].i_addr;
3098 	if (recddq == NULL) {
3099 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3100 		return XFS_ERROR(EIO);
3101 	}
3102 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3103 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3104 			item->ri_buf[1].i_len, __func__);
3105 		return XFS_ERROR(EIO);
3106 	}
3107 
3108 	/*
3109 	 * This type of quota was turned off, so ignore this record.
3110 	 */
3111 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3112 	ASSERT(type);
3113 	if (log->l_quotaoffs_flag & type)
3114 		return 0;
3115 
3116 	/*
3117 	 * At this point we know that quota was _not_ turned off.
3118 	 * Since the mount flags are not indicating to us otherwise, this
3119 	 * must mean that quota is on, and the dquot needs to be replayed.
3120 	 * Remember that we may not have fully recovered the superblock yet,
3121 	 * so we can't do the usual trick of looking at the SB quota bits.
3122 	 *
3123 	 * The other possibility, of course, is that the quota subsystem was
3124 	 * removed since the last mount - ENOSYS.
3125 	 */
3126 	dq_f = item->ri_buf[0].i_addr;
3127 	ASSERT(dq_f);
3128 	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3129 			   "xlog_recover_dquot_pass2 (log copy)");
3130 	if (error)
3131 		return XFS_ERROR(EIO);
3132 	ASSERT(dq_f->qlf_len == 1);
3133 
3134 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3135 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3136 				   NULL);
3137 	if (error)
3138 		return error;
3139 
3140 	ASSERT(bp);
3141 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
3142 
3143 	/*
3144 	 * At least the magic num portion should be on disk because this
3145 	 * was among a chunk of dquots created earlier, and we did some
3146 	 * minimal initialization then.
3147 	 */
3148 	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3149 			   "xlog_recover_dquot_pass2");
3150 	if (error) {
3151 		xfs_buf_relse(bp);
3152 		return XFS_ERROR(EIO);
3153 	}
3154 
3155 	/*
3156 	 * If the dquot has an LSN in it, recover the dquot only if it's less
3157 	 * than the LSN of the transaction we are replaying.
3158 	 */
3159 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3160 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3161 		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);
3162 
3163 		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3164 			goto out_release;
3165 		}
3166 	}
3167 
3168 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
3169 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
3170 		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3171 				 XFS_DQUOT_CRC_OFF);
3172 	}
3173 
3174 	ASSERT(dq_f->qlf_size == 2);
3175 	ASSERT(bp->b_target->bt_mount == mp);
3176 	bp->b_iodone = xlog_recover_iodone;
3177 	xfs_buf_delwri_queue(bp, buffer_list);
3178 
3179 out_release:
3180 	xfs_buf_relse(bp);
3181 	return 0;
3182 }
3183 
3184 /*
3185  * This routine is called to create an in-core extent free intent
3186  * item from the efi format structure which was logged on disk.
3187  * It allocates an in-core efi, copies the extents from the format
3188  * structure into it, and adds the efi to the AIL with the given
3189  * LSN.
3190  */
3191 STATIC int
3192 xlog_recover_efi_pass2(
3193 	struct xlog			*log,
3194 	struct xlog_recover_item	*item,
3195 	xfs_lsn_t			lsn)
3196 {
3197 	int			error;
3198 	xfs_mount_t		*mp = log->l_mp;
3199 	xfs_efi_log_item_t	*efip;
3200 	xfs_efi_log_format_t	*efi_formatp;
3201 
3202 	efi_formatp = item->ri_buf[0].i_addr;
3203 
3204 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3205 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
3206 					 &(efip->efi_format)))) {
3207 		xfs_efi_item_free(efip);
3208 		return error;
3209 	}
3210 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3211 
3212 	spin_lock(&log->l_ailp->xa_lock);
3213 	/*
3214 	 * xfs_trans_ail_update() drops the AIL lock.
3215 	 */
3216 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3217 	return 0;
3218 }
3219 
3221 /*
3222  * This routine is called when an efd format structure is found in
3223  * a committed transaction in the log.  Its purpose is to cancel
3224  * the corresponding efi if it was still in the log.  To do this
3225  * it searches the AIL for the efi with an id equal to that in the
3226  * efd format structure.  If we find it, we remove the efi from the
3227  * AIL and free it.
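 *
 * Note that an efi which never got a matching efd is left in the AIL by
 * this code; such EFIs (like EFDs and IUNLINKs) are dealt with after item
 * replay completes, as noted above xlog_recover_free_trans() below.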
3228  */
3229 STATIC int
3230 xlog_recover_efd_pass2(
3231 	struct xlog			*log,
3232 	struct xlog_recover_item	*item)
3233 {
3234 	xfs_efd_log_format_t	*efd_formatp;
3235 	xfs_efi_log_item_t	*efip = NULL;
3236 	xfs_log_item_t		*lip;
3237 	__uint64_t		efi_id;
3238 	struct xfs_ail_cursor	cur;
3239 	struct xfs_ail		*ailp = log->l_ailp;
3240 
3241 	efd_formatp = item->ri_buf[0].i_addr;
3242 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3243 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3244 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3245 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3246 	efi_id = efd_formatp->efd_efi_id;
3247 
3248 	/*
3249 	 * Search for the efi with the id in the efd format structure
3250 	 * in the AIL.
3251 	 */
3252 	spin_lock(&ailp->xa_lock);
3253 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3254 	while (lip != NULL) {
3255 		if (lip->li_type == XFS_LI_EFI) {
3256 			efip = (xfs_efi_log_item_t *)lip;
3257 			if (efip->efi_format.efi_id == efi_id) {
3258 				/*
3259 				 * xfs_trans_ail_delete() drops the
3260 				 * AIL lock.
3261 				 */
3262 				xfs_trans_ail_delete(ailp, lip,
3263 						     SHUTDOWN_CORRUPT_INCORE);
3264 				xfs_efi_item_free(efip);
3265 				spin_lock(&ailp->xa_lock);
3266 				break;
3267 			}
3268 		}
3269 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3270 	}
3271 	xfs_trans_ail_cursor_done(ailp, &cur);
3272 	spin_unlock(&ailp->xa_lock);
3273 
3274 	return 0;
3275 }
3276 
3277 /*
3278  * This routine is called when an inode create format structure is found in a
3279  * committed transaction in the log.  Its purpose is to initialise the inodes
3280  * being allocated on disk. This requires us to get inode cluster buffers that
3281  * match the range to be initialised, stamped with inode templates and written
3282  * by delayed write so that subsequent modifications will hit the cached buffer
3283  * and only need writing out at the end of recovery.
3284  */
3285 STATIC int
3286 xlog_recover_do_icreate_pass2(
3287 	struct xlog		*log,
3288 	struct list_head	*buffer_list,
3289 	xlog_recover_item_t	*item)
3290 {
3291 	struct xfs_mount	*mp = log->l_mp;
3292 	struct xfs_icreate_log	*icl;
3293 	xfs_agnumber_t		agno;
3294 	xfs_agblock_t		agbno;
3295 	unsigned int		count;
3296 	unsigned int		isize;
3297 	xfs_agblock_t		length;
3298 
3299 	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3300 	if (icl->icl_type != XFS_LI_ICREATE) {
3301 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3302 		return EINVAL;
3303 	}
3304 
3305 	if (icl->icl_size != 1) {
3306 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3307 		return EINVAL;
3308 	}
3309 
3310 	agno = be32_to_cpu(icl->icl_ag);
3311 	if (agno >= mp->m_sb.sb_agcount) {
3312 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3313 		return EINVAL;
3314 	}
3315 	agbno = be32_to_cpu(icl->icl_agbno);
3316 	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3317 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3318 		return EINVAL;
3319 	}
3320 	isize = be32_to_cpu(icl->icl_isize);
3321 	if (isize != mp->m_sb.sb_inodesize) {
3322 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3323 		return EINVAL;
3324 	}
3325 	count = be32_to_cpu(icl->icl_count);
3326 	if (!count) {
3327 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3328 		return EINVAL;
3329 	}
3330 	length = be32_to_cpu(icl->icl_length);
3331 	if (!length || length >= mp->m_sb.sb_agblocks) {
3332 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3333 		return EINVAL;
3334 	}
3335 
3336 	/* existing allocation is a fixed value */
3337 	ASSERT(count == XFS_IALLOC_INODES(mp));
3338 	ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3339 	if (count != XFS_IALLOC_INODES(mp) ||
3340 	     length != XFS_IALLOC_BLOCKS(mp)) {
3341 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3342 		return EINVAL;
3343 	}
3344 
3345 	/*
3346 	 * Inode buffers can be freed. Do not replay the inode initialisation as
3347 	 * we could be overwriting something written after this inode buffer was
3348 	 * cancelled.
3349 	 *
3350 	 * XXX: we need to iterate all buffers and only init those that are not
3351 	 * cancelled. I think that a more fine grained factoring of
3352 	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
3353 	 * done easily.
3354 	 */
3355 	if (xlog_check_buffer_cancelled(log,
3356 			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3357 		return 0;
3358 
3359 	xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3360 					be32_to_cpu(icl->icl_gen));
3361 	return 0;
3362 }
3363 
3364 /*
3365  * Free up any resources allocated by the transaction
3366  *
3367  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3368  */
3369 STATIC void
3370 xlog_recover_free_trans(
3371 	struct xlog_recover	*trans)
3372 {
3373 	xlog_recover_item_t	*item, *n;
3374 	int			i;
3375 
3376 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3377 		/* Free the regions in the item. */
3378 		list_del(&item->ri_list);
3379 		for (i = 0; i < item->ri_cnt; i++)
3380 			kmem_free(item->ri_buf[i].i_addr);
3381 		/* Free the item itself */
3382 		kmem_free(item->ri_buf);
3383 		kmem_free(item);
3384 	}
3385 	/* Free the transaction recover structure */
3386 	kmem_free(trans);
3387 }
3388 
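/*
 * The *_ra_pass2() helpers below issue readahead for the buffers that
 * pass 2 will shortly replay, skipping any buffer that has a
 * cancellation record, so that the synchronous reads done during
 * replay mostly hit the buffer cache.
 */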
3389 STATIC void
3390 xlog_recover_buffer_ra_pass2(
3391 	struct xlog                     *log,
3392 	struct xlog_recover_item        *item)
3393 {
3394 	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
3395 	struct xfs_mount		*mp = log->l_mp;
3396 
3397 	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3398 			buf_f->blf_len, buf_f->blf_flags)) {
3399 		return;
3400 	}
3401 
3402 	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3403 				buf_f->blf_len, NULL);
3404 }
3405 
3406 STATIC void
3407 xlog_recover_inode_ra_pass2(
3408 	struct xlog                     *log,
3409 	struct xlog_recover_item        *item)
3410 {
3411 	struct xfs_inode_log_format	ilf_buf;
3412 	struct xfs_inode_log_format	*ilfp;
3413 	struct xfs_mount		*mp = log->l_mp;
3414 	int			error;
3415 
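	/*
	 * The item may have been logged with an older layout of the inode
	 * log format structure; if so, convert it into a zeroed stack copy
	 * before using any of its fields.
	 */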
3416 	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3417 		ilfp = item->ri_buf[0].i_addr;
3418 	} else {
3419 		ilfp = &ilf_buf;
3420 		memset(ilfp, 0, sizeof(*ilfp));
3421 		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3422 		if (error)
3423 			return;
3424 	}
3425 
3426 	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3427 		return;
3428 
3429 	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3430 				ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3431 }
3432 
3433 STATIC void
3434 xlog_recover_dquot_ra_pass2(
3435 	struct xlog			*log,
3436 	struct xlog_recover_item	*item)
3437 {
3438 	struct xfs_mount	*mp = log->l_mp;
3439 	struct xfs_disk_dquot	*recddq;
3440 	struct xfs_dq_logformat	*dq_f;
3441 	uint			type;
3442 
3443 
3444 	if (mp->m_qflags == 0)
3445 		return;
3446 
3447 	recddq = item->ri_buf[1].i_addr;
3448 	if (recddq == NULL)
3449 		return;
3450 	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3451 		return;
3452 
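	/*
	 * If a quotaoff record for this quota type was seen in pass 1,
	 * dquots of this type will not be replayed, so don't bother
	 * reading them ahead either.
	 */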
3453 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3454 	ASSERT(type);
3455 	if (log->l_quotaoffs_flag & type)
3456 		return;
3457 
3458 	dq_f = item->ri_buf[0].i_addr;
3459 	ASSERT(dq_f);
3460 	ASSERT(dq_f->qlf_len == 1);
3461 
3462 	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
3463 			  XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
3464 }
3465 
3466 STATIC void
3467 xlog_recover_ra_pass2(
3468 	struct xlog			*log,
3469 	struct xlog_recover_item	*item)
3470 {
3471 	switch (ITEM_TYPE(item)) {
3472 	case XFS_LI_BUF:
3473 		xlog_recover_buffer_ra_pass2(log, item);
3474 		break;
3475 	case XFS_LI_INODE:
3476 		xlog_recover_inode_ra_pass2(log, item);
3477 		break;
3478 	case XFS_LI_DQUOT:
3479 		xlog_recover_dquot_ra_pass2(log, item);
3480 		break;
3481 	case XFS_LI_EFI:
3482 	case XFS_LI_EFD:
3483 	case XFS_LI_QUOTAOFF:
3484 	default:
3485 		break;
3486 	}
3487 }
3488 
3489 STATIC int
3490 xlog_recover_commit_pass1(
3491 	struct xlog			*log,
3492 	struct xlog_recover		*trans,
3493 	struct xlog_recover_item	*item)
3494 {
3495 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3496 
3497 	switch (ITEM_TYPE(item)) {
3498 	case XFS_LI_BUF:
3499 		return xlog_recover_buffer_pass1(log, item);
3500 	case XFS_LI_QUOTAOFF:
3501 		return xlog_recover_quotaoff_pass1(log, item);
3502 	case XFS_LI_INODE:
3503 	case XFS_LI_EFI:
3504 	case XFS_LI_EFD:
3505 	case XFS_LI_DQUOT:
3506 	case XFS_LI_ICREATE:
3507 		/* nothing to do in pass 1 */
3508 		return 0;
3509 	default:
3510 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3511 			__func__, ITEM_TYPE(item));
3512 		ASSERT(0);
3513 		return XFS_ERROR(EIO);
3514 	}
3515 }
3516 
3517 STATIC int
3518 xlog_recover_commit_pass2(
3519 	struct xlog			*log,
3520 	struct xlog_recover		*trans,
3521 	struct list_head		*buffer_list,
3522 	struct xlog_recover_item	*item)
3523 {
3524 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3525 
3526 	switch (ITEM_TYPE(item)) {
3527 	case XFS_LI_BUF:
3528 		return xlog_recover_buffer_pass2(log, buffer_list, item,
3529 						 trans->r_lsn);
3530 	case XFS_LI_INODE:
3531 		return xlog_recover_inode_pass2(log, buffer_list, item,
3532 						 trans->r_lsn);
3533 	case XFS_LI_EFI:
3534 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3535 	case XFS_LI_EFD:
3536 		return xlog_recover_efd_pass2(log, item);
3537 	case XFS_LI_DQUOT:
3538 		return xlog_recover_dquot_pass2(log, buffer_list, item,
3539 						trans->r_lsn);
3540 	case XFS_LI_ICREATE:
3541 		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3542 	case XFS_LI_QUOTAOFF:
3543 		/* nothing to do in pass2 */
3544 		return 0;
3545 	default:
3546 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3547 			__func__, ITEM_TYPE(item));
3548 		ASSERT(0);
3549 		return XFS_ERROR(EIO);
3550 	}
3551 }
3552 
3553 STATIC int
3554 xlog_recover_items_pass2(
3555 	struct xlog                     *log,
3556 	struct xlog_recover             *trans,
3557 	struct list_head                *buffer_list,
3558 	struct list_head                *item_list)
3559 {
3560 	struct xlog_recover_item	*item;
3561 	int				error = 0;
3562 
3563 	list_for_each_entry(item, item_list, ri_list) {
3564 		error = xlog_recover_commit_pass2(log, trans,
3565 					  buffer_list, item);
3566 		if (error)
3567 			return error;
3568 	}
3569 
3570 	return error;
3571 }
3572 
3573 /*
3574  * Perform the transaction.
3575  *
3576  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
3577  * EFIs and EFDs get queued up by adding entries into the AIL for them.
3578  */
3579 STATIC int
3580 xlog_recover_commit_trans(
3581 	struct xlog		*log,
3582 	struct xlog_recover	*trans,
3583 	int			pass)
3584 {
3585 	int				error = 0;
3586 	int				error2;
3587 	int				items_queued = 0;
3588 	struct xlog_recover_item	*item;
3589 	struct xlog_recover_item	*next;
3590 	LIST_HEAD(buffer_list);
3591 	LIST_HEAD(ra_list);
3592 	LIST_HEAD(done_list);
3593 
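	/*
	 * Pass 2 items are queued onto ra_list so that readahead can be
	 * issued for a whole batch of buffers before any item is replayed;
	 * the batch size below bounds how far the readahead window runs
	 * ahead of the replay.
	 */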
3594 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3595 
3596 	hlist_del(&trans->r_list);
3597 
3598 	error = xlog_recover_reorder_trans(log, trans, pass);
3599 	if (error)
3600 		return error;
3601 
3602 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3603 		switch (pass) {
3604 		case XLOG_RECOVER_PASS1:
3605 			error = xlog_recover_commit_pass1(log, trans, item);
3606 			break;
3607 		case XLOG_RECOVER_PASS2:
3608 			xlog_recover_ra_pass2(log, item);
3609 			list_move_tail(&item->ri_list, &ra_list);
3610 			items_queued++;
3611 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3612 				error = xlog_recover_items_pass2(log, trans,
3613 						&buffer_list, &ra_list);
3614 				list_splice_tail_init(&ra_list, &done_list);
3615 				items_queued = 0;
3616 			}
3617 
3618 			break;
3619 		default:
3620 			ASSERT(0);
3621 		}
3622 
3623 		if (error)
3624 			goto out;
3625 	}
3626 
3627 out:
3628 	if (!list_empty(&ra_list)) {
3629 		if (!error)
3630 			error = xlog_recover_items_pass2(log, trans,
3631 					&buffer_list, &ra_list);
3632 		list_splice_tail_init(&ra_list, &done_list);
3633 	}
3634 
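	/*
	 * Splice the replayed items back onto r_itemq so that
	 * xlog_recover_free_trans() below frees them along with the
	 * transaction structure itself.
	 */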
3635 	if (!list_empty(&done_list))
3636 		list_splice_init(&done_list, &trans->r_itemq);
3637 
3638 	xlog_recover_free_trans(trans);
3639 
3640 	error2 = xfs_buf_delwri_submit(&buffer_list);
3641 	return error ? error : error2;
3642 }
3643 
3644 STATIC int
3645 xlog_recover_unmount_trans(
3646 	struct xlog		*log,
3647 	struct xlog_recover	*trans)
3648 {
3649 	/* Do nothing now */
3650 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3651 	return 0;
3652 }
3653 
3654 /*
3655  * There are two valid states of the r_state field.  0 indicates that the
3656  * transaction structure is in a normal state.  We have either seen the
3657  * start of the transaction or the last operation we added was not a partial
3658  * operation.  If the last operation we added to the transaction was a
3659  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3660  *
3661  * NOTE: skip LRs with 0 data length.
3662  */
3663 STATIC int
3664 xlog_recover_process_data(
3665 	struct xlog		*log,
3666 	struct hlist_head	rhash[],
3667 	struct xlog_rec_header	*rhead,
3668 	xfs_caddr_t		dp,
3669 	int			pass)
3670 {
3671 	xfs_caddr_t		lp;
3672 	int			num_logops;
3673 	xlog_op_header_t	*ohead;
3674 	xlog_recover_t		*trans;
3675 	xlog_tid_t		tid;
3676 	int			error;
3677 	unsigned long		hash;
3678 	uint			flags;
3679 
3680 	lp = dp + be32_to_cpu(rhead->h_len);
3681 	num_logops = be32_to_cpu(rhead->h_num_logops);
3682 
3683 	/* check the log format matches our own - else we can't recover */
3684 	if (xlog_header_check_recover(log->l_mp, rhead))
3685 		return XFS_ERROR(EIO);
3686 
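	/*
	 * Walk the record body: each log operation is an xlog_op_header
	 * followed by oh_len bytes of payload. Operations from different
	 * in-flight transactions can be interleaved within one record,
	 * hence the tid hash lookup below.
	 */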
3687 	while ((dp < lp) && num_logops) {
3688 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3689 		ohead = (xlog_op_header_t *)dp;
3690 		dp += sizeof(xlog_op_header_t);
3691 		if (ohead->oh_clientid != XFS_TRANSACTION &&
3692 		    ohead->oh_clientid != XFS_LOG) {
3693 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3694 					__func__, ohead->oh_clientid);
3695 			ASSERT(0);
3696 			return XFS_ERROR(EIO);
3697 		}
3698 		tid = be32_to_cpu(ohead->oh_tid);
3699 		hash = XLOG_RHASH(tid);
3700 		trans = xlog_recover_find_tid(&rhash[hash], tid);
3701 		if (trans == NULL) {		   /* not found; add new tid */
3702 			if (ohead->oh_flags & XLOG_START_TRANS)
3703 				xlog_recover_new_tid(&rhash[hash], tid,
3704 					be64_to_cpu(rhead->h_lsn));
3705 		} else {
3706 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3707 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
3708 					__func__, be32_to_cpu(ohead->oh_len));
3709 				WARN_ON(1);
3710 				return XFS_ERROR(EIO);
3711 			}
3712 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
3713 			if (flags & XLOG_WAS_CONT_TRANS)
3714 				flags &= ~XLOG_CONTINUE_TRANS;
3715 			switch (flags) {
3716 			case XLOG_COMMIT_TRANS:
3717 				error = xlog_recover_commit_trans(log,
3718 								trans, pass);
3719 				break;
3720 			case XLOG_UNMOUNT_TRANS:
3721 				error = xlog_recover_unmount_trans(log, trans);
3722 				break;
3723 			case XLOG_WAS_CONT_TRANS:
3724 				error = xlog_recover_add_to_cont_trans(log,
3725 						trans, dp,
3726 						be32_to_cpu(ohead->oh_len));
3727 				break;
3728 			case XLOG_START_TRANS:
3729 				xfs_warn(log->l_mp, "%s: bad transaction",
3730 					__func__);
3731 				ASSERT(0);
3732 				error = XFS_ERROR(EIO);
3733 				break;
3734 			case 0:
3735 			case XLOG_CONTINUE_TRANS:
3736 				error = xlog_recover_add_to_trans(log, trans,
3737 						dp, be32_to_cpu(ohead->oh_len));
3738 				break;
3739 			default:
3740 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3741 					__func__, flags);
3742 				ASSERT(0);
3743 				error = XFS_ERROR(EIO);
3744 				break;
3745 			}
3746 			if (error)
3747 				return error;
3748 		}
3749 		dp += be32_to_cpu(ohead->oh_len);
3750 		num_logops--;
3751 	}
3752 	return 0;
3753 }
3754 
3755 /*
3756  * Process an extent free intent item that was recovered from
3757  * the log.  We need to free the extents that it describes.
3758  */
3759 STATIC int
3760 xlog_recover_process_efi(
3761 	xfs_mount_t		*mp,
3762 	xfs_efi_log_item_t	*efip)
3763 {
3764 	xfs_efd_log_item_t	*efdp;
3765 	xfs_trans_t		*tp;
3766 	int			i;
3767 	int			error = 0;
3768 	xfs_extent_t		*extp;
3769 	xfs_fsblock_t		startblock_fsb;
3770 
3771 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3772 
3773 	/*
3774 	 * First check the validity of the extents described by the
3775 	 * EFI.  If any are bad, then assume that all are bad and
3776 	 * just toss the EFI.
3777 	 */
3778 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3779 		extp = &(efip->efi_format.efi_extents[i]);
3780 		startblock_fsb = XFS_BB_TO_FSB(mp,
3781 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
3782 		if ((startblock_fsb == 0) ||
3783 		    (extp->ext_len == 0) ||
3784 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3785 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3786 			/*
3787 			 * This will pull the EFI from the AIL and
3788 			 * free the memory associated with it.
3789 			 */
3790 			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3791 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
3792 			return XFS_ERROR(EIO);
3793 		}
3794 	}
3795 
3796 	tp = xfs_trans_alloc(mp, 0);
3797 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
3798 	if (error)
3799 		goto abort_error;
3800 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3801 
3802 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3803 		extp = &(efip->efi_format.efi_extents[i]);
3804 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3805 		if (error)
3806 			goto abort_error;
3807 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3808 					 extp->ext_len);
3809 	}
3810 
3811 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3812 	error = xfs_trans_commit(tp, 0);
3813 	return error;
3814 
3815 abort_error:
3816 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3817 	return error;
3818 }
3819 
3820 /*
3821  * When this is called, all of the EFIs which did not have
3822  * corresponding EFDs should be in the AIL.  What we do now
3823  * is free the extents associated with each one.
3824  *
3825  * Since we process the EFIs in normal transactions, they
3826  * will be removed at some point after the commit.  This prevents
3827  * us from just walking down the list processing each one.
3828  * We'll use a flag in the EFI to skip those that we've already
3829  * processed and use the AIL iteration mechanism's generation
3830  * count to try to speed this up at least a bit.
3831  *
3832  * When we start, we know that the EFIs are the only things in
3833  * the AIL.  As we process them, however, other items are added
3834  * to the AIL.  Since everything added to the AIL must come after
3835  * everything already in the AIL, we stop processing as soon as
3836  * we see something other than an EFI in the AIL.
3837  */
3838 STATIC int
3839 xlog_recover_process_efis(
3840 	struct xlog	*log)
3841 {
3842 	xfs_log_item_t		*lip;
3843 	xfs_efi_log_item_t	*efip;
3844 	int			error = 0;
3845 	struct xfs_ail_cursor	cur;
3846 	struct xfs_ail		*ailp;
3847 
3848 	ailp = log->l_ailp;
3849 	spin_lock(&ailp->xa_lock);
3850 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3851 	while (lip != NULL) {
3852 		/*
3853 		 * We're done when we see something other than an EFI.
3854 		 * There should be no EFIs left in the AIL now.
3855 		 */
3856 		if (lip->li_type != XFS_LI_EFI) {
3857 #ifdef DEBUG
3858 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3859 				ASSERT(lip->li_type != XFS_LI_EFI);
3860 #endif
3861 			break;
3862 		}
3863 
3864 		/*
3865 		 * Skip EFIs that we've already processed.
3866 		 */
3867 		efip = (xfs_efi_log_item_t *)lip;
3868 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3869 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3870 			continue;
3871 		}
3872 
3873 		spin_unlock(&ailp->xa_lock);
3874 		error = xlog_recover_process_efi(log->l_mp, efip);
3875 		spin_lock(&ailp->xa_lock);
3876 		if (error)
3877 			goto out;
3878 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3879 	}
3880 out:
3881 	xfs_trans_ail_cursor_done(ailp, &cur);
3882 	spin_unlock(&ailp->xa_lock);
3883 	return error;
3884 }
3885 
3886 /*
3887  * This routine performs a transaction to null out a bad inode pointer
3888  * in an agi unlinked inode hash bucket.
3889  */
3890 STATIC void
3891 xlog_recover_clear_agi_bucket(
3892 	xfs_mount_t	*mp,
3893 	xfs_agnumber_t	agno,
3894 	int		bucket)
3895 {
3896 	xfs_trans_t	*tp;
3897 	xfs_agi_t	*agi;
3898 	xfs_buf_t	*agibp;
3899 	int		offset;
3900 	int		error;
3901 
3902 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3903 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
3904 	if (error)
3905 		goto out_abort;
3906 
3907 	error = xfs_read_agi(mp, tp, agno, &agibp);
3908 	if (error)
3909 		goto out_abort;
3910 
3911 	agi = XFS_BUF_TO_AGI(agibp);
3912 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3913 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3914 		 (sizeof(xfs_agino_t) * bucket);
3915 	xfs_trans_log_buf(tp, agibp, offset,
3916 			  (offset + sizeof(xfs_agino_t) - 1));
3917 
3918 	error = xfs_trans_commit(tp, 0);
3919 	if (error)
3920 		goto out_error;
3921 	return;
3922 
3923 out_abort:
3924 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3925 out_error:
3926 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3927 	return;
3928 }
3929 
3930 STATIC xfs_agino_t
3931 xlog_recover_process_one_iunlink(
3932 	struct xfs_mount		*mp,
3933 	xfs_agnumber_t			agno,
3934 	xfs_agino_t			agino,
3935 	int				bucket)
3936 {
3937 	struct xfs_buf			*ibp;
3938 	struct xfs_dinode		*dip;
3939 	struct xfs_inode		*ip;
3940 	xfs_ino_t			ino;
3941 	int				error;
3942 
3943 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3944 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3945 	if (error)
3946 		goto fail;
3947 
3948 	/*
3949 	 * Get the on-disk inode to find the next inode in the bucket.
3950 	 */
3951 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3952 	if (error)
3953 		goto fail_iput;
3954 
3955 	ASSERT(ip->i_d.di_nlink == 0);
3956 	ASSERT(ip->i_d.di_mode != 0);
3957 
3958 	/* setup for the next pass */
3959 	agino = be32_to_cpu(dip->di_next_unlinked);
3960 	xfs_buf_relse(ibp);
3961 
3962 	/*
3963 	 * Prevent any DMAPI event from being sent when the reference on
3964 	 * the inode is dropped.
3965 	 */
3966 	ip->i_d.di_dmevmask = 0;
3967 
3968 	IRELE(ip);
3969 	return agino;
3970 
3971  fail_iput:
3972 	IRELE(ip);
3973  fail:
3974 	/*
3975 	 * We can't read in the inode this bucket points to, or this inode
3976 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3977 	 * some inodes and space, but at least we won't hang.
3978 	 *
3979 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3980 	 * clear the inode pointer in the bucket.
3981 	 */
3982 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3983 	return NULLAGINO;
3984 }
3985 
3986 /*
3987  * xlog_iunlink_recover
3988  *
3989  * This is called during recovery to process any inodes which
3990  * we unlinked but not freed when the system crashed.  These
3991  * inodes will be on the lists in the AGI blocks.  What we do
3992  * here is scan all the AGIs and fully truncate and free any
3993  * inodes found on the lists.  Each inode is removed from the
3994  * lists when it has been fully truncated and is freed.  The
3995  * freeing of the inode and its removal from the list must be
3996  * atomic.
3997  */
3998 STATIC void
3999 xlog_recover_process_iunlinks(
4000 	struct xlog	*log)
4001 {
4002 	xfs_mount_t	*mp;
4003 	xfs_agnumber_t	agno;
4004 	xfs_agi_t	*agi;
4005 	xfs_buf_t	*agibp;
4006 	xfs_agino_t	agino;
4007 	int		bucket;
4008 	int		error;
4009 	uint		mp_dmevmask;
4010 
4011 	mp = log->l_mp;
4012 
4013 	/*
4014 	 * Prevent any DMAPI event from being sent while in this function.
4015 	 */
4016 	mp_dmevmask = mp->m_dmevmask;
4017 	mp->m_dmevmask = 0;
4018 
4019 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4020 		/*
4021 		 * Find the AGI for this AG.
4022 		 */
4023 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4024 		if (error) {
4025 			/*
4026 			 * The AGI is corrupt. Don't process it.
4027 			 *
4028 			 * We should probably mark the filesystem as corrupt
4029 			 * after we've recovered all the AGs we can....
4030 			 */
4031 			continue;
4032 		}
4033 		/*
4034 		 * Unlock the buffer so that it can be acquired in the normal
4035 		 * course of the transaction to truncate and free each inode.
4036 		 * Because we are not racing with anyone else here for the AGI
4037 		 * buffer, we don't even need to hold it locked to read the
4038 		 * initial unlinked bucket entries out of the buffer. We keep a
4039 		 * buffer reference, though, so that it stays pinned in memory
4040 		 * while we need the buffer.
4041 		 */
4042 		agi = XFS_BUF_TO_AGI(agibp);
4043 		xfs_buf_unlock(agibp);
4044 
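		/*
		 * Each AGI bucket heads a singly linked list of unlinked
		 * inodes chained through di_next_unlinked; walk each chain
		 * until it terminates at NULLAGINO.
		 */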
4045 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4046 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4047 			while (agino != NULLAGINO) {
4048 				agino = xlog_recover_process_one_iunlink(mp,
4049 							agno, agino, bucket);
4050 			}
4051 		}
4052 		xfs_buf_rele(agibp);
4053 	}
4054 
4055 	mp->m_dmevmask = mp_dmevmask;
4056 }
4057 
4058 /*
4059  * Unpack the log buffer data and CRC check it. If the check fails, issue a
4060  * warning if and only if the CRC in the header is non-zero. This makes the
4061  * check an advisory warning, and the zero CRC check will prevent failure
4062  * warnings from being emitted when upgrading the kernel from one that does not
4063  * add CRCs by default.
4064  *
4065  * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
4066  * corruption failure.
4067  */
4068 STATIC int
4069 xlog_unpack_data_crc(
4070 	struct xlog_rec_header	*rhead,
4071 	xfs_caddr_t		dp,
4072 	struct xlog		*log)
4073 {
4074 	__le32			crc;
4075 
4076 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4077 	if (crc != rhead->h_crc) {
4078 		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4079 			xfs_alert(log->l_mp,
4080 		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
4081 					le32_to_cpu(rhead->h_crc),
4082 					le32_to_cpu(crc));
4083 			xfs_hex_dump(dp, 32);
4084 		}
4085 
4086 		/*
4087 		 * If we've detected a log record corruption, then we can't
4088 		 * recover past this point. Abort recovery if we are enforcing
4089 		 * CRC protection by punting an error back up the stack.
4090 		 */
4091 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4092 			return EFSCORRUPTED;
4093 	}
4094 
4095 	return 0;
4096 }
4097 
4098 STATIC int
4099 xlog_unpack_data(
4100 	struct xlog_rec_header	*rhead,
4101 	xfs_caddr_t		dp,
4102 	struct xlog		*log)
4103 {
4104 	int			i, j, k;
4105 	int			error;
4106 
4107 	error = xlog_unpack_data_crc(rhead, dp, log);
4108 	if (error)
4109 		return error;
4110 
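	/*
	 * When the record was written, the first __be32 of every basic
	 * block in the body was overwritten with the record's cycle number
	 * and the original words were stashed in h_cycle_data[]; put them
	 * back now.
	 */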
4111 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4112 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4113 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4114 		dp += BBSIZE;
4115 	}
4116 
4117 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4118 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
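		/*
		 * Cycle data for blocks past the first 32k of the record
		 * lives in the extended headers; j selects the extended
		 * header and k the slot within it.
		 */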
4119 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4120 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4121 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4122 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4123 			dp += BBSIZE;
4124 		}
4125 	}
4126 
4127 	return 0;
4128 }
4129 
4130 STATIC int
4131 xlog_valid_rec_header(
4132 	struct xlog		*log,
4133 	struct xlog_rec_header	*rhead,
4134 	xfs_daddr_t		blkno)
4135 {
4136 	int			hlen;
4137 
4138 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4139 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4140 				XFS_ERRLEVEL_LOW, log->l_mp);
4141 		return XFS_ERROR(EFSCORRUPTED);
4142 	}
4143 	if (unlikely(
4144 	    (!rhead->h_version ||
4145 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4146 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4147 			__func__, be32_to_cpu(rhead->h_version));
4148 		return XFS_ERROR(EIO);
4149 	}
4150 
4151 	/* LR body must have data or it wouldn't have been written */
4152 	hlen = be32_to_cpu(rhead->h_len);
4153 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
4154 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4155 				XFS_ERRLEVEL_LOW, log->l_mp);
4156 		return XFS_ERROR(EFSCORRUPTED);
4157 	}
4158 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
4159 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4160 				XFS_ERRLEVEL_LOW, log->l_mp);
4161 		return XFS_ERROR(EFSCORRUPTED);
4162 	}
4163 	return 0;
4164 }
4165 
4166 /*
4167  * Read the log from tail to head and process the log records found.
4168  * Handle the two cases where the tail and head are in the same cycle
4169  * and where the active portion of the log wraps around the end of
4170  * the physical log separately.  The pass parameter is passed through
4171  * to the routines called to process the data and is not looked at
4172  * here.
4173  */
4174 STATIC int
4175 xlog_do_recovery_pass(
4176 	struct xlog		*log,
4177 	xfs_daddr_t		head_blk,
4178 	xfs_daddr_t		tail_blk,
4179 	int			pass)
4180 {
4181 	xlog_rec_header_t	*rhead;
4182 	xfs_daddr_t		blk_no;
4183 	xfs_caddr_t		offset;
4184 	xfs_buf_t		*hbp, *dbp;
4185 	int			error = 0, h_size;
4186 	int			bblks, split_bblks;
4187 	int			hblks, split_hblks, wrapped_hblks;
4188 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
4189 
4190 	ASSERT(head_blk != tail_blk);
4191 
4192 	/*
4193 	 * Read the header of the tail block and get the iclog buffer size from
4194 	 * h_size.  Use this to tell how many sectors make up the log header.
4195 	 */
4196 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4197 		/*
4198 		 * When using variable length iclogs, read the first sector of
4199 		 * the iclog header and extract the header size from it.  Get a
4200 		 * new hbp that is the correct size.
4201 		 */
4202 		hbp = xlog_get_bp(log, 1);
4203 		if (!hbp)
4204 			return ENOMEM;
4205 
4206 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4207 		if (error)
4208 			goto bread_err1;
4209 
4210 		rhead = (xlog_rec_header_t *)offset;
4211 		error = xlog_valid_rec_header(log, rhead, tail_blk);
4212 		if (error)
4213 			goto bread_err1;
4214 		h_size = be32_to_cpu(rhead->h_size);
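		/*
		 * A 512 byte header sector only holds cycle data for 32k
		 * (XLOG_HEADER_CYCLE_SIZE) of record data, so v2 logs with
		 * larger iclogs need hblks = ceil(h_size / 32k) header
		 * blocks; e.g. a 256k iclog (illustrative value) would need
		 * 8 header blocks.
		 */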
4215 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4216 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4217 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4218 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
4219 				hblks++;
4220 			xlog_put_bp(hbp);
4221 			hbp = xlog_get_bp(log, hblks);
4222 		} else {
4223 			hblks = 1;
4224 		}
4225 	} else {
4226 		ASSERT(log->l_sectBBsize == 1);
4227 		hblks = 1;
4228 		hbp = xlog_get_bp(log, 1);
4229 		h_size = XLOG_BIG_RECORD_BSIZE;
4230 	}
4231 
4232 	if (!hbp)
4233 		return ENOMEM;
4234 	dbp = xlog_get_bp(log, BTOBB(h_size));
4235 	if (!dbp) {
4236 		xlog_put_bp(hbp);
4237 		return ENOMEM;
4238 	}
4239 
4240 	memset(rhash, 0, sizeof(rhash));
4241 	if (tail_blk <= head_blk) {
4242 		for (blk_no = tail_blk; blk_no < head_blk; ) {
4243 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4244 			if (error)
4245 				goto bread_err2;
4246 
4247 			rhead = (xlog_rec_header_t *)offset;
4248 			error = xlog_valid_rec_header(log, rhead, blk_no);
4249 			if (error)
4250 				goto bread_err2;
4251 
4252 			/* blocks in data section */
4253 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4254 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4255 					   &offset);
4256 			if (error)
4257 				goto bread_err2;
4258 
4259 			error = xlog_unpack_data(rhead, offset, log);
4260 			if (error)
4261 				goto bread_err2;
4262 
4263 			error = xlog_recover_process_data(log,
4264 						rhash, rhead, offset, pass);
4265 			if (error)
4266 				goto bread_err2;
4267 			blk_no += bblks + hblks;
4268 		}
4269 	} else {
4270 		/*
4271 		 * Perform recovery around the end of the physical log.
4272 		 * When the head is not on the same cycle number as the tail,
4273 		 * we can't do a sequential recovery as above.
4274 		 */
4275 		blk_no = tail_blk;
4276 		while (blk_no < log->l_logBBsize) {
4277 			/*
4278 			 * Check for header wrapping around physical end-of-log
4279 			 */
4280 			offset = hbp->b_addr;
4281 			split_hblks = 0;
4282 			wrapped_hblks = 0;
4283 			if (blk_no + hblks <= log->l_logBBsize) {
4284 				/* Read header in one read */
4285 				error = xlog_bread(log, blk_no, hblks, hbp,
4286 						   &offset);
4287 				if (error)
4288 					goto bread_err2;
4289 			} else {
4290 				/* This LR is split across physical log end */
4291 				if (blk_no != log->l_logBBsize) {
4292 					/* some data before physical log end */
4293 					ASSERT(blk_no <= INT_MAX);
4294 					split_hblks = log->l_logBBsize - (int)blk_no;
4295 					ASSERT(split_hblks > 0);
4296 					error = xlog_bread(log, blk_no,
4297 							   split_hblks, hbp,
4298 							   &offset);
4299 					if (error)
4300 						goto bread_err2;
4301 				}
4302 
4303 				/*
4304 				 * Note: this black magic still works with
4305 				 * large sector sizes (non-512) only because:
4306 				 * - we increased the buffer size originally
4307 				 *   by 1 sector giving us enough extra space
4308 				 *   for the second read;
4309 				 * - the log start is guaranteed to be sector
4310 				 *   aligned;
4311 				 * - we read the log end (LR header start)
4312 				 *   _first_, then the log start (LR header end)
4313 				 *   - order is important.
4314 				 */
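				/*
				 * Illustrative numbers: with l_logBBsize =
				 * 1000 and hblks = 2, a header starting at
				 * blk_no = 999 reads block 999 into the
				 * start of hbp and then block 0 into
				 * offset + BBTOB(1) directly behind it.
				 */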
4315 				wrapped_hblks = hblks - split_hblks;
4316 				error = xlog_bread_offset(log, 0,
4317 						wrapped_hblks, hbp,
4318 						offset + BBTOB(split_hblks));
4319 				if (error)
4320 					goto bread_err2;
4321 			}
4322 			rhead = (xlog_rec_header_t *)offset;
4323 			error = xlog_valid_rec_header(log, rhead,
4324 						split_hblks ? blk_no : 0);
4325 			if (error)
4326 				goto bread_err2;
4327 
4328 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4329 			blk_no += hblks;
4330 
4331 			/* Read in data for log record */
4332 			if (blk_no + bblks <= log->l_logBBsize) {
4333 				error = xlog_bread(log, blk_no, bblks, dbp,
4334 						   &offset);
4335 				if (error)
4336 					goto bread_err2;
4337 			} else {
4338 				/* This log record is split across the
4339 				 * physical end of log */
4340 				offset = dbp->b_addr;
4341 				split_bblks = 0;
4342 				if (blk_no != log->l_logBBsize) {
4343 					/* some data is before the physical
4344 					 * end of log */
4345 					ASSERT(!wrapped_hblks);
4346 					ASSERT(blk_no <= INT_MAX);
4347 					split_bblks =
4348 						log->l_logBBsize - (int)blk_no;
4349 					ASSERT(split_bblks > 0);
4350 					error = xlog_bread(log, blk_no,
4351 							split_bblks, dbp,
4352 							&offset);
4353 					if (error)
4354 						goto bread_err2;
4355 				}
4356 
4357 				/*
4358 				 * Note: this black magic still works with
4359 				 * large sector sizes (non-512) only because:
4360 				 * - we increased the buffer size originally
4361 				 *   by 1 sector giving us enough extra space
4362 				 *   for the second read;
4363 				 * - the log start is guaranteed to be sector
4364 				 *   aligned;
4365 				 * - we read the log end (LR header start)
4366 				 *   _first_, then the log start (LR header end)
4367 				 *   - order is important.
4368 				 */
4369 				error = xlog_bread_offset(log, 0,
4370 						bblks - split_bblks, dbp,
4371 						offset + BBTOB(split_bblks));
4372 				if (error)
4373 					goto bread_err2;
4374 			}
4375 
4376 			error = xlog_unpack_data(rhead, offset, log);
4377 			if (error)
4378 				goto bread_err2;
4379 
4380 			error = xlog_recover_process_data(log, rhash,
4381 							rhead, offset, pass);
4382 			if (error)
4383 				goto bread_err2;
4384 			blk_no += bblks;
4385 		}
4386 
4387 		ASSERT(blk_no >= log->l_logBBsize);
4388 		blk_no -= log->l_logBBsize;
4389 
4390 		/* read first part of physical log */
4391 		while (blk_no < head_blk) {
4392 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4393 			if (error)
4394 				goto bread_err2;
4395 
4396 			rhead = (xlog_rec_header_t *)offset;
4397 			error = xlog_valid_rec_header(log, rhead, blk_no);
4398 			if (error)
4399 				goto bread_err2;
4400 
4401 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4402 		error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4403 					   &offset);
4404 			if (error)
4405 				goto bread_err2;
4406 
4407 			error = xlog_unpack_data(rhead, offset, log);
4408 			if (error)
4409 				goto bread_err2;
4410 
4411 			error = xlog_recover_process_data(log, rhash,
4412 							rhead, offset, pass);
4413 			if (error)
4414 				goto bread_err2;
4415 			blk_no += bblks + hblks;
4416 		}
4417 	}
4418 
4419  bread_err2:
4420 	xlog_put_bp(dbp);
4421  bread_err1:
4422 	xlog_put_bp(hbp);
4423 	return error;
4424 }
4425 
4426 /*
4427  * Do the recovery of the log.  We actually do this in two phases.
4428  * The two passes are necessary in order to implement the function
4429  * of cancelling a record written into the log.  The first pass
4430  * determines those things which have been cancelled, and the
4431  * second pass replays log items normally except for those which
4432  * have been cancelled.  The handling of the replay and cancellations
4433  * takes place in the log item type specific routines.
4434  *
4435  * The table of items which have cancel records in the log is allocated
4436  * and freed at this level, since only here do we know when all of
4437  * the log recovery has been completed.
4438  */
4439 STATIC int
4440 xlog_do_log_recovery(
4441 	struct xlog	*log,
4442 	xfs_daddr_t	head_blk,
4443 	xfs_daddr_t	tail_blk)
4444 {
4445 	int		error, i;
4446 
4447 	ASSERT(head_blk != tail_blk);
4448 
4449 	/*
4450 	 * First do a pass to find all of the cancelled buf log items.
4451 	 * Store them in the buf_cancel_table for use in the second pass.
4452 	 */
4453 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4454 						 sizeof(struct list_head),
4455 						 KM_SLEEP);
4456 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4457 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4458 
4459 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4460 				      XLOG_RECOVER_PASS1);
4461 	if (error != 0) {
4462 		kmem_free(log->l_buf_cancel_table);
4463 		log->l_buf_cancel_table = NULL;
4464 		return error;
4465 	}
4466 	/*
4467 	 * Then do a second pass to actually recover the items in the log.
4468 	 * When it is complete free the table of buf cancel items.
4469 	 */
4470 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4471 				      XLOG_RECOVER_PASS2);
4472 #ifdef DEBUG
4473 	if (!error) {
4474 		int	i;
4475 
4476 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4477 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4478 	}
4479 #endif	/* DEBUG */
4480 
4481 	kmem_free(log->l_buf_cancel_table);
4482 	log->l_buf_cancel_table = NULL;
4483 
4484 	return error;
4485 }
4486 
4487 /*
4488  * Do the actual recovery
4489  */
4490 STATIC int
4491 xlog_do_recover(
4492 	struct xlog	*log,
4493 	xfs_daddr_t	head_blk,
4494 	xfs_daddr_t	tail_blk)
4495 {
4496 	int		error;
4497 	xfs_buf_t	*bp;
4498 	xfs_sb_t	*sbp;
4499 
4500 	/*
4501 	 * First replay the images in the log.
4502 	 */
4503 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
4504 	if (error)
4505 		return error;
4506 
4507 	/*
4508 	 * If IO errors happened during recovery, bail out.
4509 	 */
4510 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4511 		return EIO;
4512 	}
4513 
4514 	/*
4515 	 * We now update the tail_lsn since much of the recovery has completed
4516 	 * and there may be space available to use.  If there were no extent
4517 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn
4518 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
4519 	 * lsn of the last known good LR on disk.  If there are extent frees
4520 	 * or iunlinks they will have some entries in the AIL; so we look at
4521 	 * the AIL to determine how to set the tail_lsn.
4522 	 */
4523 	xlog_assign_tail_lsn(log->l_mp);
4524 
4525 	/*
4526 	 * Now that we've finished replaying all buffer and inode
4527 	 * updates, re-read in the superblock and reverify it.
4528 	 */
4529 	bp = xfs_getsb(log->l_mp, 0);
4530 	XFS_BUF_UNDONE(bp);
4531 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
4532 	XFS_BUF_READ(bp);
4533 	XFS_BUF_UNASYNC(bp);
4534 	bp->b_ops = &xfs_sb_buf_ops;
4535 	xfsbdstrat(log->l_mp, bp);
4536 	error = xfs_buf_iowait(bp);
4537 	if (error) {
4538 		xfs_buf_ioerror_alert(bp, __func__);
4539 		ASSERT(0);
4540 		xfs_buf_relse(bp);
4541 		return error;
4542 	}
4543 
4544 	/* Convert superblock from on-disk format */
4545 	sbp = &log->l_mp->m_sb;
4546 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4547 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4548 	ASSERT(xfs_sb_good_version(sbp));
4549 	xfs_buf_relse(bp);
4550 
4551 	/* We've re-read the superblock so re-initialize per-cpu counters */
4552 	xfs_icsb_reinit_counters(log->l_mp);
4553 
4554 	xlog_recover_check_summary(log);
4555 
4556 	/* Normal transactions can now occur */
4557 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4558 	return 0;
4559 }
4560 
4561 /*
4562  * Perform recovery and re-initialize some log variables in xlog_find_tail.
4563  *
4564  * Return error or zero.
4565  */
4566 int
4567 xlog_recover(
4568 	struct xlog	*log)
4569 {
4570 	xfs_daddr_t	head_blk, tail_blk;
4571 	int		error;
4572 
4573 	/* find the tail of the log */
4574 	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4575 		return error;
4576 
4577 	if (tail_blk != head_blk) {
4578 		/* There used to be a comment here:
4579 		 *
4580 		 * disallow recovery on read-only mounts.  note -- mount
4581 		 * checks for ENOSPC and turns it into an intelligent
4582 		 * error message.
4583 		 * ...but this is no longer true.  Now, unless you specify
4584 		 * NORECOVERY (in which case this function would never be
4585 		 * called), we just go ahead and recover.  We do this all
4586 		 * under the vfs layer, so we can get away with it unless
4587 		 * the device itself is read-only, in which case we fail.
4588 		 */
4589 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4590 			return error;
4591 		}
4592 
4593 		/*
4594 		 * Version 5 superblock log feature mask validation. We know the
4595 		 * log is dirty so check if there are any unknown log features
4596 		 * in what we need to recover. If there are unknown features
4597 		 * (e.g. unsupported transactions), then simply reject the
4598 		 * attempt at recovery before touching anything.
4599 		 */
4600 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4601 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4602 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4603 			xfs_warn(log->l_mp,
4604 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4605 "The log can not be fully and/or safely recovered by this kernel.\n"
4606 "Please recover the log on a kernel that supports the unknown features.",
4607 				(log->l_mp->m_sb.sb_features_log_incompat &
4608 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4609 			return EINVAL;
4610 		}
4611 
4612 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4613 				log->l_mp->m_logname ? log->l_mp->m_logname
4614 						     : "internal");
4615 
4616 		error = xlog_do_recover(log, head_blk, tail_blk);
4617 		log->l_flags |= XLOG_RECOVERY_NEEDED;
4618 	}
4619 	return error;
4620 }
4621 
4622 /*
4623  * In the first part of recovery we replay inodes and buffers and build
4624  * up the list of extent free items which need to be processed.  Here
4625  * we process the extent free items and clean up the on disk unlinked
4626  * inode lists.  This is separated from the first part of recovery so
4627  * that the root and real-time bitmap inodes can be read in from disk in
4628  * between the two stages.  This is necessary so that we can free space
4629  * in the real-time portion of the file system.
4630  */
4631 int
4632 xlog_recover_finish(
4633 	struct xlog	*log)
4634 {
4635 	/*
4636 	 * Now we're ready to do the transactions needed for the
4637 	 * rest of recovery.  Start with completing all the extent
4638 	 * free intent records and then process the unlinked inode
4639 	 * lists.  At this point, we essentially run in normal mode
4640 	 * except that we're still performing recovery actions
4641 	 * rather than accepting new requests.
4642 	 */
4643 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4644 		int	error;
4645 		error = xlog_recover_process_efis(log);
4646 		if (error) {
4647 			xfs_alert(log->l_mp, "Failed to recover EFIs");
4648 			return error;
4649 		}
4650 		/*
4651 		 * Sync the log to get all the EFIs out of the AIL.
4652 		 * This isn't absolutely necessary, but it helps in
4653 		 * case the unlink transactions would have problems
4654 		 * pushing the EFIs out of the way.
4655 		 */
4656 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4657 
4658 		xlog_recover_process_iunlinks(log);
4659 
4660 		xlog_recover_check_summary(log);
4661 
4662 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4663 				log->l_mp->m_logname ? log->l_mp->m_logname
4664 						     : "internal");
4665 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4666 	} else {
4667 		xfs_info(log->l_mp, "Ending clean mount");
4668 	}
4669 	return 0;
4670 }
4671 
4672 
4673 #if defined(DEBUG)
4674 /*
4675  * Read all of the agf and agi counters and accumulate the filesystem-wide
4676  * free block, inode and free inode totals.
4677  */
4678 void
4679 xlog_recover_check_summary(
4680 	struct xlog	*log)
4681 {
4682 	xfs_mount_t	*mp;
4683 	xfs_agf_t	*agfp;
4684 	xfs_buf_t	*agfbp;
4685 	xfs_buf_t	*agibp;
4686 	xfs_agnumber_t	agno;
4687 	__uint64_t	freeblks;
4688 	__uint64_t	itotal;
4689 	__uint64_t	ifree;
4690 	int		error;
4691 
4692 	mp = log->l_mp;
4693 
4694 	freeblks = 0LL;
4695 	itotal = 0LL;
4696 	ifree = 0LL;
4697 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4698 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4699 		if (error) {
4700 			xfs_alert(mp, "%s agf read failed agno %d error %d",
4701 						__func__, agno, error);
4702 		} else {
4703 			agfp = XFS_BUF_TO_AGF(agfbp);
4704 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
4705 				    be32_to_cpu(agfp->agf_flcount);
4706 			xfs_buf_relse(agfbp);
4707 		}
4708 
4709 		error = xfs_read_agi(mp, NULL, agno, &agibp);
4710 		if (error) {
4711 			xfs_alert(mp, "%s agi read failed agno %d error %d",
4712 						__func__, agno, error);
4713 		} else {
4714 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
4715 
4716 			itotal += be32_to_cpu(agi->agi_count);
4717 			ifree += be32_to_cpu(agi->agi_freecount);
4718 			xfs_buf_relse(agibp);
4719 		}
4720 	}
4721 }
4722 #endif /* DEBUG */
4723