xref: /openbmc/linux/fs/xfs/xfs_log_recover.c (revision ab73b751)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_error.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_alloc.h"
36 #include "xfs_ialloc.h"
37 #include "xfs_log_priv.h"
38 #include "xfs_buf_item.h"
39 #include "xfs_log_recover.h"
40 #include "xfs_extfree_item.h"
41 #include "xfs_trans_priv.h"
42 #include "xfs_quota.h"
43 #include "xfs_utils.h"
44 #include "xfs_trace.h"
45 
46 STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
47 STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
48 #if defined(DEBUG)
49 STATIC void	xlog_recover_check_summary(xlog_t *);
50 #else
51 #define	xlog_recover_check_summary(log)
52 #endif
53 
54 /*
55  * This structure is used during recovery to record the buf log items which
56  * have been canceled and should not be replayed.
57  */
58 struct xfs_buf_cancel {
59 	xfs_daddr_t		bc_blkno;
60 	uint			bc_len;
61 	int			bc_refcount;
62 	struct list_head	bc_list;
63 };
64 
65 /*
66  * Sector aligned buffer routines for buffer create/read/write/access
67  */
68 
69 /*
70  * Verify that the given count of basic blocks is a valid number of blocks
71  * to specify for an operation involving the given XFS log buffer.
72  * Returns nonzero if the count is valid, 0 otherwise.
73  */
74 
75 static inline int
76 xlog_buf_bbcount_valid(
77 	xlog_t		*log,
78 	int		bbcount)
79 {
80 	return bbcount > 0 && bbcount <= log->l_logBBsize;
81 }
82 
83 /*
84  * Allocate a buffer to hold log data.  The buffer needs to be able
85  * to map to a range of nbblks basic blocks at any valid (basic
86  * block) offset within the log.
87  */
88 STATIC xfs_buf_t *
89 xlog_get_bp(
90 	xlog_t		*log,
91 	int		nbblks)
92 {
93 	struct xfs_buf	*bp;
94 
95 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
96 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
97 			nbblks);
98 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
99 		return NULL;
100 	}
101 
102 	/*
103 	 * We do log I/O in units of log sectors (a power-of-2
104 	 * multiple of the basic block size), so we round up the
105 	 * requested size to accommodate the basic blocks required
106 	 * for complete log sectors.
107 	 *
108 	 * In addition, the buffer may be used for a non-sector-
109 	 * aligned block offset, in which case an I/O of the
110 	 * requested size could extend beyond the end of the
111 	 * buffer.  If the requested size is only 1 basic block it
112 	 * will never straddle a sector boundary, so this won't be
113 	 * an issue.  Nor will this be a problem if the log I/O is
114 	 * done in basic blocks (sector size 1).  But otherwise we
115 	 * extend the buffer by one extra log sector to ensure
116 	 * there's space to accommodate this possibility.
117 	 */
118 	if (nbblks > 1 && log->l_sectBBsize > 1)
119 		nbblks += log->l_sectBBsize;
120 	nbblks = round_up(nbblks, log->l_sectBBsize);
121 
122 	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
123 	if (bp)
124 		xfs_buf_unlock(bp);
125 	return bp;
126 }
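
/*
 * A minimal sketch of the sizing rule from the comment above, in plain C
 * with hypothetical example_* names standing in for the kernel helpers:
 * one extra sector is added whenever a multi-block I/O could start
 * mid-sector, then the total is rounded up to whole sectors.  Assumes
 * sect_bblks is a power of 2, as the real l_sectBBsize is.
 */
static inline int
example_log_buf_size(
	int	nbblks,		/* requested size, in basic blocks */
	int	sect_bblks)	/* log sector size, in basic blocks */
{
	if (nbblks > 1 && sect_bblks > 1)
		nbblks += sect_bblks;	/* room for a misaligned start */
	return (nbblks + sect_bblks - 1) & ~(sect_bblks - 1); /* round_up */
}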
127 
128 STATIC void
129 xlog_put_bp(
130 	xfs_buf_t	*bp)
131 {
132 	xfs_buf_free(bp);
133 }
134 
135 /*
136  * Return the address of the start of the given block number's data
137  * in a log buffer.  The buffer covers a log sector-aligned region.
138  */
139 STATIC xfs_caddr_t
140 xlog_align(
141 	xlog_t		*log,
142 	xfs_daddr_t	blk_no,
143 	int		nbblks,
144 	xfs_buf_t	*bp)
145 {
146 	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
147 
148 	ASSERT(offset + nbblks <= bp->b_length);
149 	return bp->b_addr + BBTOB(offset);
150 }
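
/*
 * A minimal sketch of the alignment math above, with hypothetical
 * example_* names: reads are issued for the surrounding sector-aligned
 * range, so the caller's block lives (blk_no mod sector size) basic
 * blocks into the buffer.  Assumes 512-byte basic blocks (BBSHIFT == 9).
 */
static inline char *
example_align(
	char			*buf,		/* sector-aligned buffer start */
	unsigned long long	blk_no,		/* block the caller asked for */
	int			sect_bblks)	/* sector size in bblks, power of 2 */
{
	unsigned long long	offset = blk_no & (sect_bblks - 1);

	return buf + (offset << 9);	/* BBTOB(offset) */
}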
151 
152 
153 /*
154  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
155  */
156 STATIC int
157 xlog_bread_noalign(
158 	xlog_t		*log,
159 	xfs_daddr_t	blk_no,
160 	int		nbblks,
161 	xfs_buf_t	*bp)
162 {
163 	int		error;
164 
165 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
166 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
167 			nbblks);
168 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
169 		return EFSCORRUPTED;
170 	}
171 
172 	blk_no = round_down(blk_no, log->l_sectBBsize);
173 	nbblks = round_up(nbblks, log->l_sectBBsize);
174 
175 	ASSERT(nbblks > 0);
176 	ASSERT(nbblks <= bp->b_length);
177 
178 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
179 	XFS_BUF_READ(bp);
180 	bp->b_io_length = nbblks;
181 	bp->b_error = 0;
182 
183 	xfsbdstrat(log->l_mp, bp);
184 	error = xfs_buf_iowait(bp);
185 	if (error)
186 		xfs_buf_ioerror_alert(bp, __func__);
187 	return error;
188 }
189 
190 STATIC int
191 xlog_bread(
192 	xlog_t		*log,
193 	xfs_daddr_t	blk_no,
194 	int		nbblks,
195 	xfs_buf_t	*bp,
196 	xfs_caddr_t	*offset)
197 {
198 	int		error;
199 
200 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
201 	if (error)
202 		return error;
203 
204 	*offset = xlog_align(log, blk_no, nbblks, bp);
205 	return 0;
206 }
207 
208 /*
209  * Read at an offset into the buffer. Returns with the buffer in its original
210  * state regardless of the result of the read.
211  */
212 STATIC int
213 xlog_bread_offset(
214 	xlog_t		*log,
215 	xfs_daddr_t	blk_no,		/* block to read from */
216 	int		nbblks,		/* blocks to read */
217 	xfs_buf_t	*bp,
218 	xfs_caddr_t	offset)
219 {
220 	xfs_caddr_t	orig_offset = bp->b_addr;
221 	int		orig_len = BBTOB(bp->b_length);
222 	int		error, error2;
223 
224 	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
225 	if (error)
226 		return error;
227 
228 	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
229 
230 	/* must reset buffer pointer even on error */
231 	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
232 	if (error)
233 		return error;
234 	return error2;
235 }
236 
237 /*
238  * Write out the buffer at the given block for the given number of blocks.
239  * The buffer is kept locked across the write and is returned locked.
240  * This can only be used for synchronous log writes.
241  */
242 STATIC int
243 xlog_bwrite(
244 	xlog_t		*log,
245 	xfs_daddr_t	blk_no,
246 	int		nbblks,
247 	xfs_buf_t	*bp)
248 {
249 	int		error;
250 
251 	if (!xlog_buf_bbcount_valid(log, nbblks)) {
252 		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
253 			nbblks);
254 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
255 		return EFSCORRUPTED;
256 	}
257 
258 	blk_no = round_down(blk_no, log->l_sectBBsize);
259 	nbblks = round_up(nbblks, log->l_sectBBsize);
260 
261 	ASSERT(nbblks > 0);
262 	ASSERT(nbblks <= bp->b_length);
263 
264 	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
265 	XFS_BUF_ZEROFLAGS(bp);
266 	xfs_buf_hold(bp);
267 	xfs_buf_lock(bp);
268 	bp->b_io_length = nbblks;
269 	bp->b_error = 0;
270 
271 	error = xfs_bwrite(bp);
272 	if (error)
273 		xfs_buf_ioerror_alert(bp, __func__);
274 	xfs_buf_relse(bp);
275 	return error;
276 }
277 
278 #ifdef DEBUG
279 /*
280  * dump debug superblock and log record information
281  */
282 STATIC void
283 xlog_header_check_dump(
284 	xfs_mount_t		*mp,
285 	xlog_rec_header_t	*head)
286 {
287 	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
288 		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
289 	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
290 		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
291 }
292 #else
293 #define xlog_header_check_dump(mp, head)
294 #endif
295 
296 /*
297  * check log record header for recovery
298  */
299 STATIC int
300 xlog_header_check_recover(
301 	xfs_mount_t		*mp,
302 	xlog_rec_header_t	*head)
303 {
304 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
305 
306 	/*
307 	 * IRIX doesn't write the h_fmt field and leaves it zeroed
308 	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
309 	 * a dirty log created in IRIX.
310 	 */
311 	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
312 		xfs_warn(mp,
313 	"dirty log written in incompatible format - can't recover");
314 		xlog_header_check_dump(mp, head);
315 		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
316 				 XFS_ERRLEVEL_HIGH, mp);
317 		return XFS_ERROR(EFSCORRUPTED);
318 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
319 		xfs_warn(mp,
320 	"dirty log entry has mismatched uuid - can't recover");
321 		xlog_header_check_dump(mp, head);
322 		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
323 				 XFS_ERRLEVEL_HIGH, mp);
324 		return XFS_ERROR(EFSCORRUPTED);
325 	}
326 	return 0;
327 }
328 
329 /*
330  * check the log record header uuid against the filesystem being mounted
331  */
332 STATIC int
333 xlog_header_check_mount(
334 	xfs_mount_t		*mp,
335 	xlog_rec_header_t	*head)
336 {
337 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
338 
339 	if (uuid_is_nil(&head->h_fs_uuid)) {
340 		/*
341 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
342 		 * h_fs_uuid is nil, we assume this log was last mounted
343 		 * by IRIX and continue.
344 		 */
345 		xfs_warn(mp, "nil uuid in log - IRIX style log");
346 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
347 		xfs_warn(mp, "log has mismatched uuid - can't recover");
348 		xlog_header_check_dump(mp, head);
349 		XFS_ERROR_REPORT("xlog_header_check_mount",
350 				 XFS_ERRLEVEL_HIGH, mp);
351 		return XFS_ERROR(EFSCORRUPTED);
352 	}
353 	return 0;
354 }
355 
356 STATIC void
357 xlog_recover_iodone(
358 	struct xfs_buf	*bp)
359 {
360 	if (bp->b_error) {
361 		/*
362 		 * We're not going to bother about retrying
363 		 * this during recovery. One strike!
364 		 */
365 		xfs_buf_ioerror_alert(bp, __func__);
366 		xfs_force_shutdown(bp->b_target->bt_mount,
367 					SHUTDOWN_META_IO_ERROR);
368 	}
369 	bp->b_iodone = NULL;
370 	xfs_buf_ioend(bp, 0);
371 }
372 
373 /*
374  * This routine finds (to an approximation) the first block in the physical
375  * log which contains the given cycle.  It uses a binary search algorithm.
376  * Note that the algorithm cannot be perfect because the disk will not
377  * necessarily be perfect.
378  */
379 STATIC int
380 xlog_find_cycle_start(
381 	xlog_t		*log,
382 	xfs_buf_t	*bp,
383 	xfs_daddr_t	first_blk,
384 	xfs_daddr_t	*last_blk,
385 	uint		cycle)
386 {
387 	xfs_caddr_t	offset;
388 	xfs_daddr_t	mid_blk;
389 	xfs_daddr_t	end_blk;
390 	uint		mid_cycle;
391 	int		error;
392 
393 	end_blk = *last_blk;
394 	mid_blk = BLK_AVG(first_blk, end_blk);
395 	while (mid_blk != first_blk && mid_blk != end_blk) {
396 		error = xlog_bread(log, mid_blk, 1, bp, &offset);
397 		if (error)
398 			return error;
399 		mid_cycle = xlog_get_cycle(offset);
400 		if (mid_cycle == cycle)
401 			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
402 		else
403 			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
404 		mid_blk = BLK_AVG(first_blk, end_blk);
405 	}
406 	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
407 	       (mid_blk == end_blk && mid_blk-1 == first_blk));
408 
409 	*last_blk = end_blk;
410 
411 	return 0;
412 }
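
/*
 * The loop above is a standard binary search over a monotonic boundary:
 * blocks before the boundary still carry the previous cycle, blocks at or
 * after it carry 'cycle'.  A minimal sketch of the same invariant over an
 * in-memory array (hypothetical example_* name, for illustration only):
 */
static int
example_find_cycle_start(
	unsigned int	*cycles,	/* cycle stamp of each block */
	int		first_blk,	/* invariant: cycles[first_blk] != cycle */
	int		last_blk,	/* invariant: cycles[last_blk] == cycle */
	unsigned int	cycle)
{
	while (last_blk - first_blk > 1) {
		int	mid_blk = (first_blk + last_blk) / 2;

		if (cycles[mid_blk] == cycle)
			last_blk = mid_blk;	/* boundary at or before mid */
		else
			first_blk = mid_blk;	/* boundary after mid */
	}
	return last_blk;	/* first block stamped with 'cycle' */
}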
413 
414 /*
415  * Check that a range of blocks does not contain stop_on_cycle_no.
416  * Fill in *new_blk with the block offset where such a block is
417  * found, or with -1 (an invalid block number) if there is no such
418  * block in the range.  The scan needs to occur from front to back
419  * and the pointer into the region must be updated since a later
420  * routine will need to perform another test.
421  */
422 STATIC int
423 xlog_find_verify_cycle(
424 	xlog_t		*log,
425 	xfs_daddr_t	start_blk,
426 	int		nbblks,
427 	uint		stop_on_cycle_no,
428 	xfs_daddr_t	*new_blk)
429 {
430 	xfs_daddr_t	i, j;
431 	uint		cycle;
432 	xfs_buf_t	*bp;
433 	xfs_daddr_t	bufblks;
434 	xfs_caddr_t	buf = NULL;
435 	int		error = 0;
436 
437 	/*
438 	 * Greedily allocate a buffer big enough to handle the full
439 	 * range of basic blocks we'll be examining.  If that fails,
440 	 * try a smaller size.  We need to be able to read at least
441 	 * a log sector, or we're out of luck.
442 	 */
443 	bufblks = 1 << ffs(nbblks);
444 	while (bufblks > log->l_logBBsize)
445 		bufblks >>= 1;
446 	while (!(bp = xlog_get_bp(log, bufblks))) {
447 		bufblks >>= 1;
448 		if (bufblks < log->l_sectBBsize)
449 			return ENOMEM;
450 	}
451 
452 	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
453 		int	bcount;
454 
455 		bcount = min(bufblks, (start_blk + nbblks - i));
456 
457 		error = xlog_bread(log, i, bcount, bp, &buf);
458 		if (error)
459 			goto out;
460 
461 		for (j = 0; j < bcount; j++) {
462 			cycle = xlog_get_cycle(buf);
463 			if (cycle == stop_on_cycle_no) {
464 				*new_blk = i+j;
465 				goto out;
466 			}
467 
468 			buf += BBSIZE;
469 		}
470 	}
471 
472 	*new_blk = -1;
473 
474 out:
475 	xlog_put_bp(bp);
476 	return error;
477 }
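
/*
 * A small sketch of the chunking arithmetic in the scan loop above
 * (hypothetical example_* name): each pass reads min(bufblks, remaining),
 * so only the final read is trimmed to the end of the range.
 */
static inline int
example_chunk_len(
	long long	start_blk,	/* first block of the range */
	int		nbblks,		/* total length of the range */
	long long	i,		/* current position in the range */
	int		bufblks)	/* buffer capacity per read */
{
	long long	left = start_blk + nbblks - i;

	return left < bufblks ? (int)left : bufblks;
}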
478 
479 /*
480  * Potentially backup over partial log record write.
481  *
482  * In the typical case, last_blk is the number of the block directly after
483  * a good log record.  Therefore, we subtract one to get the block number
484  * of the last block in the given buffer.  extra_bblks contains the number
485  * of blocks we would have read on a previous read.  This happens when the
486  * last log record is split over the end of the physical log.
487  *
488  * extra_bblks is the number of blocks potentially verified on a previous
489  * call to this routine.
490  */
491 STATIC int
492 xlog_find_verify_log_record(
493 	xlog_t			*log,
494 	xfs_daddr_t		start_blk,
495 	xfs_daddr_t		*last_blk,
496 	int			extra_bblks)
497 {
498 	xfs_daddr_t		i;
499 	xfs_buf_t		*bp;
500 	xfs_caddr_t		offset = NULL;
501 	xlog_rec_header_t	*head = NULL;
502 	int			error = 0;
503 	int			smallmem = 0;
504 	int			num_blks = *last_blk - start_blk;
505 	int			xhdrs;
506 
507 	ASSERT(start_blk != 0 || *last_blk != start_blk);
508 
509 	if (!(bp = xlog_get_bp(log, num_blks))) {
510 		if (!(bp = xlog_get_bp(log, 1)))
511 			return ENOMEM;
512 		smallmem = 1;
513 	} else {
514 		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
515 		if (error)
516 			goto out;
517 		offset += ((num_blks - 1) << BBSHIFT);
518 	}
519 
520 	for (i = (*last_blk) - 1; i >= 0; i--) {
521 		if (i < start_blk) {
522 			/* valid log record not found */
523 			xfs_warn(log->l_mp,
524 		"Log inconsistent (didn't find previous header)");
525 			ASSERT(0);
526 			error = XFS_ERROR(EIO);
527 			goto out;
528 		}
529 
530 		if (smallmem) {
531 			error = xlog_bread(log, i, 1, bp, &offset);
532 			if (error)
533 				goto out;
534 		}
535 
536 		head = (xlog_rec_header_t *)offset;
537 
538 		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
539 			break;
540 
541 		if (!smallmem)
542 			offset -= BBSIZE;
543 	}
544 
545 	/*
546 	 * We hit the beginning of the physical log & still no header.  Return
547 	 * to caller.  If caller can handle a return of -1, then this routine
548 	 * will be called again for the end of the physical log.
549 	 */
550 	if (i == -1) {
551 		error = -1;
552 		goto out;
553 	}
554 
555 	/*
556 	 * We have the final block of the good log (the first block
557 	 * of the log record _before_ the head). So we check the uuid.
558 	 */
559 	if ((error = xlog_header_check_mount(log->l_mp, head)))
560 		goto out;
561 
562 	/*
563 	 * We may have found a log record header before we expected one.
564 	 * last_blk will be the 1st block # with a given cycle #.  We may end
565 	 * up reading an entire log record.  In this case, we don't want to
566 	 * reset last_blk.  Only when last_blk points in the middle of a log
567 	 * record do we update last_blk.
568 	 */
569 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
570 		uint	h_size = be32_to_cpu(head->h_size);
571 
572 		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
573 		if (h_size % XLOG_HEADER_CYCLE_SIZE)
574 			xhdrs++;
575 	} else {
576 		xhdrs = 1;
577 	}
578 
579 	if (*last_blk - i + extra_bblks !=
580 	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
581 		*last_blk = i;
582 
583 out:
584 	xlog_put_bp(bp);
585 	return error;
586 }
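
/*
 * A sketch of the v2 header-count math above (hypothetical example_*
 * name): a version 2 log record carries one header block per
 * XLOG_HEADER_CYCLE_SIZE (32k) of record data, so the count is a
 * ceiling division of h_size.
 */
static inline int
example_record_header_blocks(
	unsigned int	h_size,		/* iclog size from the record header */
	unsigned int	cycle_size)	/* XLOG_HEADER_CYCLE_SIZE */
{
	return (h_size + cycle_size - 1) / cycle_size;	/* ceiling division */
}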
587 
588 /*
589  * Head is defined to be the point of the log where the next log write
590  * could go.  This means that incomplete LR writes at the end are
591  * eliminated when calculating the head.  We aren't guaranteed that previous
592  * LRs have complete transactions.  We only know that blocks with a cycle
593  * number of (current cycle - 1) won't be present in the log if we start
594  * writing from our current block number.
595  *
596  * last_blk contains the block number of the first block with a given
597  * cycle number.
598  *
599  * Return: zero if normal, non-zero if error.
600  */
601 STATIC int
602 xlog_find_head(
603 	xlog_t 		*log,
604 	xfs_daddr_t	*return_head_blk)
605 {
606 	xfs_buf_t	*bp;
607 	xfs_caddr_t	offset;
608 	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
609 	int		num_scan_bblks;
610 	uint		first_half_cycle, last_half_cycle;
611 	uint		stop_on_cycle;
612 	int		error, log_bbnum = log->l_logBBsize;
613 
614 	/* Is the end of the log device zeroed? */
615 	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
616 		*return_head_blk = first_blk;
617 
618 		/* Is the whole lot zeroed? */
619 		if (!first_blk) {
620 			/* Linux XFS shouldn't generate totally zeroed logs -
621 			 * mkfs etc write a dummy unmount record to a fresh
622 			 * log so we can store the uuid in there
623 			 */
624 			xfs_warn(log->l_mp, "totally zeroed log");
625 		}
626 
627 		return 0;
628 	} else if (error) {
629 		xfs_warn(log->l_mp, "empty log check failed");
630 		return error;
631 	}
632 
633 	first_blk = 0;			/* get cycle # of 1st block */
634 	bp = xlog_get_bp(log, 1);
635 	if (!bp)
636 		return ENOMEM;
637 
638 	error = xlog_bread(log, 0, 1, bp, &offset);
639 	if (error)
640 		goto bp_err;
641 
642 	first_half_cycle = xlog_get_cycle(offset);
643 
644 	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
645 	error = xlog_bread(log, last_blk, 1, bp, &offset);
646 	if (error)
647 		goto bp_err;
648 
649 	last_half_cycle = xlog_get_cycle(offset);
650 	ASSERT(last_half_cycle != 0);
651 
652 	/*
653 	 * If the 1st half cycle number is equal to the last half cycle number,
654 	 * then the entire log is stamped with the same cycle number.  In this
655 	 * case, head_blk can't be set to zero (which makes sense).  The below
656 	 * math doesn't work out properly with head_blk equal to zero.  Instead,
657 	 * we set it to log_bbnum which is an invalid block number, but this
658  * value makes the math correct.  If head_blk doesn't change through
659 	 * all the tests below, *head_blk is set to zero at the very end rather
660 	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
661 	 * in a circular file.
662 	 */
663 	if (first_half_cycle == last_half_cycle) {
664 		/*
665 		 * In this case we believe that the entire log should have
666 		 * cycle number last_half_cycle.  We need to scan backwards
667 		 * from the end verifying that there are no holes still
668 		 * containing last_half_cycle - 1.  If we find such a hole,
669 		 * then the start of that hole will be the new head.  The
670 		 * simple case looks like
671 		 *        x | x ... | x - 1 | x
672 		 * Another case that fits this picture would be
673 		 *        x | x + 1 | x ... | x
674 		 * In this case the head really is somewhere at the end of the
675 		 * log, as one of the latest writes at the beginning was
676 		 * incomplete.
677 		 * One more case is
678 		 *        x | x + 1 | x ... | x - 1 | x
679 		 * This is really the combination of the above two cases, and
680 		 * the head has to end up at the start of the x-1 hole at the
681 		 * end of the log.
682 		 *
683 		 * In the 256k log case, we will read from the beginning to the
684 		 * end of the log and search for cycle numbers equal to x-1.
685 		 * We don't worry about the x+1 blocks that we encounter,
686 		 * because we know that they cannot be the head since the log
687 		 * started with x.
688 		 */
689 		head_blk = log_bbnum;
690 		stop_on_cycle = last_half_cycle - 1;
691 	} else {
692 		/*
693 		 * In this case we want to find the first block with cycle
694 		 * number matching last_half_cycle.  We expect the log to be
695 		 * some variation on
696 		 *        x + 1 ... | x ... | x
697 		 * The first block with cycle number x (last_half_cycle) will
698 		 * be where the new head belongs.  First we do a binary search
699 		 * for the first occurrence of last_half_cycle.  The binary
700 		 * search may not be totally accurate, so then we scan back
701 		 * from there looking for occurrences of last_half_cycle before
702 		 * us.  If that backwards scan wraps around the beginning of
703 		 * the log, then we look for occurrences of last_half_cycle - 1
704 		 * at the end of the log.  The cases we're looking for look
705 		 * like
706 		 *                               v binary search stopped here
707 		 *        x + 1 ... | x | x + 1 | x ... | x
708 		 *                   ^ but we want to locate this spot
709 		 * or
710 		 *        <---------> less than scan distance
711 		 *        x + 1 ... | x ... | x - 1 | x
712 		 *                           ^ we want to locate this spot
713 		 */
714 		stop_on_cycle = last_half_cycle;
715 		if ((error = xlog_find_cycle_start(log, bp, first_blk,
716 						&head_blk, last_half_cycle)))
717 			goto bp_err;
718 	}
719 
720 	/*
721 	 * Now validate the answer.  Scan back some number of maximum possible
722 	 * blocks and make sure each one has the expected cycle number.  The
723 	 * maximum is determined by the total possible amount of buffering
724 	 * in the in-core log.  The following number can be made tighter if
725 	 * we actually look at the block size of the filesystem.
726 	 */
727 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
728 	if (head_blk >= num_scan_bblks) {
729 		/*
730 		 * We are guaranteed that the entire check can be performed
731 		 * in one buffer.
732 		 */
733 		start_blk = head_blk - num_scan_bblks;
734 		if ((error = xlog_find_verify_cycle(log,
735 						start_blk, num_scan_bblks,
736 						stop_on_cycle, &new_blk)))
737 			goto bp_err;
738 		if (new_blk != -1)
739 			head_blk = new_blk;
740 	} else {		/* need to read 2 parts of log */
741 		/*
742 		 * We are going to scan backwards in the log in two parts.
743 		 * First we scan the physical end of the log.  In this part
744 		 * of the log, we are looking for blocks with cycle number
745 		 * last_half_cycle - 1.
746 		 * If we find one, then we know that the log starts there, as
747 		 * we've found a hole that didn't get written in going around
748 		 * the end of the physical log.  The simple case for this is
749 		 *        x + 1 ... | x ... | x - 1 | x
750 		 *        <---------> less than scan distance
751 		 * If all of the blocks at the end of the log have cycle number
752 		 * last_half_cycle, then we check the blocks at the start of
753 		 * the log looking for occurrences of last_half_cycle.  If we
754 		 * find one, then our current estimate for the location of the
755 		 * first occurrence of last_half_cycle is wrong and we move
756 		 * back to the hole we've found.  This case looks like
757 		 *        x + 1 ... | x | x + 1 | x ...
758 		 *                               ^ binary search stopped here
759 		 * Another case we need to handle that only occurs in 256k
760 		 * logs is
761 		 *        x + 1 ... | x ... | x+1 | x ...
762 		 *                   ^ binary search stops here
763 		 * In a 256k log, the scan at the end of the log will see the
764 		 * x + 1 blocks.  We need to skip past those since that is
765 		 * certainly not the head of the log.  By searching for
766 		 * last_half_cycle-1 we accomplish that.
767 		 */
768 		ASSERT(head_blk <= INT_MAX &&
769 			(xfs_daddr_t) num_scan_bblks >= head_blk);
770 		start_blk = log_bbnum - (num_scan_bblks - head_blk);
771 		if ((error = xlog_find_verify_cycle(log, start_blk,
772 					num_scan_bblks - (int)head_blk,
773 					(stop_on_cycle - 1), &new_blk)))
774 			goto bp_err;
775 		if (new_blk != -1) {
776 			head_blk = new_blk;
777 			goto validate_head;
778 		}
779 
780 		/*
781 		 * Scan beginning of log now.  The last part of the physical
782 		 * log is good.  This scan needs to verify that it doesn't find
783 		 * the last_half_cycle.
784 		 */
785 		start_blk = 0;
786 		ASSERT(head_blk <= INT_MAX);
787 		if ((error = xlog_find_verify_cycle(log,
788 					start_blk, (int)head_blk,
789 					stop_on_cycle, &new_blk)))
790 			goto bp_err;
791 		if (new_blk != -1)
792 			head_blk = new_blk;
793 	}
794 
795 validate_head:
796 	/*
797 	 * Now we need to make sure head_blk is not pointing to a block in
798 	 * the middle of a log record.
799 	 */
800 	num_scan_bblks = XLOG_REC_SHIFT(log);
801 	if (head_blk >= num_scan_bblks) {
802 		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
803 
804 		/* start ptr at last block ptr before head_blk */
805 		if ((error = xlog_find_verify_log_record(log, start_blk,
806 							&head_blk, 0)) == -1) {
807 			error = XFS_ERROR(EIO);
808 			goto bp_err;
809 		} else if (error)
810 			goto bp_err;
811 	} else {
812 		start_blk = 0;
813 		ASSERT(head_blk <= INT_MAX);
814 		if ((error = xlog_find_verify_log_record(log, start_blk,
815 							&head_blk, 0)) == -1) {
816 			/* We hit the beginning of the log during our search */
817 			start_blk = log_bbnum - (num_scan_bblks - head_blk);
818 			new_blk = log_bbnum;
819 			ASSERT(start_blk <= INT_MAX &&
820 				(xfs_daddr_t) log_bbnum-start_blk >= 0);
821 			ASSERT(head_blk <= INT_MAX);
822 			if ((error = xlog_find_verify_log_record(log,
823 							start_blk, &new_blk,
824 							(int)head_blk)) == -1) {
825 				error = XFS_ERROR(EIO);
826 				goto bp_err;
827 			} else if (error)
828 				goto bp_err;
829 			if (new_blk != log_bbnum)
830 				head_blk = new_blk;
831 		} else if (error)
832 			goto bp_err;
833 	}
834 
835 	xlog_put_bp(bp);
836 	if (head_blk == log_bbnum)
837 		*return_head_blk = 0;
838 	else
839 		*return_head_blk = head_blk;
840 	/*
841 	 * When returning here, we have a good block number.  Bad block
842 	 * means that during a previous crash, we didn't have a clean break
843 	 * from cycle number N to cycle number N-1.  In this case, we need
844 	 * to find the first block with cycle number N-1.
845 	 */
846 	return 0;
847 
848  bp_err:
849 	xlog_put_bp(bp);
850 
851 	if (error)
852 		xfs_warn(log->l_mp, "failed to find log head");
853 	return error;
854 }
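
/*
 * A sketch of the window split used for the two-part scan above
 * (hypothetical example_* names), assuming head_blk < num_scan_bblks:
 * the verification window wraps, covering the last (num_scan - head)
 * blocks of the physical log plus the first head_blk blocks.
 */
static void
example_split_scan_window(
	int	log_bbnum,	/* physical log size, in blocks */
	int	head_blk,	/* candidate head, < num_scan_bblks */
	int	num_scan_bblks,	/* total blocks to verify */
	int	*tail_start,	/* out: start of the end-of-log piece */
	int	*tail_len,	/* out: its length */
	int	*head_len)	/* out: length of the piece at block 0 */
{
	*tail_len = num_scan_bblks - head_blk;
	*tail_start = log_bbnum - *tail_len;
	*head_len = head_blk;
}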
855 
856 /*
857  * Find the sync block number or the tail of the log.
858  *
859  * This will be the block number of the last record to have its
860  * associated buffers synced to disk.  Every log record header has
861  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
862  * to get a sync block number.  The only concern is to figure out which
863  * log record header to believe.
864  *
865  * The following algorithm uses the log record header with the largest
866  * lsn.  The entire log record does not need to be valid.  We only care
867  * that the header is valid.
868  *
869  * We could speed up search by using current head_blk buffer, but it is not
870  * available.
871  */
872 STATIC int
873 xlog_find_tail(
874 	xlog_t			*log,
875 	xfs_daddr_t		*head_blk,
876 	xfs_daddr_t		*tail_blk)
877 {
878 	xlog_rec_header_t	*rhead;
879 	xlog_op_header_t	*op_head;
880 	xfs_caddr_t		offset = NULL;
881 	xfs_buf_t		*bp;
882 	int			error, i, found;
883 	xfs_daddr_t		umount_data_blk;
884 	xfs_daddr_t		after_umount_blk;
885 	xfs_lsn_t		tail_lsn;
886 	int			hblks;
887 
888 	found = 0;
889 
890 	/*
891 	 * Find previous log record
892 	 */
893 	if ((error = xlog_find_head(log, head_blk)))
894 		return error;
895 
896 	bp = xlog_get_bp(log, 1);
897 	if (!bp)
898 		return ENOMEM;
899 	if (*head_blk == 0) {				/* special case */
900 		error = xlog_bread(log, 0, 1, bp, &offset);
901 		if (error)
902 			goto done;
903 
904 		if (xlog_get_cycle(offset) == 0) {
905 			*tail_blk = 0;
906 			/* leave all other log inited values alone */
907 			goto done;
908 		}
909 	}
910 
911 	/*
912 	 * Search backwards looking for log record header block
913 	 */
914 	ASSERT(*head_blk < INT_MAX);
915 	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
916 		error = xlog_bread(log, i, 1, bp, &offset);
917 		if (error)
918 			goto done;
919 
920 		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
921 			found = 1;
922 			break;
923 		}
924 	}
925 	/*
926 	 * If we haven't found the log record header block, start looking
927 	 * again from the end of the physical log.  XXXmiken: There should be
928 	 * a check here to make sure we didn't search more than N blocks in
929 	 * the previous code.
930 	 */
931 	if (!found) {
932 		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
933 			error = xlog_bread(log, i, 1, bp, &offset);
934 			if (error)
935 				goto done;
936 
937 			if (*(__be32 *)offset ==
938 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
939 				found = 2;
940 				break;
941 			}
942 		}
943 	}
944 	if (!found) {
945 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
946 		ASSERT(0);
947 		return XFS_ERROR(EIO);
948 	}
949 
950 	/* find blk_no of tail of log */
951 	rhead = (xlog_rec_header_t *)offset;
952 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
953 
954 	/*
955 	 * Reset log values according to the state of the log when we
956 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
957 	 * by one because the next write starts a new cycle rather than
958 	 * continuing the cycle of the last good log record.  At this
959 	 * point we have guaranteed that all partial log records have been
960 	 * accounted for.  Therefore, we know that the last good log record
961 	 * written was complete and ended exactly on the end boundary
962 	 * of the physical log.
963 	 */
964 	log->l_prev_block = i;
965 	log->l_curr_block = (int)*head_blk;
966 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
967 	if (found == 2)
968 		log->l_curr_cycle++;
969 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
970 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
971 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
972 					BBTOB(log->l_curr_block));
973 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
974 					BBTOB(log->l_curr_block));
975 
976 	/*
977 	 * Look for unmount record.  If we find it, then we know there
978 	 * was a clean unmount.  Since 'i' could be the last block in
979 	 * the physical log, we convert to a log block before comparing
980 	 * to the head_blk.
981 	 *
982 	 * Save the current tail lsn to use to pass to
983 	 * xlog_clear_stale_blocks() below.  We won't want to clear the
984 	 * unmount record if there is one, so we pass the lsn of the
985 	 * unmount record rather than the block after it.
986 	 */
987 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
988 		int	h_size = be32_to_cpu(rhead->h_size);
989 		int	h_version = be32_to_cpu(rhead->h_version);
990 
991 		if ((h_version & XLOG_VERSION_2) &&
992 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
993 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
994 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
995 				hblks++;
996 		} else {
997 			hblks = 1;
998 		}
999 	} else {
1000 		hblks = 1;
1001 	}
1002 	after_umount_blk = (i + hblks + (int)
1003 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1004 	tail_lsn = atomic64_read(&log->l_tail_lsn);
1005 	if (*head_blk == after_umount_blk &&
1006 	    be32_to_cpu(rhead->h_num_logops) == 1) {
1007 		umount_data_blk = (i + hblks) % log->l_logBBsize;
1008 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1009 		if (error)
1010 			goto done;
1011 
1012 		op_head = (xlog_op_header_t *)offset;
1013 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1014 			/*
1015 			 * Set tail and last sync so that newly written
1016 			 * log records will point recovery to after the
1017 			 * current unmount record.
1018 			 */
1019 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
1020 					log->l_curr_cycle, after_umount_blk);
1021 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1022 					log->l_curr_cycle, after_umount_blk);
1023 			*tail_blk = after_umount_blk;
1024 
1025 			/*
1026 			 * Note that the unmount was clean. If the unmount
1027 			 * was not clean, we need to know this to rebuild the
1028 			 * superblock counters from the perag headers if we
1029 			 * have a filesystem using non-persistent counters.
1030 			 */
1031 			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1032 		}
1033 	}
1034 
1035 	/*
1036 	 * Make sure that there are no blocks in front of the head
1037 	 * with the same cycle number as the head.  This can happen
1038 	 * because we allow multiple outstanding log writes concurrently,
1039 	 * and the later writes might make it out before earlier ones.
1040 	 *
1041 	 * We use the lsn from before modifying it so that we'll never
1042 	 * overwrite the unmount record after a clean unmount.
1043 	 *
1044 	 * Do this only if we are going to recover the filesystem
1045 	 *
1046 	 * NOTE: This used to say "if (!readonly)"
1047 	 * However on Linux, we can & do recover a read-only filesystem.
1048 	 * We only skip recovery if NORECOVERY is specified on mount,
1049 	 * in which case we would not be here.
1050 	 *
1051 	 * But... if the -device- itself is readonly, just skip this.
1052 	 * We can't recover this device anyway, so it won't matter.
1053 	 */
1054 	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1055 		error = xlog_clear_stale_blocks(log, tail_lsn);
1056 
1057 done:
1058 	xlog_put_bp(bp);
1059 
1060 	if (error)
1061 		xfs_warn(log->l_mp, "failed to locate log tail");
1062 	return error;
1063 }
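
/*
 * A sketch of the unmount-record test above (hypothetical example_*
 * name): the record body begins hblks header blocks past the record
 * header at block i, and a clean unmount leaves the head exactly one
 * one-op record past that header; both positions wrap modulo the
 * physical log size.
 */
static int
example_is_clean_unmount(
	int	i,		/* block of the last record header */
	int	hblks,		/* header blocks in that record */
	int	len_bblks,	/* BTOBB(h_len): record payload length */
	int	log_bbnum,	/* physical log size */
	int	head_blk,	/* head from xlog_find_head() */
	int	num_logops)	/* log operations in the record */
{
	int	after_umount_blk = (i + hblks + len_bblks) % log_bbnum;

	return head_blk == after_umount_blk && num_logops == 1;
}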
1064 
1065 /*
1066  * Is the log zeroed at all?
1067  *
1068  * The last binary search should be changed to perform an X block read
1069  * once X becomes small enough.  You can then search linearly through
1070  * the X blocks.  This will cut down on the number of reads we need to do.
1071  *
1072  * If the log is partially zeroed, this routine will pass back the blkno
1073  * of the first block with cycle number 0.  It won't have a complete LR
1074  * preceding it.
1075  *
1076  * Return:
1077  *	0  => the log is completely written to
1078  *	-1 => use *blk_no as the first block of the log
1079  *	>0 => error has occurred
1080  */
1081 STATIC int
1082 xlog_find_zeroed(
1083 	xlog_t		*log,
1084 	xfs_daddr_t	*blk_no)
1085 {
1086 	xfs_buf_t	*bp;
1087 	xfs_caddr_t	offset;
1088 	uint	        first_cycle, last_cycle;
1089 	xfs_daddr_t	new_blk, last_blk, start_blk;
1090 	xfs_daddr_t     num_scan_bblks;
1091 	int	        error, log_bbnum = log->l_logBBsize;
1092 
1093 	*blk_no = 0;
1094 
1095 	/* check totally zeroed log */
1096 	bp = xlog_get_bp(log, 1);
1097 	if (!bp)
1098 		return ENOMEM;
1099 	error = xlog_bread(log, 0, 1, bp, &offset);
1100 	if (error)
1101 		goto bp_err;
1102 
1103 	first_cycle = xlog_get_cycle(offset);
1104 	if (first_cycle == 0) {		/* completely zeroed log */
1105 		*blk_no = 0;
1106 		xlog_put_bp(bp);
1107 		return -1;
1108 	}
1109 
1110 	/* check partially zeroed log */
1111 	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1112 	if (error)
1113 		goto bp_err;
1114 
1115 	last_cycle = xlog_get_cycle(offset);
1116 	if (last_cycle != 0) {		/* log completely written to */
1117 		xlog_put_bp(bp);
1118 		return 0;
1119 	} else if (first_cycle != 1) {
1120 		/*
1121 		 * If the cycle of the last block is zero, the cycle of
1122 		 * the first block must be 1. If it's not, maybe we're
1123 		 * not looking at a log... Bail out.
1124 		 */
1125 		xfs_warn(log->l_mp,
1126 			"Log inconsistent or not a log (last==0, first!=1)");
1127 		return XFS_ERROR(EINVAL);
1128 	}
1129 
1130 	/* we have a partially zeroed log */
1131 	last_blk = log_bbnum-1;
1132 	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1133 		goto bp_err;
1134 
1135 	/*
1136 	 * Validate the answer.  Because there is no way to guarantee that
1137 	 * the entire log is made up of log records which are the same size,
1138 	 * we scan over the defined maximum blocks.  At this point, the maximum
1139 	 * is not chosen to mean anything special.   XXXmiken
1140 	 */
1141 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1142 	ASSERT(num_scan_bblks <= INT_MAX);
1143 
1144 	if (last_blk < num_scan_bblks)
1145 		num_scan_bblks = last_blk;
1146 	start_blk = last_blk - num_scan_bblks;
1147 
1148 	/*
1149 	 * We search for any instances of cycle number 0 that occur before
1150 	 * our current estimate of the head.  What we're trying to detect is
1151 	 *        1 ... | 0 | 1 | 0...
1152 	 *                       ^ binary search ends here
1153 	 */
1154 	if ((error = xlog_find_verify_cycle(log, start_blk,
1155 					 (int)num_scan_bblks, 0, &new_blk)))
1156 		goto bp_err;
1157 	if (new_blk != -1)
1158 		last_blk = new_blk;
1159 
1160 	/*
1161 	 * Potentially backup over partial log record write.  We don't need
1162 	 * to search the end of the log because we know it is zero.
1163 	 */
1164 	if ((error = xlog_find_verify_log_record(log, start_blk,
1165 				&last_blk, 0)) == -1) {
1166 		error = XFS_ERROR(EIO);
1167 		goto bp_err;
1168 	} else if (error)
1169 		goto bp_err;
1170 
1171 	*blk_no = last_blk;
1172 bp_err:
1173 	xlog_put_bp(bp);
1174 	if (error)
1175 		return error;
1176 	return -1;
1177 }
1178 
1179 /*
1180  * These are simple subroutines used by xlog_clear_stale_blocks() below
1181  * to initialize a buffer full of empty log record headers and write
1182  * them into the log.
1183  */
1184 STATIC void
1185 xlog_add_record(
1186 	xlog_t			*log,
1187 	xfs_caddr_t		buf,
1188 	int			cycle,
1189 	int			block,
1190 	int			tail_cycle,
1191 	int			tail_block)
1192 {
1193 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
1194 
1195 	memset(buf, 0, BBSIZE);
1196 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1197 	recp->h_cycle = cpu_to_be32(cycle);
1198 	recp->h_version = cpu_to_be32(
1199 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1200 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1201 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1202 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
1203 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1204 }
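
/*
 * A sketch of the LSN packing used by h_lsn/h_tail_lsn above: an LSN
 * carries the cycle number in its high 32 bits and the block number in
 * its low 32 bits (see xlog_assign_lsn() and the CYCLE_LSN()/BLOCK_LSN()
 * macros for the real definitions).  Hypothetical example_* names.
 */
static inline unsigned long long
example_assign_lsn(unsigned int cycle, unsigned int block)
{
	return ((unsigned long long)cycle << 32) | block;
}

static inline unsigned int
example_lsn_cycle(unsigned long long lsn)
{
	return (unsigned int)(lsn >> 32);
}

static inline unsigned int
example_lsn_block(unsigned long long lsn)
{
	return (unsigned int)lsn;	/* the real BLOCK_LSN() also masks */
}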
1205 
1206 STATIC int
1207 xlog_write_log_records(
1208 	xlog_t		*log,
1209 	int		cycle,
1210 	int		start_block,
1211 	int		blocks,
1212 	int		tail_cycle,
1213 	int		tail_block)
1214 {
1215 	xfs_caddr_t	offset;
1216 	xfs_buf_t	*bp;
1217 	int		balign, ealign;
1218 	int		sectbb = log->l_sectBBsize;
1219 	int		end_block = start_block + blocks;
1220 	int		bufblks;
1221 	int		error = 0;
1222 	int		i, j = 0;
1223 
1224 	/*
1225 	 * Greedily allocate a buffer big enough to handle the full
1226 	 * range of basic blocks to be written.  If that fails, try
1227 	 * a smaller size.  We need to be able to write at least a
1228 	 * log sector, or we're out of luck.
1229 	 */
1230 	bufblks = 1 << ffs(blocks);
1231 	while (bufblks > log->l_logBBsize)
1232 		bufblks >>= 1;
1233 	while (!(bp = xlog_get_bp(log, bufblks))) {
1234 		bufblks >>= 1;
1235 		if (bufblks < sectbb)
1236 			return ENOMEM;
1237 	}
1238 
1239 	/* We may need to do a read at the start to fill in part of
1240 	 * the buffer in the starting sector not covered by the first
1241 	 * write below.
1242 	 */
1243 	balign = round_down(start_block, sectbb);
1244 	if (balign != start_block) {
1245 		error = xlog_bread_noalign(log, start_block, 1, bp);
1246 		if (error)
1247 			goto out_put_bp;
1248 
1249 		j = start_block - balign;
1250 	}
1251 
1252 	for (i = start_block; i < end_block; i += bufblks) {
1253 		int		bcount, endcount;
1254 
1255 		bcount = min(bufblks, end_block - start_block);
1256 		endcount = bcount - j;
1257 
1258 		/* We may need to do a read at the end to fill in part of
1259 		 * the buffer in the final sector not covered by the write.
1260 		 * If this is the same sector as the above read, skip it.
1261 		 */
1262 		ealign = round_down(end_block, sectbb);
1263 		if (j == 0 && (start_block + endcount > ealign)) {
1264 			offset = bp->b_addr + BBTOB(ealign - start_block);
1265 			error = xlog_bread_offset(log, ealign, sectbb,
1266 							bp, offset);
1267 			if (error)
1268 				break;
1269 
1270 		}
1271 
1272 		offset = xlog_align(log, start_block, endcount, bp);
1273 		for (; j < endcount; j++) {
1274 			xlog_add_record(log, offset, cycle, i+j,
1275 					tail_cycle, tail_block);
1276 			offset += BBSIZE;
1277 		}
1278 		error = xlog_bwrite(log, start_block, endcount, bp);
1279 		if (error)
1280 			break;
1281 		start_block += endcount;
1282 		j = 0;
1283 	}
1284 
1285  out_put_bp:
1286 	xlog_put_bp(bp);
1287 	return error;
1288 }
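
/*
 * A sketch of the sector-edge handling above (hypothetical example_*
 * name): writes are sector-granular, so when the range starts mid-sector
 * the partial leading sector is read back first, and 'j' records how many
 * of its blocks must be preserved rather than stamped.
 */
static inline int
example_leading_edge_blocks(
	int	start_block,
	int	sect_bblks)	/* sector size in bblks, power of 2 */
{
	int	balign = start_block & ~(sect_bblks - 1);	/* round_down */

	return start_block - balign;	/* the 'j' offset used above */
}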
1289 
1290 /*
1291  * This routine is called to blow away any incomplete log writes out
1292  * in front of the log head.  We do this so that we won't become confused
1293  * if we come up, write only a little bit more, and then crash again.
1294  * If we leave the partial log records out there, this situation could
1295  * cause us to think those partial writes are valid blocks since they
1296  * have the current cycle number.  We get rid of them by overwriting them
1297  * with empty log records with the old cycle number rather than the
1298  * current one.
1299  *
1300  * The tail lsn is passed in rather than taken from
1301  * the log so that we will not write over the unmount record after a
1302  * clean unmount in a 512 block log.  Doing so would leave the log without
1303  * any valid log records in it until a new one was written.  If we crashed
1304  * during that time we would not be able to recover.
1305  */
1306 STATIC int
1307 xlog_clear_stale_blocks(
1308 	xlog_t		*log,
1309 	xfs_lsn_t	tail_lsn)
1310 {
1311 	int		tail_cycle, head_cycle;
1312 	int		tail_block, head_block;
1313 	int		tail_distance, max_distance;
1314 	int		distance;
1315 	int		error;
1316 
1317 	tail_cycle = CYCLE_LSN(tail_lsn);
1318 	tail_block = BLOCK_LSN(tail_lsn);
1319 	head_cycle = log->l_curr_cycle;
1320 	head_block = log->l_curr_block;
1321 
1322 	/*
1323 	 * Figure out the distance between the new head of the log
1324 	 * and the tail.  We want to write over any blocks beyond the
1325 	 * head that we may have written just before the crash, but
1326 	 * we don't want to overwrite the tail of the log.
1327 	 */
1328 	if (head_cycle == tail_cycle) {
1329 		/*
1330 		 * The tail is behind the head in the physical log,
1331 		 * so the distance from the head to the tail is the
1332 		 * distance from the head to the end of the log plus
1333 		 * the distance from the beginning of the log to the
1334 		 * tail.
1335 		 */
1336 		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1337 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1338 					 XFS_ERRLEVEL_LOW, log->l_mp);
1339 			return XFS_ERROR(EFSCORRUPTED);
1340 		}
1341 		tail_distance = tail_block + (log->l_logBBsize - head_block);
1342 	} else {
1343 		/*
1344 		 * The head is behind the tail in the physical log,
1345 		 * so the distance from the head to the tail is just
1346 		 * the tail block minus the head block.
1347 		 */
1348 		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1349 			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1350 					 XFS_ERRLEVEL_LOW, log->l_mp);
1351 			return XFS_ERROR(EFSCORRUPTED);
1352 		}
1353 		tail_distance = tail_block - head_block;
1354 	}
1355 
1356 	/*
1357 	 * If the head is right up against the tail, we can't clear
1358 	 * anything.
1359 	 */
1360 	if (tail_distance <= 0) {
1361 		ASSERT(tail_distance == 0);
1362 		return 0;
1363 	}
1364 
1365 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
1366 	/*
1367 	 * Take the smaller of the maximum amount of outstanding I/O
1368 	 * we could have and the distance to the tail to clear out.
1369 	 * We take the smaller so that we don't overwrite the tail and
1370 	 * we don't waste all day writing from the head to the tail
1371 	 * for no reason.
1372 	 */
1373 	max_distance = MIN(max_distance, tail_distance);
1374 
1375 	if ((head_block + max_distance) <= log->l_logBBsize) {
1376 		/*
1377 		 * We can stomp all the blocks we need to without
1378 		 * wrapping around the end of the log.  Just do it
1379 		 * in a single write.  Use the cycle number of the
1380 		 * current cycle minus one so that the log will look like:
1381 		 *     n ... | n - 1 ...
1382 		 */
1383 		error = xlog_write_log_records(log, (head_cycle - 1),
1384 				head_block, max_distance, tail_cycle,
1385 				tail_block);
1386 		if (error)
1387 			return error;
1388 	} else {
1389 		/*
1390 		 * We need to wrap around the end of the physical log in
1391 		 * order to clear all the blocks.  Do it in two separate
1392 		 * I/Os.  The first write should be from the head to the
1393 		 * end of the physical log, and it should use the current
1394 		 * cycle number minus one just like above.
1395 		 */
1396 		distance = log->l_logBBsize - head_block;
1397 		error = xlog_write_log_records(log, (head_cycle - 1),
1398 				head_block, distance, tail_cycle,
1399 				tail_block);
1400 
1401 		if (error)
1402 			return error;
1403 
1404 		/*
1405 		 * Now write the blocks at the start of the physical log.
1406 		 * This writes the remainder of the blocks we want to clear.
1407 		 * It uses the current cycle number since we're now on the
1408 		 * same cycle as the head so that we get:
1409 		 *    n ... n ... | n - 1 ...
1410 		 *    ^^^^^ blocks we're writing
1411 		 */
1412 		distance = max_distance - (log->l_logBBsize - head_block);
1413 		error = xlog_write_log_records(log, head_cycle, 0, distance,
1414 				tail_cycle, tail_block);
1415 		if (error)
1416 			return error;
1417 	}
1418 
1419 	return 0;
1420 }
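
/*
 * A sketch of the head-to-tail distance above (hypothetical example_*
 * name, validity checks omitted): equal cycles mean the tail is
 * physically behind the head and the gap wraps the end of the log;
 * otherwise head_cycle == tail_cycle + 1 and the gap is direct.
 */
static inline int
example_tail_distance(
	int	head_cycle,
	int	head_block,
	int	tail_cycle,
	int	tail_block,
	int	log_bbnum)	/* physical log size */
{
	if (head_cycle == tail_cycle)
		return tail_block + (log_bbnum - head_block);
	return tail_block - head_block;
}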
1421 
1422 /******************************************************************************
1423  *
1424  *		Log recover routines
1425  *
1426  ******************************************************************************
1427  */
1428 
1429 STATIC xlog_recover_t *
1430 xlog_recover_find_tid(
1431 	struct hlist_head	*head,
1432 	xlog_tid_t		tid)
1433 {
1434 	xlog_recover_t		*trans;
1435 	struct hlist_node	*n;
1436 
1437 	hlist_for_each_entry(trans, n, head, r_list) {
1438 		if (trans->r_log_tid == tid)
1439 			return trans;
1440 	}
1441 	return NULL;
1442 }
1443 
1444 STATIC void
1445 xlog_recover_new_tid(
1446 	struct hlist_head	*head,
1447 	xlog_tid_t		tid,
1448 	xfs_lsn_t		lsn)
1449 {
1450 	xlog_recover_t		*trans;
1451 
1452 	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1453 	trans->r_log_tid   = tid;
1454 	trans->r_lsn	   = lsn;
1455 	INIT_LIST_HEAD(&trans->r_itemq);
1456 
1457 	INIT_HLIST_NODE(&trans->r_list);
1458 	hlist_add_head(&trans->r_list, head);
1459 }
1460 
1461 STATIC void
1462 xlog_recover_add_item(
1463 	struct list_head	*head)
1464 {
1465 	xlog_recover_item_t	*item;
1466 
1467 	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1468 	INIT_LIST_HEAD(&item->ri_list);
1469 	list_add_tail(&item->ri_list, head);
1470 }
1471 
1472 STATIC int
1473 xlog_recover_add_to_cont_trans(
1474 	struct log		*log,
1475 	xlog_recover_t		*trans,
1476 	xfs_caddr_t		dp,
1477 	int			len)
1478 {
1479 	xlog_recover_item_t	*item;
1480 	xfs_caddr_t		ptr, old_ptr;
1481 	int			old_len;
1482 
1483 	if (list_empty(&trans->r_itemq)) {
1484 		/* finish copying rest of trans header */
1485 		xlog_recover_add_item(&trans->r_itemq);
1486 		ptr = (xfs_caddr_t) &trans->r_theader +
1487 				sizeof(xfs_trans_header_t) - len;
1488 		memcpy(ptr, dp, len); /* d, s, l */
1489 		return 0;
1490 	}
1491 	/* take the tail entry */
1492 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1493 
1494 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1495 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
1496 
1497 	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1498 	memcpy(&ptr[old_len], dp, len); /* d, s, l */
1499 	item->ri_buf[item->ri_cnt-1].i_len += len;
1500 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1501 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1502 	return 0;
1503 }
1504 
1505 /*
1506  * The next region to add is the start of a new region.  It could be
1507  * a whole region or it could be the first part of a new region.  Because
1508  * of this, the assumption here is that the type and size fields of all
1509  * format structures fit into the first 32 bits of the structure.
1510  *
1511  * This works because all regions must be 32 bit aligned.  Therefore, we
1512  * either have both fields or we have neither field.  In the case we have
1513  * neither field, the data part of the region is zero length.  We only have
1514  * a log_op_header and can throw away the header since a new one will appear
1515  * later.  If we have at least 4 bytes, then we can determine how many regions
1516  * will appear in the current log item.
1517  */
1518 STATIC int
1519 xlog_recover_add_to_trans(
1520 	struct log		*log,
1521 	xlog_recover_t		*trans,
1522 	xfs_caddr_t		dp,
1523 	int			len)
1524 {
1525 	xfs_inode_log_format_t	*in_f;			/* any will do */
1526 	xlog_recover_item_t	*item;
1527 	xfs_caddr_t		ptr;
1528 
1529 	if (!len)
1530 		return 0;
1531 	if (list_empty(&trans->r_itemq)) {
1532 		/* we need to catch log corruptions here */
1533 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1534 			xfs_warn(log->l_mp, "%s: bad header magic number",
1535 				__func__);
1536 			ASSERT(0);
1537 			return XFS_ERROR(EIO);
1538 		}
1539 		if (len == sizeof(xfs_trans_header_t))
1540 			xlog_recover_add_item(&trans->r_itemq);
1541 		memcpy(&trans->r_theader, dp, len); /* d, s, l */
1542 		return 0;
1543 	}
1544 
1545 	ptr = kmem_alloc(len, KM_SLEEP);
1546 	memcpy(ptr, dp, len);
1547 	in_f = (xfs_inode_log_format_t *)ptr;
1548 
1549 	/* take the tail entry */
1550 	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1551 	if (item->ri_total != 0 &&
1552 	     item->ri_total == item->ri_cnt) {
1553 		/* tail item is in use, get a new one */
1554 		xlog_recover_add_item(&trans->r_itemq);
1555 		item = list_entry(trans->r_itemq.prev,
1556 					xlog_recover_item_t, ri_list);
1557 	}
1558 
1559 	if (item->ri_total == 0) {		/* first region to be added */
1560 		if (in_f->ilf_size == 0 ||
1561 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1562 			xfs_warn(log->l_mp,
1563 		"bad number of regions (%d) in inode log format",
1564 				  in_f->ilf_size);
1565 			ASSERT(0);
1566 			return XFS_ERROR(EIO);
1567 		}
1568 
1569 		item->ri_total = in_f->ilf_size;
1570 		item->ri_buf =
1571 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1572 				    KM_SLEEP);
1573 	}
1574 	ASSERT(item->ri_total > item->ri_cnt);
1575 	/* Description region is ri_buf[0] */
1576 	item->ri_buf[item->ri_cnt].i_addr = ptr;
1577 	item->ri_buf[item->ri_cnt].i_len  = len;
1578 	item->ri_cnt++;
1579 	trace_xfs_log_recover_item_add(log, trans, item, 0);
1580 	return 0;
1581 }
1582 
1583 /*
1584  * Sort the log items in the transaction. Cancelled buffers need
1585  * to be put first so they are processed before any items that might
1586  * modify the buffers. If they are cancelled, then the modifications
1587  * don't need to be replayed.
1588  */
1589 STATIC int
1590 xlog_recover_reorder_trans(
1591 	struct log		*log,
1592 	xlog_recover_t		*trans,
1593 	int			pass)
1594 {
1595 	xlog_recover_item_t	*item, *n;
1596 	LIST_HEAD(sort_list);
1597 
1598 	list_splice_init(&trans->r_itemq, &sort_list);
1599 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1600 		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1601 
1602 		switch (ITEM_TYPE(item)) {
1603 		case XFS_LI_BUF:
1604 			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1605 				trace_xfs_log_recover_item_reorder_head(log,
1606 							trans, item, pass);
1607 				list_move(&item->ri_list, &trans->r_itemq);
1608 				break;
1609 			}
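			/* fall through */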
1610 		case XFS_LI_INODE:
1611 		case XFS_LI_DQUOT:
1612 		case XFS_LI_QUOTAOFF:
1613 		case XFS_LI_EFD:
1614 		case XFS_LI_EFI:
1615 			trace_xfs_log_recover_item_reorder_tail(log,
1616 							trans, item, pass);
1617 			list_move_tail(&item->ri_list, &trans->r_itemq);
1618 			break;
1619 		default:
1620 			xfs_warn(log->l_mp,
1621 				"%s: unrecognized type of log operation",
1622 				__func__);
1623 			ASSERT(0);
1624 			return XFS_ERROR(EIO);
1625 		}
1626 	}
1627 	ASSERT(list_empty(&sort_list));
1628 	return 0;
1629 }
1630 
1631 /*
1632  * Build up the table of buf cancel records so that we don't replay
1633  * cancelled data in the second pass.  For buffer records that are
1634  * not cancel records, there is nothing to do here so we just return.
1635  *
1636  * If we get a cancel record which is already in the table, this indicates
1637  * that the buffer was cancelled multiple times.  In order to ensure
1638  * that during pass 2 we keep the record in the table until we reach its
1639  * last occurrence in the log, we keep a reference count in the cancel
1640  * record in the table to tell us how many times we expect to see this
1641  * record during the second pass.
1642  */
1643 STATIC int
1644 xlog_recover_buffer_pass1(
1645 	struct log		*log,
1646 	xlog_recover_item_t	*item)
1647 {
1648 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
1649 	struct list_head	*bucket;
1650 	struct xfs_buf_cancel	*bcp;
1651 
1652 	/*
1653 	 * If this isn't a cancel buffer item, then just return.
1654 	 */
1655 	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1656 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1657 		return 0;
1658 	}
1659 
1660 	/*
1661 	 * Insert an xfs_buf_cancel record into the hash table of them.
1662 	 * If there is already an identical record, bump its reference count.
1663 	 */
1664 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1665 	list_for_each_entry(bcp, bucket, bc_list) {
1666 		if (bcp->bc_blkno == buf_f->blf_blkno &&
1667 		    bcp->bc_len == buf_f->blf_len) {
1668 			bcp->bc_refcount++;
1669 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1670 			return 0;
1671 		}
1672 	}
1673 
1674 	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1675 	bcp->bc_blkno = buf_f->blf_blkno;
1676 	bcp->bc_len = buf_f->blf_len;
1677 	bcp->bc_refcount = 1;
1678 	list_add_tail(&bcp->bc_list, bucket);
1679 
1680 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1681 	return 0;
1682 }
1683 
1684 /*
1685  * Check to see whether the buffer being recovered has a corresponding
1686  * entry in the buffer cancel record table.  If it does then return 1
1687  * so that it will be cancelled, otherwise return 0.  If the buffer is
1688  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1689  * the refcount on the entry in the table and remove it from the table
1690  * if this is the last reference.
1691  *
1692  * We remove the cancel record from the table when we encounter its
1693  * last occurrence in the log so that if the same buffer is re-used
1694  * again after its last cancellation we actually replay the changes
1695  * made at that point.
1696  */
1697 STATIC int
1698 xlog_check_buffer_cancelled(
1699 	struct log		*log,
1700 	xfs_daddr_t		blkno,
1701 	uint			len,
1702 	ushort			flags)
1703 {
1704 	struct list_head	*bucket;
1705 	struct xfs_buf_cancel	*bcp;
1706 
1707 	if (log->l_buf_cancel_table == NULL) {
1708 		/*
1709 		 * There is nothing in the table built in pass one,
1710 		 * so this buffer must not be cancelled.
1711 		 */
1712 		ASSERT(!(flags & XFS_BLF_CANCEL));
1713 		return 0;
1714 	}
1715 
1716 	/*
1717 	 * Search for an entry in the buffer cancel table that matches our buffer.
1718 	 */
1719 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1720 	list_for_each_entry(bcp, bucket, bc_list) {
1721 		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1722 			goto found;
1723 	}
1724 
1725 	/*
1726 	 * We didn't find a corresponding entry in the table, so return 0 so
1727 	 * that the buffer is NOT cancelled.
1728 	 */
1729 	ASSERT(!(flags & XFS_BLF_CANCEL));
1730 	return 0;
1731 
1732 found:
1733 	/*
1734 	 * We've got a match, so return 1 so that the recovery of this buffer
1735 	 * is cancelled.  If this buffer is actually a buffer cancel log
1736 	 * item, then decrement the refcount on the one in the table and
1737 	 * remove it if this is the last reference.
1738 	 */
1739 	if (flags & XFS_BLF_CANCEL) {
1740 		if (--bcp->bc_refcount == 0) {
1741 			list_del(&bcp->bc_list);
1742 			kmem_free(bcp);
1743 		}
1744 	}
1745 	return 1;
1746 }
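
/*
 * Callers treat a nonzero return as "skip this item", e.g.
 *
 *	if (xlog_check_buffer_cancelled(log, blkno, len, flags))
 *		return 0;
 *
 * which is the shape of the checks in xlog_recover_buffer_pass2() and
 * xlog_recover_inode_pass2() below: the buffer dies again before the
 * head of the log, so replaying it would only resurrect stale metadata.
 */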
1747 
1748 /*
1749  * Perform recovery for a buffer full of inodes.  In these buffers, the only
1750  * data which should be recovered is that which corresponds to the
1751  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
1752  * data for the inodes is always logged through the inodes themselves rather
1753  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1754  *
1755  * The only time when buffers full of inodes are fully recovered is when the
1756  * buffer is full of newly allocated inodes.  In this case the buffer will
1757  * not be marked as an inode buffer and so will be sent to
1758  * xlog_recover_do_reg_buffer() below during recovery.
1759  */
1760 STATIC int
1761 xlog_recover_do_inode_buffer(
1762 	struct xfs_mount	*mp,
1763 	xlog_recover_item_t	*item,
1764 	struct xfs_buf		*bp,
1765 	xfs_buf_log_format_t	*buf_f)
1766 {
1767 	int			i;
1768 	int			item_index = 0;
1769 	int			bit = 0;
1770 	int			nbits = 0;
1771 	int			reg_buf_offset = 0;
1772 	int			reg_buf_bytes = 0;
1773 	int			next_unlinked_offset;
1774 	int			inodes_per_buf;
1775 	xfs_agino_t		*logged_nextp;
1776 	xfs_agino_t		*buffer_nextp;
1777 
1778 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1779 
1780 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1781 	for (i = 0; i < inodes_per_buf; i++) {
1782 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1783 			offsetof(xfs_dinode_t, di_next_unlinked);
1784 
1785 		while (next_unlinked_offset >=
1786 		       (reg_buf_offset + reg_buf_bytes)) {
1787 			/*
1788 			 * The next di_next_unlinked field is beyond
1789 			 * the current logged region.  Find the next
1790 			 * logged region that contains or is beyond
1791 			 * the current di_next_unlinked field.
1792 			 */
1793 			bit += nbits;
1794 			bit = xfs_next_bit(buf_f->blf_data_map,
1795 					   buf_f->blf_map_size, bit);
1796 
1797 			/*
1798 			 * If there are no more logged regions in the
1799 			 * buffer, then we're done.
1800 			 */
1801 			if (bit == -1)
1802 				return 0;
1803 
1804 			nbits = xfs_contig_bits(buf_f->blf_data_map,
1805 						buf_f->blf_map_size, bit);
1806 			ASSERT(nbits > 0);
1807 			reg_buf_offset = bit << XFS_BLF_SHIFT;
1808 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1809 			item_index++;
1810 		}
1811 
1812 		/*
1813 		 * If the current logged region starts after the current
1814 		 * di_next_unlinked field, then move on to the next
1815 		 * di_next_unlinked field.
1816 		 */
1817 		if (next_unlinked_offset < reg_buf_offset)
1818 			continue;
1819 
1820 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
1821 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1822 		ASSERT((reg_buf_offset + reg_buf_bytes) <=
1823 							BBTOB(bp->b_io_length));
1824 
1825 		/*
1826 		 * The current logged region contains a copy of the
1827 		 * current di_next_unlinked field.  Extract its value
1828 		 * and copy it to the buffer copy.
1829 		 */
1830 		logged_nextp = item->ri_buf[item_index].i_addr +
1831 				next_unlinked_offset - reg_buf_offset;
1832 		if (unlikely(*logged_nextp == 0)) {
1833 			xfs_alert(mp,
1834 		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1835 		"Trying to replay bad (0) inode di_next_unlinked field.",
1836 				item, bp);
1837 			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1838 					 XFS_ERRLEVEL_LOW, mp);
1839 			return XFS_ERROR(EFSCORRUPTED);
1840 		}
1841 
1842 		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1843 					      next_unlinked_offset);
1844 		*buffer_nextp = *logged_nextp;
1845 	}
1846 
1847 	return 0;
1848 }
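
/*
 * The arithmetic above is easier to see with numbers: the data map is
 * kept in XFS_BLF_CHUNK (128 byte) units, so bit N of blf_data_map
 * covers the 128 bytes starting at byte offset N << XFS_BLF_SHIFT.
 * With 256 byte inodes, the di_next_unlinked field of the fourth inode
 * in the buffer lives at byte offset
 * 3 * 256 + offsetof(xfs_dinode_t, di_next_unlinked), and the loop
 * skips forward through the logged regions until one of them covers
 * that offset.
 */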
1849 
1850 /*
1851  * Perform a 'normal' buffer recovery.  Each logged region of the
1852  * buffer should be copied over the corresponding region in the
1853  * given buffer.  The bitmap in the buf log format structure indicates
1854  * where to place the logged data.
1855  */
1856 STATIC void
1857 xlog_recover_do_reg_buffer(
1858 	struct xfs_mount	*mp,
1859 	xlog_recover_item_t	*item,
1860 	struct xfs_buf		*bp,
1861 	xfs_buf_log_format_t	*buf_f)
1862 {
1863 	int			i;
1864 	int			bit;
1865 	int			nbits;
1866 	int                     error;
1867 
1868 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1869 
1870 	bit = 0;
1871 	i = 1;  /* 0 is the buf format structure */
1872 	while (1) {
1873 		bit = xfs_next_bit(buf_f->blf_data_map,
1874 				   buf_f->blf_map_size, bit);
1875 		if (bit == -1)
1876 			break;
1877 		nbits = xfs_contig_bits(buf_f->blf_data_map,
1878 					buf_f->blf_map_size, bit);
1879 		ASSERT(nbits > 0);
1880 		ASSERT(item->ri_buf[i].i_addr != NULL);
1881 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1882 		ASSERT(BBTOB(bp->b_io_length) >=
1883 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
1884 
1885 		/*
1886 		 * Do a sanity check if this is a dquot buffer. Just checking
1887 		 * the first dquot in the buffer should do. XXX: this is
1888 		 * probably a good thing to do for other buf types also.
1889 		 */
1890 		error = 0;
1891 		if (buf_f->blf_flags &
1892 		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1893 			if (item->ri_buf[i].i_addr == NULL) {
1894 				xfs_alert(mp,
1895 					"XFS: NULL dquot in %s.", __func__);
1896 				goto next;
1897 			}
1898 			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1899 				xfs_alert(mp,
1900 					"XFS: dquot too small (%d) in %s.",
1901 					item->ri_buf[i].i_len, __func__);
1902 				goto next;
1903 			}
1904 			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1905 					       -1, 0, XFS_QMOPT_DOWARN,
1906 					       "dquot_buf_recover");
1907 			if (error)
1908 				goto next;
1909 		}
1910 
1911 		memcpy(xfs_buf_offset(bp,
1912 			(uint)bit << XFS_BLF_SHIFT),	/* dest */
1913 			item->ri_buf[i].i_addr,		/* source */
1914 			nbits<<XFS_BLF_SHIFT);		/* length */
1915  next:
1916 		i++;
1917 		bit += nbits;
1918 	}
1919 
1920 	/* Shouldn't be any more regions */
1921 	ASSERT(i == item->ri_total);
1922 }
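
/*
 * Example: a blf_data_map with only bits 2-4 set describes one logged
 * region of three contiguous chunks, i.e. 3 * 128 bytes starting at
 * byte offset 2 << XFS_BLF_SHIFT == 256 in the buffer, and it is
 * paired with ri_buf[1] since ri_buf[0] holds the format structure.
 */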
1923 
1924 /*
1925  * Do some primitive error checking on ondisk dquot data structures.
1926  */
1927 int
1928 xfs_qm_dqcheck(
1929 	struct xfs_mount *mp,
1930 	xfs_disk_dquot_t *ddq,
1931 	xfs_dqid_t	 id,
1932 	uint		 type,	  /* used only when repairing (XFS_QMOPT_DQREPAIR) */
1933 	uint		 flags,
1934 	char		 *str)
1935 {
1936 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
1937 	int		errs = 0;
1938 
1939 	/*
1940 	 * We can encounter an uninitialized dquot buffer for 2 reasons:
1941 	 * 1. If we crash while deleting the quotainode(s), and those blks got
1942 	 *    used for user data. This is because we take the path of regular
1943 	 *    file deletion; however, the size field of quotainodes is never
1944 	 *    updated, so all the tricks that we play in itruncate_finish
1945 	 *    don't quite matter.
1946 	 *
1947 	 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
1948 	 *    But the allocation will be replayed so we'll end up with an
1949 	 *    uninitialized quota block.
1950 	 *
1951 	 * This is all fine; things are still consistent, and we haven't lost
1952 	 * any quota information. Just don't complain about bad dquot blks.
1953 	 */
1954 	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
1955 		if (flags & XFS_QMOPT_DOWARN)
1956 			xfs_alert(mp,
1957 			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1958 			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1959 		errs++;
1960 	}
1961 	if (ddq->d_version != XFS_DQUOT_VERSION) {
1962 		if (flags & XFS_QMOPT_DOWARN)
1963 			xfs_alert(mp,
1964 			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1965 			str, id, ddq->d_version, XFS_DQUOT_VERSION);
1966 		errs++;
1967 	}
1968 
1969 	if (ddq->d_flags != XFS_DQ_USER &&
1970 	    ddq->d_flags != XFS_DQ_PROJ &&
1971 	    ddq->d_flags != XFS_DQ_GROUP) {
1972 		if (flags & XFS_QMOPT_DOWARN)
1973 			xfs_alert(mp,
1974 			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1975 			str, id, ddq->d_flags);
1976 		errs++;
1977 	}
1978 
1979 	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1980 		if (flags & XFS_QMOPT_DOWARN)
1981 			xfs_alert(mp,
1982 			"%s : ondisk-dquot 0x%p, ID mismatch: "
1983 			"0x%x expected, found id 0x%x",
1984 			str, ddq, id, be32_to_cpu(ddq->d_id));
1985 		errs++;
1986 	}
1987 
1988 	if (!errs && ddq->d_id) {
1989 		if (ddq->d_blk_softlimit &&
1990 		    be64_to_cpu(ddq->d_bcount) >
1991 				be64_to_cpu(ddq->d_blk_softlimit)) {
1992 			if (!ddq->d_btimer) {
1993 				if (flags & XFS_QMOPT_DOWARN)
1994 					xfs_alert(mp,
1995 			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
1996 					str, (int)be32_to_cpu(ddq->d_id), ddq);
1997 				errs++;
1998 			}
1999 		}
2000 		if (ddq->d_ino_softlimit &&
2001 		    be64_to_cpu(ddq->d_icount) >
2002 				be64_to_cpu(ddq->d_ino_softlimit)) {
2003 			if (!ddq->d_itimer) {
2004 				if (flags & XFS_QMOPT_DOWARN)
2005 					xfs_alert(mp,
2006 			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2007 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2008 				errs++;
2009 			}
2010 		}
2011 		if (ddq->d_rtb_softlimit &&
2012 		    be64_to_cpu(ddq->d_rtbcount) >
2013 				be64_to_cpu(ddq->d_rtb_softlimit)) {
2014 			if (!ddq->d_rtbtimer) {
2015 				if (flags & XFS_QMOPT_DOWARN)
2016 					xfs_alert(mp,
2017 			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2018 					str, (int)be32_to_cpu(ddq->d_id), ddq);
2019 				errs++;
2020 			}
2021 		}
2022 	}
2023 
2024 	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2025 		return errs;
2026 
2027 	if (flags & XFS_QMOPT_DOWARN)
2028 		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2029 
2030 	/*
2031 	 * Typically, a repair is only requested by quotacheck.
2032 	 */
2033 	ASSERT(id != -1);
2034 	ASSERT(flags & XFS_QMOPT_DQREPAIR);
2035 	memset(d, 0, sizeof(xfs_dqblk_t));
2036 
2037 	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2038 	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2039 	d->dd_diskdq.d_flags = type;
2040 	d->dd_diskdq.d_id = cpu_to_be32(id);
2041 
2042 	return errs;
2043 }
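
/*
 * Note the return convention above: the number of problems found, not
 * a negative errno.  The log recovery callers pass XFS_QMOPT_DOWARN
 * without XFS_QMOPT_DQREPAIR, so a bad dquot is warned about and the
 * caller either skips the region (xlog_recover_do_reg_buffer) or fails
 * with EIO (xlog_recover_dquot_pass2); quotacheck also passes
 * XFS_QMOPT_DQREPAIR, in which case the block is reinitialized above
 * and the dquot becomes usable again.
 */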
2044 
2045 /*
2046  * Perform a dquot buffer recovery.
2047  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2048  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2049  * Else, treat it as a regular buffer and do recovery.
2050  */
2051 STATIC void
2052 xlog_recover_do_dquot_buffer(
2053 	xfs_mount_t		*mp,
2054 	xlog_t			*log,
2055 	xlog_recover_item_t	*item,
2056 	xfs_buf_t		*bp,
2057 	xfs_buf_log_format_t	*buf_f)
2058 {
2059 	uint			type;
2060 
2061 	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2062 
2063 	/*
2064 	 * Filesystems are required to send in quota flags at mount time.
2065 	 */
2066 	if (mp->m_qflags == 0) {
2067 		return;
2068 	}
2069 
2070 	type = 0;
2071 	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2072 		type |= XFS_DQ_USER;
2073 	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2074 		type |= XFS_DQ_PROJ;
2075 	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2076 		type |= XFS_DQ_GROUP;
2077 	/*
2078 	 * If this type of quota was turned off, ignore this buffer.
2079 	 */
2080 	if (log->l_quotaoffs_flag & type)
2081 		return;
2082 
2083 	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2084 }
2085 
2086 /*
2087  * This routine replays a modification made to a buffer at runtime.
2088  * There are actually two types of buffer, regular and inode, which
2089  * are handled differently.  Inode buffers are handled differently
2090  * in that we only recover a specific set of data from them, namely
2091  * the inode di_next_unlinked fields.  This is because all other inode
2092  * data is actually logged via inode records and any data we replay
2093  * here which overlaps that may be stale.
2094  *
2095  * When meta-data buffers are freed at run time we log a buffer item
2096  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2097  * of the buffer in the log should not be replayed at recovery time.
2098  * This is so that if the blocks covered by the buffer are reused for
2099  * file data before we crash we don't end up replaying old, freed
2100  * meta-data into a user's file.
2101  *
2102  * To handle the cancellation of buffer log items, we make two passes
2103  * over the log during recovery.  During the first we build a table of
2104  * those buffers which have been cancelled, and during the second we
2105  * only replay those buffers which do not have corresponding cancel
2106  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2107  * for more details on the implementation of the table of cancel records.
2108  */
2109 STATIC int
2110 xlog_recover_buffer_pass2(
2111 	xlog_t			*log,
2112 	struct list_head	*buffer_list,
2113 	xlog_recover_item_t	*item)
2114 {
2115 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
2116 	xfs_mount_t		*mp = log->l_mp;
2117 	xfs_buf_t		*bp;
2118 	int			error;
2119 	uint			buf_flags;
2120 
2121 	/*
2122 	 * In this pass we only want to recover all the buffers which have
2123 	 * not been cancelled and are not cancellation buffers themselves.
2124 	 */
2125 	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2126 			buf_f->blf_len, buf_f->blf_flags)) {
2127 		trace_xfs_log_recover_buf_cancel(log, buf_f);
2128 		return 0;
2129 	}
2130 
2131 	trace_xfs_log_recover_buf_recover(log, buf_f);
2132 
2133 	buf_flags = 0;
2134 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2135 		buf_flags |= XBF_UNMAPPED;
2136 
2137 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2138 			  buf_flags);
2139 	if (!bp)
2140 		return XFS_ERROR(ENOMEM);
2141 	error = bp->b_error;
2142 	if (error) {
2143 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2144 		xfs_buf_relse(bp);
2145 		return error;
2146 	}
2147 
2148 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2149 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2150 	} else if (buf_f->blf_flags &
2151 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2152 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2153 	} else {
2154 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2155 	}
2156 	if (error) {
2156 		xfs_buf_relse(bp);	/* don't leak the buffer on error */
2157 		return XFS_ERROR(error);
2157 	}
2158 
2159 	/*
2160 	 * Perform delayed write on the buffer.  Asynchronous writes will be
2161 	 * slower when taking into account all the buffers to be flushed.
2162 	 *
2163 	 * Also make sure that only inode buffers with good sizes stay in
2164 	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
2165 	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2166 	 * buffers in the log can be a different size if the log was generated
2167 	 * by an older kernel using unclustered inode buffers or a newer kernel
2168 	 * running with a different inode cluster size.  Regardless, if
2169 	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2170 	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2171 	 * the buffer out of the buffer cache so that the buffer won't
2172 	 * overlap with future reads of those inodes.
2173 	 */
2174 	if (XFS_DINODE_MAGIC ==
2175 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2176 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2177 			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2178 		xfs_buf_stale(bp);
2179 		error = xfs_bwrite(bp);
2180 	} else {
2181 		ASSERT(bp->b_target->bt_mount == mp);
2182 		bp->b_iodone = xlog_recover_iodone;
2183 		xfs_buf_delwri_queue(bp, buffer_list);
2184 	}
2185 
2186 	xfs_buf_relse(bp);
2187 	return error;
2188 }
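
/*
 * The inode buffer size check above, with numbers: on a filesystem
 * with 4k blocks and the usual 8k inode cluster, MAX() is 8k.  A log
 * written by an older kernel can contain 4k inode buffers; replaying
 * one leaves BBTOB(bp->b_io_length) == 4k != 8k, so the buffer is
 * written out synchronously and marked stale instead of being queued,
 * keeping it out of the cache where it could overlap a future 8k
 * cluster read.
 */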
2189 
2190 STATIC int
2191 xlog_recover_inode_pass2(
2192 	xlog_t			*log,
2193 	struct list_head	*buffer_list,
2194 	xlog_recover_item_t	*item)
2195 {
2196 	xfs_inode_log_format_t	*in_f;
2197 	xfs_mount_t		*mp = log->l_mp;
2198 	xfs_buf_t		*bp;
2199 	xfs_dinode_t		*dip;
2200 	int			len;
2201 	xfs_caddr_t		src;
2202 	xfs_caddr_t		dest;
2203 	int			error;
2204 	int			attr_index;
2205 	uint			fields;
2206 	xfs_icdinode_t		*dicp;
2207 	int			need_free = 0;
2208 
2209 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2210 		in_f = item->ri_buf[0].i_addr;
2211 	} else {
2212 		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2213 		need_free = 1;
2214 		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2215 		if (error)
2216 			goto error;
2217 	}
2218 
2219 	/*
2220 	 * Inode buffers can be freed; look out for a cancelled buffer
2221 	 * and do not replay the inode if its buffer was cancelled.
2222 	 */
2223 	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2224 					in_f->ilf_len, 0)) {
2225 		error = 0;
2226 		trace_xfs_log_recover_inode_cancel(log, in_f);
2227 		goto error;
2228 	}
2229 	trace_xfs_log_recover_inode_recover(log, in_f);
2230 
2231 	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0);
2232 	if (!bp) {
2233 		error = ENOMEM;
2234 		goto error;
2235 	}
2236 	error = bp->b_error;
2237 	if (error) {
2238 		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2239 		xfs_buf_relse(bp);
2240 		goto error;
2241 	}
2242 	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2243 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2244 
2245 	/*
2246 	 * Make sure the place we're flushing out to really looks
2247 	 * like an inode!
2248 	 */
2249 	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2250 		xfs_buf_relse(bp);
2251 		xfs_alert(mp,
2252 	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2253 			__func__, dip, bp, in_f->ilf_ino);
2254 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2255 				 XFS_ERRLEVEL_LOW, mp);
2256 		error = EFSCORRUPTED;
2257 		goto error;
2258 	}
2259 	dicp = item->ri_buf[1].i_addr;
2260 	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2261 		xfs_buf_relse(bp);
2262 		xfs_alert(mp,
2263 			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2264 			__func__, item, in_f->ilf_ino);
2265 		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2266 				 XFS_ERRLEVEL_LOW, mp);
2267 		error = EFSCORRUPTED;
2268 		goto error;
2269 	}
2270 
2271 	/* Skip replay when the on disk inode is newer than the log one */
2272 	if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2273 		/*
2274 		 * Deal with the wrap case: DI_MAX_FLUSH on disk means the
2275 		 * counter has wrapped, so a small in-core value is newer.
2276 		 */
2277 		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2278 		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2279 			/* do nothing */
2280 		} else {
2281 			xfs_buf_relse(bp);
2282 			trace_xfs_log_recover_inode_skip(log, in_f);
2283 			error = 0;
2284 			goto error;
2285 		}
2286 	}
2287 	/* Take the opportunity to reset the flush iteration count */
2288 	dicp->di_flushiter = 0;
2289 
2290 	if (unlikely(S_ISREG(dicp->di_mode))) {
2291 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2292 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2293 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2294 					 XFS_ERRLEVEL_LOW, mp, dicp);
2295 			xfs_buf_relse(bp);
2296 			xfs_alert(mp,
2297 		"%s: Bad regular inode log record, rec ptr 0x%p, "
2298 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2299 				__func__, item, dip, bp, in_f->ilf_ino);
2300 			error = EFSCORRUPTED;
2301 			goto error;
2302 		}
2303 	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
2304 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2305 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2306 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2307 			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2308 					     XFS_ERRLEVEL_LOW, mp, dicp);
2309 			xfs_buf_relse(bp);
2310 			xfs_alert(mp,
2311 		"%s: Bad dir inode log record, rec ptr 0x%p, "
2312 		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2313 				__func__, item, dip, bp, in_f->ilf_ino);
2314 			error = EFSCORRUPTED;
2315 			goto error;
2316 		}
2317 	}
2318 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2319 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2320 				     XFS_ERRLEVEL_LOW, mp, dicp);
2321 		xfs_buf_relse(bp);
2322 		xfs_alert(mp,
2323 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2324 	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2325 			__func__, item, dip, bp, in_f->ilf_ino,
2326 			dicp->di_nextents + dicp->di_anextents,
2327 			dicp->di_nblocks);
2328 		error = EFSCORRUPTED;
2329 		goto error;
2330 	}
2331 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2332 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2333 				     XFS_ERRLEVEL_LOW, mp, dicp);
2334 		xfs_buf_relse(bp);
2335 		xfs_alert(mp,
2336 	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2337 	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2338 			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2339 		error = EFSCORRUPTED;
2340 		goto error;
2341 	}
2342 	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2343 		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2344 				     XFS_ERRLEVEL_LOW, mp, dicp);
2345 		xfs_buf_relse(bp);
2346 		xfs_alert(mp,
2347 			"%s: Bad inode log record length %d, rec ptr 0x%p",
2348 			__func__, item->ri_buf[1].i_len, item);
2349 		error = EFSCORRUPTED;
2350 		goto error;
2351 	}
2352 
2353 	/* The core is in in-core format */
2354 	xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2355 
2356 	/* the rest is in on-disk format */
2357 	if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2358 		memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2359 			item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2360 			item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2361 	}
2362 
2363 	fields = in_f->ilf_fields;
2364 	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2365 	case XFS_ILOG_DEV:
2366 		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2367 		break;
2368 	case XFS_ILOG_UUID:
2369 		memcpy(XFS_DFORK_DPTR(dip),
2370 		       &in_f->ilf_u.ilfu_uuid,
2371 		       sizeof(uuid_t));
2372 		break;
2373 	}
2374 
2375 	if (in_f->ilf_size == 2)
2376 		goto write_inode_buffer;
2377 	len = item->ri_buf[2].i_len;
2378 	src = item->ri_buf[2].i_addr;
2379 	ASSERT(in_f->ilf_size <= 4);
2380 	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2381 	ASSERT(!(fields & XFS_ILOG_DFORK) ||
2382 	       (len == in_f->ilf_dsize));
2383 
2384 	switch (fields & XFS_ILOG_DFORK) {
2385 	case XFS_ILOG_DDATA:
2386 	case XFS_ILOG_DEXT:
2387 		memcpy(XFS_DFORK_DPTR(dip), src, len);
2388 		break;
2389 
2390 	case XFS_ILOG_DBROOT:
2391 		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2392 				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2393 				 XFS_DFORK_DSIZE(dip, mp));
2394 		break;
2395 
2396 	default:
2397 		/*
2398 		 * There are no data fork flags set.
2399 		 */
2400 		ASSERT((fields & XFS_ILOG_DFORK) == 0);
2401 		break;
2402 	}
2403 
2404 	/*
2405 	 * If we logged any attribute data, recover it.  There may or
2406 	 * may not have been any other non-core data logged in this
2407 	 * transaction.
2408 	 */
2409 	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2410 		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2411 			attr_index = 3;
2412 		} else {
2413 			attr_index = 2;
2414 		}
2415 		len = item->ri_buf[attr_index].i_len;
2416 		src = item->ri_buf[attr_index].i_addr;
2417 		ASSERT(len == in_f->ilf_asize);
2418 
2419 		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2420 		case XFS_ILOG_ADATA:
2421 		case XFS_ILOG_AEXT:
2422 			dest = XFS_DFORK_APTR(dip);
2423 			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2424 			memcpy(dest, src, len);
2425 			break;
2426 
2427 		case XFS_ILOG_ABROOT:
2428 			dest = XFS_DFORK_APTR(dip);
2429 			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2430 					 len, (xfs_bmdr_block_t*)dest,
2431 					 XFS_DFORK_ASIZE(dip, mp));
2432 			break;
2433 
2434 		default:
2435 			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2436 			ASSERT(0);
2437 			xfs_buf_relse(bp);
2438 			error = EIO;
2439 			goto error;
2440 		}
2441 	}
2442 
2443 write_inode_buffer:
2444 	ASSERT(bp->b_target->bt_mount == mp);
2445 	bp->b_iodone = xlog_recover_iodone;
2446 	xfs_buf_delwri_queue(bp, buffer_list);
2447 	xfs_buf_relse(bp);
2448 error:
2449 	if (need_free)
2450 		kmem_free(in_f);
2451 	return XFS_ERROR(error);
2452 }
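
/*
 * Region layout of an inode log item as consumed above:
 *
 *	ri_buf[0]	xfs_inode_log_format_t
 *	ri_buf[1]	inode core, converted by xfs_dinode_to_disk()
 *	ri_buf[2]	data fork, when XFS_ILOG_DFORK was logged
 *	ri_buf[3]	attr fork, when both forks were logged
 *			(otherwise the attr fork is ri_buf[2])
 *
 * which is why ilf_size is at most 4 and attr_index is 2 or 3.
 */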
2453 
2454 /*
2455  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2456  * structure, so that we know not to do any dquot item or dquot buffer
2457  * recovery of that type.
2458  */
2459 STATIC int
2460 xlog_recover_quotaoff_pass1(
2461 	xlog_t			*log,
2462 	xlog_recover_item_t	*item)
2463 {
2464 	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
2465 	ASSERT(qoff_f);
2466 
2467 	/*
2468 	 * The logitem format's flag tells us if this was user quotaoff,
2469 	 * group/project quotaoff or both.
2470 	 */
2471 	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2472 		log->l_quotaoffs_flag |= XFS_DQ_USER;
2473 	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2474 		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2475 	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2476 		log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2477 
2478 	return (0);
2479 }
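
/*
 * For example, a log containing a user quotaoff leaves XFS_DQ_USER set
 * in l_quotaoffs_flag, so pass 2 silently drops user dquot items and
 * user dquot buffers while still replaying group and project quotas.
 */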
2480 
2481 /*
2482  * Recover a dquot record
2483  */
2484 STATIC int
2485 xlog_recover_dquot_pass2(
2486 	xlog_t			*log,
2487 	struct list_head	*buffer_list,
2488 	xlog_recover_item_t	*item)
2489 {
2490 	xfs_mount_t		*mp = log->l_mp;
2491 	xfs_buf_t		*bp;
2492 	struct xfs_disk_dquot	*ddq, *recddq;
2493 	int			error;
2494 	xfs_dq_logformat_t	*dq_f;
2495 	uint			type;
2496 
2497 
2498 	/*
2499 	 * Filesystems are required to send in quota flags at mount time.
2500 	 */
2501 	if (mp->m_qflags == 0)
2502 		return (0);
2503 
2504 	recddq = item->ri_buf[1].i_addr;
2505 	if (recddq == NULL) {
2506 		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2507 		return XFS_ERROR(EIO);
2508 	}
2509 	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2510 		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2511 			item->ri_buf[1].i_len, __func__);
2512 		return XFS_ERROR(EIO);
2513 	}
2514 
2515 	/*
2516 	 * If this type of quota was turned off, ignore this record.
2517 	 */
2518 	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2519 	ASSERT(type);
2520 	if (log->l_quotaoffs_flag & type)
2521 		return (0);
2522 
2523 	/*
2524 	 * At this point we know that quota was _not_ turned off.
2525 	 * Since the mount flags are not indicating to us otherwise, this
2526 	 * must mean that quota is on, and the dquot needs to be replayed.
2527 	 * Remember that we may not have fully recovered the superblock yet,
2528 	 * so we can't do the usual trick of looking at the SB quota bits.
2529 	 *
2530 	 * The other possibility, of course, is that the quota subsystem was
2531 	 * removed since the last mount - ENOSYS.
2532 	 */
2533 	dq_f = item->ri_buf[0].i_addr;
2534 	ASSERT(dq_f);
2535 	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2536 			   "xlog_recover_dquot_pass2 (log copy)");
2537 	if (error)
2538 		return XFS_ERROR(EIO);
2539 	ASSERT(dq_f->qlf_len == 1);
2540 
2541 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2542 				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp);
2543 	if (error)
2544 		return error;
2545 
2546 	ASSERT(bp);
2547 	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2548 
2549 	/*
2550 	 * At least the magic num portion should be on disk because this
2551 	 * was among a chunk of dquots created earlier, and we did some
2552 	 * minimal initialization then.
2553 	 */
2554 	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2555 			   "xlog_recover_dquot_pass2");
2556 	if (error) {
2557 		xfs_buf_relse(bp);
2558 		return XFS_ERROR(EIO);
2559 	}
2560 
2561 	memcpy(ddq, recddq, item->ri_buf[1].i_len);
2562 
2563 	ASSERT(dq_f->qlf_size == 2);
2564 	ASSERT(bp->b_target->bt_mount == mp);
2565 	bp->b_iodone = xlog_recover_iodone;
2566 	xfs_buf_delwri_queue(bp, buffer_list);
2567 	xfs_buf_relse(bp);
2568 
2569 	return (0);
2570 }
2571 
2572 /*
2573  * This routine is called to create an in-core extent free intent
2574  * item from the efi format structure which was logged on disk.
2575  * It allocates an in-core efi, copies the extents from the format
2576  * structure into it, and adds the efi to the AIL with the given
2577  * LSN.
2578  */
2579 STATIC int
2580 xlog_recover_efi_pass2(
2581 	xlog_t			*log,
2582 	xlog_recover_item_t	*item,
2583 	xfs_lsn_t		lsn)
2584 {
2585 	int			error;
2586 	xfs_mount_t		*mp = log->l_mp;
2587 	xfs_efi_log_item_t	*efip;
2588 	xfs_efi_log_format_t	*efi_formatp;
2589 
2590 	efi_formatp = item->ri_buf[0].i_addr;
2591 
2592 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2593 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2594 					 &(efip->efi_format)))) {
2595 		xfs_efi_item_free(efip);
2596 		return error;
2597 	}
2598 	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2599 
2600 	spin_lock(&log->l_ailp->xa_lock);
2601 	/*
2602 	 * xfs_trans_ail_update() drops the AIL lock.
2603 	 */
2604 	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2605 	return 0;
2606 }
2607 
2608 
2609 /*
2610  * This routine is called when an efd format structure is found in
2611  * a committed transaction in the log.  Its purpose is to cancel
2612  * the corresponding efi if it was still in the log.  To do this
2613  * it searches the AIL for the efi with an id equal to that in the
2614  * efd format structure.  If we find it, we remove the efi from the
2615  * AIL and free it.
2616  */
2617 STATIC int
2618 xlog_recover_efd_pass2(
2619 	xlog_t			*log,
2620 	xlog_recover_item_t	*item)
2621 {
2622 	xfs_efd_log_format_t	*efd_formatp;
2623 	xfs_efi_log_item_t	*efip = NULL;
2624 	xfs_log_item_t		*lip;
2625 	__uint64_t		efi_id;
2626 	struct xfs_ail_cursor	cur;
2627 	struct xfs_ail		*ailp = log->l_ailp;
2628 
2629 	efd_formatp = item->ri_buf[0].i_addr;
2630 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2631 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2632 	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2633 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2634 	efi_id = efd_formatp->efd_efi_id;
2635 
2636 	/*
2637 	 * Search for the efi with the id in the efd format structure
2638 	 * in the AIL.
2639 	 */
2640 	spin_lock(&ailp->xa_lock);
2641 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2642 	while (lip != NULL) {
2643 		if (lip->li_type == XFS_LI_EFI) {
2644 			efip = (xfs_efi_log_item_t *)lip;
2645 			if (efip->efi_format.efi_id == efi_id) {
2646 				/*
2647 				 * xfs_trans_ail_delete() drops the
2648 				 * AIL lock.
2649 				 */
2650 				xfs_trans_ail_delete(ailp, lip,
2651 						     SHUTDOWN_CORRUPT_INCORE);
2652 				xfs_efi_item_free(efip);
2653 				spin_lock(&ailp->xa_lock);
2654 				break;
2655 			}
2656 		}
2657 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
2658 	}
2659 	xfs_trans_ail_cursor_done(ailp, &cur);
2660 	spin_unlock(&ailp->xa_lock);
2661 
2662 	return 0;
2663 }
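
/*
 * The EFI/EFD pairing in practice: freeing an extent logs an EFI, and
 * the transaction that completes the free logs an EFD carrying the
 * same efi_id.  If the log ends after the EFI but before the EFD, the
 * EFI survives this scan, stays in the AIL, and is finished later by
 * xlog_recover_process_efis(), which frees the extents itself.  If
 * both reached the disk, the EFD here removes the EFI and no further
 * work is needed.
 */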
2664 
2665 /*
2666  * Free up any resources allocated by the transaction
2667  *
2668  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2669  */
2670 STATIC void
2671 xlog_recover_free_trans(
2672 	struct xlog_recover	*trans)
2673 {
2674 	xlog_recover_item_t	*item, *n;
2675 	int			i;
2676 
2677 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2678 		/* Free the regions in the item. */
2679 		list_del(&item->ri_list);
2680 		for (i = 0; i < item->ri_cnt; i++)
2681 			kmem_free(item->ri_buf[i].i_addr);
2682 		/* Free the item itself */
2683 		kmem_free(item->ri_buf);
2684 		kmem_free(item);
2685 	}
2686 	/* Free the transaction recover structure */
2687 	kmem_free(trans);
2688 }
2689 
2690 STATIC int
2691 xlog_recover_commit_pass1(
2692 	struct log		*log,
2693 	struct xlog_recover	*trans,
2694 	xlog_recover_item_t	*item)
2695 {
2696 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2697 
2698 	switch (ITEM_TYPE(item)) {
2699 	case XFS_LI_BUF:
2700 		return xlog_recover_buffer_pass1(log, item);
2701 	case XFS_LI_QUOTAOFF:
2702 		return xlog_recover_quotaoff_pass1(log, item);
2703 	case XFS_LI_INODE:
2704 	case XFS_LI_EFI:
2705 	case XFS_LI_EFD:
2706 	case XFS_LI_DQUOT:
2707 		/* nothing to do in pass 1 */
2708 		return 0;
2709 	default:
2710 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2711 			__func__, ITEM_TYPE(item));
2712 		ASSERT(0);
2713 		return XFS_ERROR(EIO);
2714 	}
2715 }
2716 
2717 STATIC int
2718 xlog_recover_commit_pass2(
2719 	struct log		*log,
2720 	struct xlog_recover	*trans,
2721 	struct list_head	*buffer_list,
2722 	xlog_recover_item_t	*item)
2723 {
2724 	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2725 
2726 	switch (ITEM_TYPE(item)) {
2727 	case XFS_LI_BUF:
2728 		return xlog_recover_buffer_pass2(log, buffer_list, item);
2729 	case XFS_LI_INODE:
2730 		return xlog_recover_inode_pass2(log, buffer_list, item);
2731 	case XFS_LI_EFI:
2732 		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2733 	case XFS_LI_EFD:
2734 		return xlog_recover_efd_pass2(log, item);
2735 	case XFS_LI_DQUOT:
2736 		return xlog_recover_dquot_pass2(log, buffer_list, item);
2737 	case XFS_LI_QUOTAOFF:
2738 		/* nothing to do in pass2 */
2739 		return 0;
2740 	default:
2741 		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2742 			__func__, ITEM_TYPE(item));
2743 		ASSERT(0);
2744 		return XFS_ERROR(EIO);
2745 	}
2746 }
2747 
2748 /*
2749  * Perform the transaction.
2750  *
2751  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2752  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2753  */
2754 STATIC int
2755 xlog_recover_commit_trans(
2756 	struct log		*log,
2757 	struct xlog_recover	*trans,
2758 	int			pass)
2759 {
2760 	int			error = 0, error2;
2761 	xlog_recover_item_t	*item;
2762 	LIST_HEAD		(buffer_list);
2763 
2764 	hlist_del(&trans->r_list);
2765 
2766 	error = xlog_recover_reorder_trans(log, trans, pass);
2767 	if (error)
2768 		return error;
2769 
2770 	list_for_each_entry(item, &trans->r_itemq, ri_list) {
2771 		switch (pass) {
2772 		case XLOG_RECOVER_PASS1:
2773 			error = xlog_recover_commit_pass1(log, trans, item);
2774 			break;
2775 		case XLOG_RECOVER_PASS2:
2776 			error = xlog_recover_commit_pass2(log, trans,
2777 							  &buffer_list, item);
2778 			break;
2779 		default:
2780 			ASSERT(0);
2781 		}
2782 
2783 		if (error)
2784 			goto out;
2785 	}
2786 
2787 	xlog_recover_free_trans(trans);
2788 
2789 out:
2790 	error2 = xfs_buf_delwri_submit(&buffer_list);
2791 	return error ? error : error2;
2792 }
2793 
2794 STATIC int
2795 xlog_recover_unmount_trans(
2796 	struct log		*log,
2797 	xlog_recover_t		*trans)
2798 {
2799 	/* Do nothing now */
2800 	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2801 	return 0;
2802 }
2803 
2804 /*
2805  * There are two valid states of the r_state field.  0 indicates a
2806  * normal state: we have either seen the start of the transaction or
2807  * the last operation we added was not a partial operation.  If the
2808  * last operation we added to the transaction was a partial operation,
2809  * we need to mark r_state with XLOG_WAS_CONT_TRANS.
2810  *
2811  * NOTE: skip LRs with 0 data length.
2812  */
2813 STATIC int
2814 xlog_recover_process_data(
2815 	xlog_t			*log,
2816 	struct hlist_head	rhash[],
2817 	xlog_rec_header_t	*rhead,
2818 	xfs_caddr_t		dp,
2819 	int			pass)
2820 {
2821 	xfs_caddr_t		lp;
2822 	int			num_logops;
2823 	xlog_op_header_t	*ohead;
2824 	xlog_recover_t		*trans;
2825 	xlog_tid_t		tid;
2826 	int			error;
2827 	unsigned long		hash;
2828 	uint			flags;
2829 
2830 	lp = dp + be32_to_cpu(rhead->h_len);
2831 	num_logops = be32_to_cpu(rhead->h_num_logops);
2832 
2833 	/* check the log format matches our own - else we can't recover */
2834 	if (xlog_header_check_recover(log->l_mp, rhead))
2835 		return (XFS_ERROR(EIO));
2836 
2837 	while ((dp < lp) && num_logops) {
2838 		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2839 		ohead = (xlog_op_header_t *)dp;
2840 		dp += sizeof(xlog_op_header_t);
2841 		if (ohead->oh_clientid != XFS_TRANSACTION &&
2842 		    ohead->oh_clientid != XFS_LOG) {
2843 			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2844 					__func__, ohead->oh_clientid);
2845 			ASSERT(0);
2846 			return (XFS_ERROR(EIO));
2847 		}
2848 		tid = be32_to_cpu(ohead->oh_tid);
2849 		hash = XLOG_RHASH(tid);
2850 		trans = xlog_recover_find_tid(&rhash[hash], tid);
2851 		if (trans == NULL) {		   /* not found; add new tid */
2852 			if (ohead->oh_flags & XLOG_START_TRANS)
2853 				xlog_recover_new_tid(&rhash[hash], tid,
2854 					be64_to_cpu(rhead->h_lsn));
2855 		} else {
2856 			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2857 				xfs_warn(log->l_mp, "%s: bad length 0x%x",
2858 					__func__, be32_to_cpu(ohead->oh_len));
2859 				WARN_ON(1);
2860 				return (XFS_ERROR(EIO));
2861 			}
2862 			flags = ohead->oh_flags & ~XLOG_END_TRANS;
2863 			if (flags & XLOG_WAS_CONT_TRANS)
2864 				flags &= ~XLOG_CONTINUE_TRANS;
2865 			switch (flags) {
2866 			case XLOG_COMMIT_TRANS:
2867 				error = xlog_recover_commit_trans(log,
2868 								trans, pass);
2869 				break;
2870 			case XLOG_UNMOUNT_TRANS:
2871 				error = xlog_recover_unmount_trans(log, trans);
2872 				break;
2873 			case XLOG_WAS_CONT_TRANS:
2874 				error = xlog_recover_add_to_cont_trans(log,
2875 						trans, dp,
2876 						be32_to_cpu(ohead->oh_len));
2877 				break;
2878 			case XLOG_START_TRANS:
2879 				xfs_warn(log->l_mp, "%s: bad transaction",
2880 					__func__);
2881 				ASSERT(0);
2882 				error = XFS_ERROR(EIO);
2883 				break;
2884 			case 0:
2885 			case XLOG_CONTINUE_TRANS:
2886 				error = xlog_recover_add_to_trans(log, trans,
2887 						dp, be32_to_cpu(ohead->oh_len));
2888 				break;
2889 			default:
2890 				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2891 					__func__, flags);
2892 				ASSERT(0);
2893 				error = XFS_ERROR(EIO);
2894 				break;
2895 			}
2896 			if (error)
2897 				return error;
2898 		}
2899 		dp += be32_to_cpu(ohead->oh_len);
2900 		num_logops--;
2901 	}
2902 	return 0;
2903 }
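
/*
 * A log record body is a packed stream of operations:
 *
 *	| xlog_op_header | oh_len bytes | xlog_op_header | oh_len bytes | ...
 *
 * oh_tid ties the operations of one transaction together (operations
 * from different transactions may interleave across records),
 * XLOG_START_TRANS opens a transaction and XLOG_COMMIT_TRANS hands the
 * accumulated items to xlog_recover_commit_trans() above.
 */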
2904 
2905 /*
2906  * Process an extent free intent item that was recovered from
2907  * the log.  We need to free the extents that it describes.
2908  */
2909 STATIC int
2910 xlog_recover_process_efi(
2911 	xfs_mount_t		*mp,
2912 	xfs_efi_log_item_t	*efip)
2913 {
2914 	xfs_efd_log_item_t	*efdp;
2915 	xfs_trans_t		*tp;
2916 	int			i;
2917 	int			error = 0;
2918 	xfs_extent_t		*extp;
2919 	xfs_fsblock_t		startblock_fsb;
2920 
2921 	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
2922 
2923 	/*
2924 	 * First check the validity of the extents described by the
2925 	 * EFI.  If any are bad, then assume that all are bad and
2926 	 * just toss the EFI.
2927 	 */
2928 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2929 		extp = &(efip->efi_format.efi_extents[i]);
2930 		startblock_fsb = XFS_BB_TO_FSB(mp,
2931 				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
2932 		if ((startblock_fsb == 0) ||
2933 		    (extp->ext_len == 0) ||
2934 		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2935 		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2936 			/*
2937 			 * This will pull the EFI from the AIL and
2938 			 * free the memory associated with it.
2939 			 */
2940 			xfs_efi_release(efip, efip->efi_format.efi_nextents);
2941 			return XFS_ERROR(EIO);
2942 		}
2943 	}
2944 
2945 	tp = xfs_trans_alloc(mp, 0);
2946 	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
2947 	if (error)
2948 		goto abort_error;
2949 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
2950 
2951 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2952 		extp = &(efip->efi_format.efi_extents[i]);
2953 		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
2954 		if (error)
2955 			goto abort_error;
2956 		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
2957 					 extp->ext_len);
2958 	}
2959 
2960 	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
2961 	error = xfs_trans_commit(tp, 0);
2962 	return error;
2963 
2964 abort_error:
2965 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
2966 	return error;
2967 }
2968 
2969 /*
2970  * When this is called, all of the EFIs which did not have
2971  * corresponding EFDs should be in the AIL.  What we do now
2972  * is free the extents associated with each one.
2973  *
2974  * Since we process the EFIs in normal transactions, they
2975  * will be removed at some point after the commit.  This prevents
2976  * us from just walking down the list processing each one.
2977  * We'll use a flag in the EFI to skip those that we've already
2978  * processed and use the AIL iteration mechanism's generation
2979  * count to try to speed this up at least a bit.
2980  *
2981  * When we start, we know that the EFIs are the only things in
2982  * the AIL.  As we process them, however, other items are added
2983  * to the AIL.  Since everything added to the AIL must come after
2984  * everything already in the AIL, we stop processing as soon as
2985  * we see something other than an EFI in the AIL.
2986  */
2987 STATIC int
2988 xlog_recover_process_efis(
2989 	xlog_t			*log)
2990 {
2991 	xfs_log_item_t		*lip;
2992 	xfs_efi_log_item_t	*efip;
2993 	int			error = 0;
2994 	struct xfs_ail_cursor	cur;
2995 	struct xfs_ail		*ailp;
2996 
2997 	ailp = log->l_ailp;
2998 	spin_lock(&ailp->xa_lock);
2999 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3000 	while (lip != NULL) {
3001 		/*
3002 		 * We're done when we see something other than an EFI.
3003 		 * There should be no EFIs left in the AIL now.
3004 		 */
3005 		if (lip->li_type != XFS_LI_EFI) {
3006 #ifdef DEBUG
3007 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3008 				ASSERT(lip->li_type != XFS_LI_EFI);
3009 #endif
3010 			break;
3011 		}
3012 
3013 		/*
3014 		 * Skip EFIs that we've already processed.
3015 		 */
3016 		efip = (xfs_efi_log_item_t *)lip;
3017 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3018 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
3019 			continue;
3020 		}
3021 
3022 		spin_unlock(&ailp->xa_lock);
3023 		error = xlog_recover_process_efi(log->l_mp, efip);
3024 		spin_lock(&ailp->xa_lock);
3025 		if (error)
3026 			goto out;
3027 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
3028 	}
3029 out:
3030 	xfs_trans_ail_cursor_done(ailp, &cur);
3031 	spin_unlock(&ailp->xa_lock);
3032 	return error;
3033 }
3034 
3035 /*
3036  * This routine performs a transaction to null out a bad inode pointer
3037  * in an agi unlinked inode hash bucket.
3038  */
3039 STATIC void
3040 xlog_recover_clear_agi_bucket(
3041 	xfs_mount_t	*mp,
3042 	xfs_agnumber_t	agno,
3043 	int		bucket)
3044 {
3045 	xfs_trans_t	*tp;
3046 	xfs_agi_t	*agi;
3047 	xfs_buf_t	*agibp;
3048 	int		offset;
3049 	int		error;
3050 
3051 	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3052 	error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3053 				  0, 0, 0);
3054 	if (error)
3055 		goto out_abort;
3056 
3057 	error = xfs_read_agi(mp, tp, agno, &agibp);
3058 	if (error)
3059 		goto out_abort;
3060 
3061 	agi = XFS_BUF_TO_AGI(agibp);
3062 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3063 	offset = offsetof(xfs_agi_t, agi_unlinked) +
3064 		 (sizeof(xfs_agino_t) * bucket);
3065 	xfs_trans_log_buf(tp, agibp, offset,
3066 			  (offset + sizeof(xfs_agino_t) - 1));
3067 
3068 	error = xfs_trans_commit(tp, 0);
3069 	if (error)
3070 		goto out_error;
3071 	return;
3072 
3073 out_abort:
3074 	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3075 out_error:
3076 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3077 	return;
3078 }
3079 
3080 STATIC xfs_agino_t
3081 xlog_recover_process_one_iunlink(
3082 	struct xfs_mount		*mp,
3083 	xfs_agnumber_t			agno,
3084 	xfs_agino_t			agino,
3085 	int				bucket)
3086 {
3087 	struct xfs_buf			*ibp;
3088 	struct xfs_dinode		*dip;
3089 	struct xfs_inode		*ip;
3090 	xfs_ino_t			ino;
3091 	int				error;
3092 
3093 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
3094 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3095 	if (error)
3096 		goto fail;
3097 
3098 	/*
3099 	 * Get the on disk inode to find the next inode in the bucket.
3100 	 */
3101 	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0);
3102 	if (error)
3103 		goto fail_iput;
3104 
3105 	ASSERT(ip->i_d.di_nlink == 0);
3106 	ASSERT(ip->i_d.di_mode != 0);
3107 
3108 	/* setup for the next pass */
3109 	agino = be32_to_cpu(dip->di_next_unlinked);
3110 	xfs_buf_relse(ibp);
3111 
3112 	/*
3113 	 * Prevent any DMAPI event from being sent when the reference on
3114 	 * the inode is dropped.
3115 	 */
3116 	ip->i_d.di_dmevmask = 0;
3117 
3118 	IRELE(ip);
3119 	return agino;
3120 
3121  fail_iput:
3122 	IRELE(ip);
3123  fail:
3124 	/*
3125 	 * We can't read in the inode this bucket points to, or this inode
3126 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
3127 	 * some inodes and space, but at least we won't hang.
3128 	 *
3129 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3130 	 * clear the inode pointer in the bucket.
3131 	 */
3132 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
3133 	return NULLAGINO;
3134 }
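
/*
 * The unlinked buckets are singly linked lists threaded through the
 * on-disk di_next_unlinked fields, e.g.
 *
 *	agi_unlinked[7] -> inode A -> inode B -> NULLAGINO
 *
 * Each xfs_iget() above finds an inode with a zero link count, so the
 * final IRELE() drops the last reference and the normal inactivation
 * path truncates and frees it; we only read the on-disk buffer to pick
 * up the next agino before that happens.
 */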
3135 
3136 /*
3137  * xlog_iunlink_recover
3138  *
3139  * This is called during recovery to process any inodes which
3140  * we unlinked but not freed when the system crashed.  These
3141  * we unlinked but did not free when the system crashed.  These
3142  * here is scan all the AGIs and fully truncate and free any
3143  * inodes found on the lists.  Each inode is removed from the
3144  * lists when it has been fully truncated and is freed.  The
3145  * freeing of the inode and its removal from the list must be
3146  * atomic.
3147  */
3148 STATIC void
3149 xlog_recover_process_iunlinks(
3150 	xlog_t		*log)
3151 {
3152 	xfs_mount_t	*mp;
3153 	xfs_agnumber_t	agno;
3154 	xfs_agi_t	*agi;
3155 	xfs_buf_t	*agibp;
3156 	xfs_agino_t	agino;
3157 	int		bucket;
3158 	int		error;
3159 	uint		mp_dmevmask;
3160 
3161 	mp = log->l_mp;
3162 
3163 	/*
3164 	 * Prevent any DMAPI event from being sent while in this function.
3165 	 */
3166 	mp_dmevmask = mp->m_dmevmask;
3167 	mp->m_dmevmask = 0;
3168 
3169 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3170 		/*
3171 		 * Find the agi for this ag.
3172 		 */
3173 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3174 		if (error) {
3175 			/*
3176 			 * AGI is b0rked. Don't process it.
3177 			 *
3178 			 * We should probably mark the filesystem as corrupt
3179 			 * after we've recovered all the AGs we can....
3180 			 */
3181 			continue;
3182 		}
3183 		/*
3184 		 * Unlock the buffer so that it can be acquired in the normal
3185 		 * course of the transaction to truncate and free each inode.
3186 		 * Because we are not racing with anyone else here for the AGI
3187 		 * buffer, we don't even need to hold it locked to read the
3188 		 * initial unlinked bucket entries out of the buffer. We keep
3189 		 * a buffer reference, though, so that it stays pinned in memory
3190 		 * while we need the buffer.
3191 		 */
3192 		agi = XFS_BUF_TO_AGI(agibp);
3193 		xfs_buf_unlock(agibp);
3194 
3195 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3196 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3197 			while (agino != NULLAGINO) {
3198 				agino = xlog_recover_process_one_iunlink(mp,
3199 							agno, agino, bucket);
3200 			}
3201 		}
3202 		xfs_buf_rele(agibp);
3203 	}
3204 
3205 	mp->m_dmevmask = mp_dmevmask;
3206 }
3207 
3208 
3209 #ifdef DEBUG
3210 STATIC void
3211 xlog_pack_data_checksum(
3212 	xlog_t		*log,
3213 	xlog_in_core_t	*iclog,
3214 	int		size)
3215 {
3216 	int		i;
3217 	__be32		*up;
3218 	uint		chksum = 0;
3219 
3220 	up = (__be32 *)iclog->ic_datap;
3221 	/* divide length by 4 to get # words */
3222 	for (i = 0; i < (size >> 2); i++) {
3223 		chksum ^= be32_to_cpu(*up);
3224 		up++;
3225 	}
3226 	iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3227 }
3228 #else
3229 #define xlog_pack_data_checksum(log, iclog, size)
3230 #endif
3231 
3232 /*
3233  * Stamp cycle number in every block
3234  */
3235 void
3236 xlog_pack_data(
3237 	xlog_t			*log,
3238 	xlog_in_core_t		*iclog,
3239 	int			roundoff)
3240 {
3241 	int			i, j, k;
3242 	int			size = iclog->ic_offset + roundoff;
3243 	__be32			cycle_lsn;
3244 	xfs_caddr_t		dp;
3245 
3246 	xlog_pack_data_checksum(log, iclog, size);
3247 
3248 	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3249 
3250 	dp = iclog->ic_datap;
3251 	for (i = 0; i < BTOBB(size) &&
3252 		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3253 		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3254 		*(__be32 *)dp = cycle_lsn;
3255 		dp += BBSIZE;
3256 	}
3257 
3258 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3259 		xlog_in_core_2_t *xhdr = iclog->ic_data;
3260 
3261 		for ( ; i < BTOBB(size); i++) {
3262 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3263 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3264 			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3265 			*(__be32 *)dp = cycle_lsn;
3266 			dp += BBSIZE;
3267 		}
3268 
3269 		for (i = 1; i < log->l_iclog_heads; i++) {
3270 			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3271 		}
3272 	}
3273 }
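
/*
 * Example: for a 16k record written in cycle 5, the first __be32 of
 * each of its 32 x 512 byte blocks is saved in h_cycle_data[] and
 * overwritten with cpu_to_be32(5).  The head search can then find
 * where the stamped cycle number steps backwards to locate the log
 * head, and xlog_unpack_data() below puts the saved words back during
 * recovery.  Records larger than XLOG_HEADER_CYCLE_SIZE (32k) spill
 * the saved words into the extended headers handled by the v2 branch
 * above.
 */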
3274 
3275 STATIC void
3276 xlog_unpack_data(
3277 	xlog_rec_header_t	*rhead,
3278 	xfs_caddr_t		dp,
3279 	xlog_t			*log)
3280 {
3281 	int			i, j, k;
3282 
3283 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3284 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3285 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3286 		dp += BBSIZE;
3287 	}
3288 
3289 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3290 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3291 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3292 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3293 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3294 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3295 			dp += BBSIZE;
3296 		}
3297 	}
3298 }
3299 
3300 STATIC int
3301 xlog_valid_rec_header(
3302 	xlog_t			*log,
3303 	xlog_rec_header_t	*rhead,
3304 	xfs_daddr_t		blkno)
3305 {
3306 	int			hlen;
3307 
3308 	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3309 		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3310 				XFS_ERRLEVEL_LOW, log->l_mp);
3311 		return XFS_ERROR(EFSCORRUPTED);
3312 	}
3313 	if (unlikely(
3314 	    (!rhead->h_version ||
3315 	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3316 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3317 			__func__, be32_to_cpu(rhead->h_version));
3318 		return XFS_ERROR(EIO);
3319 	}
3320 
3321 	/* LR body must have data or it wouldn't have been written */
3322 	hlen = be32_to_cpu(rhead->h_len);
3323 	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3324 		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3325 				XFS_ERRLEVEL_LOW, log->l_mp);
3326 		return XFS_ERROR(EFSCORRUPTED);
3327 	}
3328 	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3329 		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3330 				XFS_ERRLEVEL_LOW, log->l_mp);
3331 		return XFS_ERROR(EFSCORRUPTED);
3332 	}
3333 	return 0;
3334 }
3335 
3336 /*
3337  * Read the log from tail to head and process the log records found.
3338  * Handle the two cases where the tail and head are in the same cycle
3339  * and where the active portion of the log wraps around the end of
3340  * the physical log separately.  The pass parameter is passed through
3341  * to the routines called to process the data and is not looked at
3342  * here.
3343  */
3344 STATIC int
3345 xlog_do_recovery_pass(
3346 	xlog_t			*log,
3347 	xfs_daddr_t		head_blk,
3348 	xfs_daddr_t		tail_blk,
3349 	int			pass)
3350 {
3351 	xlog_rec_header_t	*rhead;
3352 	xfs_daddr_t		blk_no;
3353 	xfs_caddr_t		offset;
3354 	xfs_buf_t		*hbp, *dbp;
3355 	int			error = 0, h_size;
3356 	int			bblks, split_bblks;
3357 	int			hblks, split_hblks, wrapped_hblks;
3358 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
3359 
3360 	ASSERT(head_blk != tail_blk);
3361 
3362 	/*
3363 	 * Read the header of the tail block and get the iclog buffer size from
3364 	 * h_size.  Use this to tell how many sectors make up the log header.
3365 	 */
3366 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3367 		/*
3368 		 * When using variable length iclogs, read first sector of
3369 		 * iclog header and extract the header size from it.  Get a
3370 		 * new hbp that is the correct size.
3371 		 */
3372 		hbp = xlog_get_bp(log, 1);
3373 		if (!hbp)
3374 			return ENOMEM;
3375 
3376 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3377 		if (error)
3378 			goto bread_err1;
3379 
3380 		rhead = (xlog_rec_header_t *)offset;
3381 		error = xlog_valid_rec_header(log, rhead, tail_blk);
3382 		if (error)
3383 			goto bread_err1;
3384 		h_size = be32_to_cpu(rhead->h_size);
3385 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3386 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3387 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3388 			if (h_size % XLOG_HEADER_CYCLE_SIZE)
3389 				hblks++;
3390 			xlog_put_bp(hbp);
3391 			hbp = xlog_get_bp(log, hblks);
3392 		} else {
3393 			hblks = 1;
3394 		}
3395 	} else {
3396 		ASSERT(log->l_sectBBsize == 1);
3397 		hblks = 1;
3398 		hbp = xlog_get_bp(log, 1);
3399 		h_size = XLOG_BIG_RECORD_BSIZE;
3400 	}
3401 
3402 	if (!hbp)
3403 		return ENOMEM;
3404 	dbp = xlog_get_bp(log, BTOBB(h_size));
3405 	if (!dbp) {
3406 		xlog_put_bp(hbp);
3407 		return ENOMEM;
3408 	}
3409 
3410 	memset(rhash, 0, sizeof(rhash));
3411 	if (tail_blk <= head_blk) {
3412 		for (blk_no = tail_blk; blk_no < head_blk; ) {
3413 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3414 			if (error)
3415 				goto bread_err2;
3416 
3417 			rhead = (xlog_rec_header_t *)offset;
3418 			error = xlog_valid_rec_header(log, rhead, blk_no);
3419 			if (error)
3420 				goto bread_err2;
3421 
3422 			/* blocks in data section */
3423 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3424 			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3425 					   &offset);
3426 			if (error)
3427 				goto bread_err2;
3428 
3429 			xlog_unpack_data(rhead, offset, log);
3430 			if ((error = xlog_recover_process_data(log,
3431 						rhash, rhead, offset, pass)))
3432 				goto bread_err2;
3433 			blk_no += bblks + hblks;
3434 		}
3435 	} else {
3436 		/*
3437 		 * Perform recovery around the end of the physical log.
3438 		 * When the head is not on the same cycle number as the tail,
3439 		 * we can't do a sequential recovery as above.
3440 		 */
3441 		blk_no = tail_blk;
3442 		while (blk_no < log->l_logBBsize) {
3443 			/*
3444 			 * Check for header wrapping around physical end-of-log
3445 			 */
3446 			offset = hbp->b_addr;
3447 			split_hblks = 0;
3448 			wrapped_hblks = 0;
3449 			if (blk_no + hblks <= log->l_logBBsize) {
3450 				/* Read header in one read */
3451 				error = xlog_bread(log, blk_no, hblks, hbp,
3452 						   &offset);
3453 				if (error)
3454 					goto bread_err2;
3455 			} else {
3456 				/* This LR is split across physical log end */
3457 				if (blk_no != log->l_logBBsize) {
3458 					/* some data before physical log end */
3459 					ASSERT(blk_no <= INT_MAX);
3460 					split_hblks = log->l_logBBsize - (int)blk_no;
3461 					ASSERT(split_hblks > 0);
3462 					error = xlog_bread(log, blk_no,
3463 							   split_hblks, hbp,
3464 							   &offset);
3465 					if (error)
3466 						goto bread_err2;
3467 				}
3468 
3469 				/*
3470 				 * Note: this black magic still works with
3471 				 * large sector sizes (non-512) only because:
3472 				 * - we increased the buffer size originally
3473 				 *   by 1 sector giving us enough extra space
3474 				 *   for the second read;
3475 				 * - the log start is guaranteed to be sector
3476 				 *   aligned;
3477 				 * - we read the log end (LR header start)
3478 				 *   _first_, then the log start (LR header end)
3479 				 *   - order is important.
3480 				 */
3481 				wrapped_hblks = hblks - split_hblks;
3482 				error = xlog_bread_offset(log, 0,
3483 						wrapped_hblks, hbp,
3484 						offset + BBTOB(split_hblks));
3485 				if (error)
3486 					goto bread_err2;
3487 			}
3488 			rhead = (xlog_rec_header_t *)offset;
3489 			error = xlog_valid_rec_header(log, rhead,
3490 						split_hblks ? blk_no : 0);
3491 			if (error)
3492 				goto bread_err2;
3493 
3494 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3495 			blk_no += hblks;
3496 
3497 			/* Read in data for log record */
3498 			if (blk_no + bblks <= log->l_logBBsize) {
3499 				error = xlog_bread(log, blk_no, bblks, dbp,
3500 						   &offset);
3501 				if (error)
3502 					goto bread_err2;
3503 			} else {
3504 				/* This log record is split across the
3505 				 * physical end of log */
3506 				offset = dbp->b_addr;
3507 				split_bblks = 0;
3508 				if (blk_no != log->l_logBBsize) {
3509 					/* some data is before the physical
3510 					 * end of log */
3511 					ASSERT(!wrapped_hblks);
3512 					ASSERT(blk_no <= INT_MAX);
3513 					split_bblks =
3514 						log->l_logBBsize - (int)blk_no;
3515 					ASSERT(split_bblks > 0);
3516 					error = xlog_bread(log, blk_no,
3517 							split_bblks, dbp,
3518 							&offset);
3519 					if (error)
3520 						goto bread_err2;
3521 				}
3522 
3523 				/*
3524 				 * Note: this black magic still works with
3525 				 * large sector sizes (non-512) only because:
3526 				 * - we increased the buffer size originally
3527 				 *   by 1 sector giving us enough extra space
3528 				 *   for the second read;
3529 				 * - the log start is guaranteed to be sector
3530 				 *   aligned;
3531 				 * - we read the log end (LR header start)
3532 				 *   _first_, then the log start (LR header end)
3533 				 *   - order is important.
3534 				 */
3535 				error = xlog_bread_offset(log, 0,
3536 						bblks - split_bblks, dbp,
3537 						offset + BBTOB(split_bblks));
3538 				if (error)
3539 					goto bread_err2;
3540 			}
3541 			xlog_unpack_data(rhead, offset, log);
3542 			if ((error = xlog_recover_process_data(log, rhash,
3543 							rhead, offset, pass)))
3544 				goto bread_err2;
3545 			blk_no += bblks;
3546 		}
3547 
3548 		ASSERT(blk_no >= log->l_logBBsize);
3549 		blk_no -= log->l_logBBsize;
3550 
3551 		/* read first part of physical log */
3552 		while (blk_no < head_blk) {
3553 			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3554 			if (error)
3555 				goto bread_err2;
3556 
3557 			rhead = (xlog_rec_header_t *)offset;
3558 			error = xlog_valid_rec_header(log, rhead, blk_no);
3559 			if (error)
3560 				goto bread_err2;
3561 
3562 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3563 			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3564 					   &offset);
3565 			if (error)
3566 				goto bread_err2;
3567 
3568 			xlog_unpack_data(rhead, offset, log);
3569 			if ((error = xlog_recover_process_data(log, rhash,
3570 							rhead, offset, pass)))
3571 				goto bread_err2;
3572 			blk_no += bblks + hblks;
3573 		}
3574 	}
3575 
3576  bread_err2:
3577 	xlog_put_bp(dbp);
3578  bread_err1:
3579 	xlog_put_bp(hbp);
3580 	return error;
3581 }
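
/*
 * Standalone sketch (hypothetical helper, not called by the code above) of
 * how a range of nbblks basic blocks starting at blk_no splits across the
 * physical end of a log that is log_bbsize blocks long.  It mirrors the
 * split_hblks/wrapped_hblks and split_bblks arithmetic in
 * xlog_do_recovery_pass().
 */
static inline void
xlog_example_split_range(
	int	log_bbsize,	/* log size in basic blocks */
	int	blk_no,		/* first block of the range */
	int	nbblks,		/* length of the range */
	int	*split,		/* blocks before the physical end */
	int	*wrapped)	/* blocks that wrap around to block 0 */
{
	if (blk_no + nbblks <= log_bbsize) {
		/* no wrap: one sequential read covers the whole range */
		*split = nbblks;
		*wrapped = 0;
	} else {
		/* range straddles the end: it takes two reads */
		*split = log_bbsize - blk_no;
		*wrapped = nbblks - *split;
	}
}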
3582 
3583 /*
3584  * Do the recovery of the log.  We actually do this in two passes.
3585  * The two passes are necessary in order to implement the cancelling
3586  * of records written into the log.  The first pass determines which
3587  * buf log items have been cancelled, and the second pass replays the
3588  * log items normally, except for those which have been cancelled; the
3589  * handling of replay and cancellation lives in the log item type
3590  * specific routines.  A sketch of the shared bucket lookup follows
 * this function.
3591  *
3592  * The table of items which have cancel records in the log is allocated
3593  * and freed at this level, since only here do we know when all of
3594  * the log recovery has been completed.
3595  */
3596 STATIC int
3597 xlog_do_log_recovery(
3598 	xlog_t		*log,
3599 	xfs_daddr_t	head_blk,
3600 	xfs_daddr_t	tail_blk)
3601 {
3602 	int		error, i;
3603 
3604 	ASSERT(head_blk != tail_blk);
3605 
3606 	/*
3607 	 * First do a pass to find all of the cancelled buf log items.
3608 	 * Store them in the buf_cancel_table for use in the second pass.
3609 	 */
3610 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3611 						 sizeof(struct list_head),
3612 						 KM_SLEEP);
3613 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3614 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3615 
3616 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3617 				      XLOG_RECOVER_PASS1);
3618 	if (error != 0) {
3619 		kmem_free(log->l_buf_cancel_table);
3620 		log->l_buf_cancel_table = NULL;
3621 		return error;
3622 	}
3623 	/*
3624 	 * Then do a second pass to actually recover the items in the log.
3625 	 * When it is complete free the table of buf cancel items.
3626 	 */
3627 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3628 				      XLOG_RECOVER_PASS2);
3629 #ifdef DEBUG
3630 	if (!error) {
3631 		int	i;
3632 
3633 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3634 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3635 	}
3636 #endif	/* DEBUG */
3637 
3638 	kmem_free(log->l_buf_cancel_table);
3639 	log->l_buf_cancel_table = NULL;
3640 
3641 	return error;
3642 }
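
/*
 * Illustrative sketch of the structure the two passes share: buf cancel
 * records are hashed by block number into l_buf_cancel_table, which pass 1
 * fills and pass 2 consults.  This hypothetical helper mirrors what the
 * XLOG_BUF_CANCEL_BUCKET() macro in xfs_log_priv.h is expected to compute;
 * the real lookups live in the buffer item pass 1/2 routines.
 */
static inline struct list_head *
xlog_example_cancel_bucket(
	xlog_t		*log,
	xfs_daddr_t	blkno)
{
	/* simple modulo hash over the fixed-size table of list heads */
	return &log->l_buf_cancel_table[(__uint64_t)blkno %
					XLOG_BC_TABLE_SIZE];
}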
3643 
3644 /*
3645  * Do the actual recovery
3646  */
3647 STATIC int
3648 xlog_do_recover(
3649 	xlog_t		*log,
3650 	xfs_daddr_t	head_blk,
3651 	xfs_daddr_t	tail_blk)
3652 {
3653 	int		error;
3654 	xfs_buf_t	*bp;
3655 	xfs_sb_t	*sbp;
3656 
3657 	/*
3658 	 * First replay the images in the log.
3659 	 */
3660 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
3661 	if (error)
3662 		return error;
3663 
3664 	/*
3665 	 * If IO errors happened during recovery, bail out.
3666 	 */
3667 	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3668 		return (EIO);
3669 	}
3670 
3671 	/*
3672 	 * We now update the tail_lsn since much of the recovery has completed
3673 	 * and there may be space available to use.  If there were no extent
3674 	 * frees or iunlinks, we can free up the entire log and set the
3675 	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
3676 	 * to be the lsn of the last known good LR on disk.  If there are
3677 	 * extent frees or iunlinks they will have some entries in the AIL;
3678 	 * so we look at the AIL to determine how to set the tail_lsn.
3679 	 */
3680 	xlog_assign_tail_lsn(log->l_mp);
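	/*
	 * Roughly what xlog_assign_tail_lsn() does in this era (a sketch,
	 * not the verbatim implementation):
	 *
	 *	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
	 *	if (!tail_lsn)
	 *		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	 *	atomic64_set(&log->l_tail_lsn, tail_lsn);
	 */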
3681 
3682 	/*
3683 	 * Now that we've finished replaying all buffer and inode
3684 	 * updates, re-read in the superblock.
3685 	 */
3686 	bp = xfs_getsb(log->l_mp, 0);
3687 	XFS_BUF_UNDONE(bp);
3688 	ASSERT(!(XFS_BUF_ISWRITE(bp)));
3689 	XFS_BUF_READ(bp);
3690 	XFS_BUF_UNASYNC(bp);
3691 	xfsbdstrat(log->l_mp, bp);
3692 	error = xfs_buf_iowait(bp);
3693 	if (error) {
3694 		xfs_buf_ioerror_alert(bp, __func__);
3695 		ASSERT(0);
3696 		xfs_buf_relse(bp);
3697 		return error;
3698 	}
3699 
3700 	/* Convert superblock from on-disk format */
3701 	sbp = &log->l_mp->m_sb;
3702 	xfs_sb_from_disk(log->l_mp, XFS_BUF_TO_SBP(bp));
3703 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3704 	ASSERT(xfs_sb_good_version(sbp));
3705 	xfs_buf_relse(bp);
3706 
3707 	/* We've re-read the superblock so re-initialize per-cpu counters */
3708 	xfs_icsb_reinit_counters(log->l_mp);
3709 
3710 	xlog_recover_check_summary(log);
3711 
3712 	/* Normal transactions can now occur */
3713 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3714 	return 0;
3715 }
3716 
3717 /*
3718  * Perform recovery, re-initializing some log variables via xlog_find_tail.
3719  *
3720  * Return error or zero.
3721  */
3722 int
3723 xlog_recover(
3724 	xlog_t		*log)
3725 {
3726 	xfs_daddr_t	head_blk, tail_blk;
3727 	int		error;
3728 
3729 	/* find the tail of the log */
3730 	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3731 		return error;
3732 
3733 	if (tail_blk != head_blk) {
3734 		/* There used to be a comment here:
3735 		 *
3736 		 * disallow recovery on read-only mounts.  note -- mount
3737 		 * checks for ENOSPC and turns it into an intelligent
3738 		 * error message.
3739 		 * ...but this is no longer true.  Now, unless you specify
3740 		 * NORECOVERY (in which case this function would never be
3741 		 * called), we just go ahead and recover.  We do this all
3742 		 * under the vfs layer, so we can get away with it unless
3743 		 * the device itself is read-only, in which case we fail.
3744 		 */
3745 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3746 			return error;
3747 		}
3748 
3749 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3750 				log->l_mp->m_logname ? log->l_mp->m_logname
3751 						     : "internal");
3752 
3753 		error = xlog_do_recover(log, head_blk, tail_blk);
3754 		log->l_flags |= XLOG_RECOVERY_NEEDED;
3755 	}
3756 	return error;
3757 }
3758 
3759 /*
3760  * In the first part of recovery we replay inodes and buffers and build
3761  * up the list of extent free items which need to be processed.  Here
3762  * we process the extent free items and clean up the on disk unlinked
3763  * inode lists.  This is separated from the first part of recovery so
3764  * that the root and real-time bitmap inodes can be read in from disk in
3765  * between the two stages.  This is necessary so that we can free space
3766  * in the real-time portion of the file system.
3767  */
3768 int
3769 xlog_recover_finish(
3770 	xlog_t		*log)
3771 {
3772 	/*
3773 	 * Now we're ready to do the transactions needed for the
3774 	 * rest of recovery.  Start with completing all the extent
3775 	 * free intent records and then process the unlinked inode
3776 	 * lists.  At this point, we essentially run in normal mode
3777 	 * except that we're still performing recovery actions
3778 	 * rather than accepting new requests.
3779 	 */
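	/*
	 * Rough shape of the EFI step below (the real walk lives in
	 * xlog_recover_process_efis()):
	 *
	 *	for each log item in the AIL, oldest first:
	 *		stop at the first item that is not an EFI;
	 *		if the EFI has not already been recovered,
	 *			free the extents it describes in a new
	 *			transaction and log a matching EFD.
	 */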
3780 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3781 		int	error;
3782 		error = xlog_recover_process_efis(log);
3783 		if (error) {
3784 			xfs_alert(log->l_mp, "Failed to recover EFIs");
3785 			return error;
3786 		}
3787 		/*
3788 		 * Sync the log to get all the EFIs out of the AIL.
3789 		 * This isn't absolutely necessary, but it helps in
3790 		 * case the unlink transactions have trouble pushing the EFIs
3791 		 * out of the way.
3792 		 */
3793 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3794 
3795 		xlog_recover_process_iunlinks(log);
3796 
3797 		xlog_recover_check_summary(log);
3798 
3799 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3800 				log->l_mp->m_logname ? log->l_mp->m_logname
3801 						     : "internal");
3802 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3803 	} else {
3804 		xfs_info(log->l_mp, "Ending clean mount");
3805 	}
3806 	return 0;
3807 }
3808 
3809 
3810 #if defined(DEBUG)
3811 /*
3812  * Read all of the agf and agi counters.  The totals are only
3813  * accumulated here; no explicit superblock consistency check remains.
3814  */
3815 void
3816 xlog_recover_check_summary(
3817 	xlog_t		*log)
3818 {
3819 	xfs_mount_t	*mp;
3820 	xfs_agf_t	*agfp;
3821 	xfs_buf_t	*agfbp;
3822 	xfs_buf_t	*agibp;
3823 	xfs_agnumber_t	agno;
3824 	__uint64_t	freeblks;
3825 	__uint64_t	itotal;
3826 	__uint64_t	ifree;
3827 	int		error;
3828 
3829 	mp = log->l_mp;
3830 
3831 	freeblks = 0LL;
3832 	itotal = 0LL;
3833 	ifree = 0LL;
3834 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3835 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3836 		if (error) {
3837 			xfs_alert(mp, "%s agf read failed agno %d error %d",
3838 						__func__, agno, error);
3839 		} else {
3840 			agfp = XFS_BUF_TO_AGF(agfbp);
3841 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
3842 				    be32_to_cpu(agfp->agf_flcount);
3843 			xfs_buf_relse(agfbp);
3844 		}
3845 
3846 		error = xfs_read_agi(mp, NULL, agno, &agibp);
3847 		if (error) {
3848 			xfs_alert(mp, "%s agi read failed agno %d error %d",
3849 						__func__, agno, error);
3850 		} else {
3851 			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);
3852 
3853 			itotal += be32_to_cpu(agi->agi_count);
3854 			ifree += be32_to_cpu(agi->agi_freecount);
3855 			xfs_buf_relse(agibp);
3856 		}
3857 	}
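
	/*
	 * If the explicit checks were still present they would compare the
	 * accumulated totals against the superblock counters, i.e. expect
	 * (roughly, ignoring in-core reservations):
	 *
	 *	freeblks == mp->m_sb.sb_fdblocks
	 *	itotal   == mp->m_sb.sb_icount
	 *	ifree    == mp->m_sb.sb_ifree
	 */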
3858 }
3859 #endif /* DEBUG */
3860