// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted by the time we get here. If
 * the transaction was cancelled, an RUD isn't going to be constructed and
 * thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
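	/*
	 * The cache-backed item embeds room for XFS_RUI_MAX_FAST_EXTENTS
	 * mappings; anything larger gets a one-off allocation sized to hold
	 * the whole extent array.
	 */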
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
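	/*
	 * The RUI starts with two references: one dropped at unpin time once
	 * the log is done with the item, and one dropped by RUD processing or
	 * by aborting the intent.  The last release frees it.
	 */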
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD carries no extent array, so there is
 * nothing else to fill in here.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD has been either committed or aborted by the time we get here.
 * Either way, drop our reference to the RUI and free the RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_cache, rudp);
}

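/*
 * The RUD only records that an intent was completed; it carries no state that
 * has to reach the AIL, so it can be released as soon as it is committed.
 */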
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
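	/*
	 * Tie the RUD to its RUI both in memory and, via the intent id, in
	 * the on-disk log format so that recovery can match the pair.
	 */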
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
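	/*
	 * Comparing AG numbers keeps the updates in ascending AG order, which
	 * matches the order in which XFS locks AG btree buffers and so avoids
	 * deadlocks while the intents are being finished.
	 */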
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	kmem_cache_free(xfs_rmap_intent_cache, rmap);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_cache_free(xfs_rmap_intent_cache, rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

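/*
 * Deferred rmap updates are queued elsewhere as struct xfs_rmap_intent items
 * on a transaction's deferred-ops list; xfs_defer_finish() then uses the
 * callbacks above to log the RUI, finish each update, and log the matching
 * RUD.
 */
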
/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	struct xfs_map_extent		*rmap)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(rmap->me_owner) &&
	    !xfs_verify_ino(mp, rmap->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, rmap->me_startoff, rmap->me_len))
		return false;

	return xfs_verify_fsbext(mp, rmap->me_startblock, rmap->me_len);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					rmap, sizeof(*rmap));
		if (error)
			goto abort_error;
	}

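	/*
	 * Commit the recovery transaction, capturing any deferred work it
	 * queued so that log recovery can finish that work once all intents
	 * have been replayed.
	 */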
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

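	/*
	 * Mark the existing intent done by logging an RUD for it, then log a
	 * fresh RUI carrying the same extents so the intent is re-recorded at
	 * the head of the log and the tail can move past the original item.
	 */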
	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it, we drop the
 * reference that the RUD represents, which removes the RUI from the AIL and
 * frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};