// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

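/* Slab caches for the rmap update intent (RUI) and done (RUD) log items. */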
struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

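/*
 * Free an RUI item along with its log vector shadow buffer.  Items whose
 * extent array fit in the embedded space came from the RUI cache; larger
 * items were allocated on the heap.
 */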
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kmem_free(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

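/*
 * Report the number of log iovecs and the number of bytes needed to log the
 * given RUI item: a single iovec covering the format structure and its
 * variable-length extent array.
 */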
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * By the time the transaction is cancelled, the RUI has either been committed
 * or aborted.  A cancelled transaction will never construct an RUD, so drop
 * the RUI reference here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)

{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

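/*
 * Report the number of log iovecs and the number of bytes needed to log the
 * given RUD item: a single iovec covering the fixed-size rud_log_format.
 */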
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_free(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

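/* Log item operations for the rmap update done (RUD) item. */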
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

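/*
 * Allocate an RUD log item, bind it to the given RUI, and join it to the
 * transaction so the deferred rmap work can be logged as done.
 */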
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD.  Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails, in
 * order to support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

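/*
 * Create the RUI for a batch of deferred rmap updates: sort the work items
 * by AG if requested, then log each update into the intent item.
 */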
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	kmem_cache_free(xfs_rmap_intent_cache, rmap);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_cache_free(xfs_rmap_intent_cache, rmap);
}

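/* Deferred operation type for reverse mapping updates. */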
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	struct xfs_map_extent		*rmap)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(rmap->me_owner) &&
	    !xfs_verify_ino(mp, rmap->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, rmap->me_startoff, rmap->me_len))
		return false;

	return xfs_verify_fsbext(mp, rmap->me_startblock, rmap->me_len);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					rmap, sizeof(*rmap));
		if (error)
			goto abort_error;

	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

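/* Match RUI log items in the AIL by intent id. */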
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

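/* Log item operations for the rmap update intent (RUI) item. */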
static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it, we drop the
 * reference that the RUD would have released, which removes the RUI from the
 * AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};