xref: /openbmc/linux/fs/xfs/xfs_rmap_item.c (revision 680776e5)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

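/*
 * Free an RUI item and its shadow log vector buffer.  Items too large for
 * the cache-backed structure were allocated directly and are freed the
 * same way.
 */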
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kmem_free(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)

{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

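/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given RUD: a single iovec covering the fixed-size format structure.
 */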
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_free(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

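/* Return the RUI intent item that this RUD completes. */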
static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

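/*
 * Allocate an RUD log item for the given RUI, point it back at the RUI, and
 * add it to the transaction.
 */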
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*map,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	map->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	struct xfs_rmap_intent		*ri,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, ri, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);

	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*ri)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, ri->ri_type, ri->ri_whichfork,
			ri->ri_bmap.br_state);
}

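/*
 * Create an RUI to cover the given list of deferred rmap intents, optionally
 * sorting them by AG, and log each intent into it.
 */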
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*ri;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Take a passive ref to the AG containing the space we're rmapping. */
void
xfs_rmap_update_get_group(
	struct xfs_mount	*mp,
	struct xfs_rmap_intent	*ri)
{
	xfs_agnumber_t		agno;

	agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
	ri->ri_pag = xfs_perag_intent_get(mp, agno);
}

/* Release a passive AG ref after finishing rmapping work. */
static inline void
xfs_rmap_update_put_group(
	struct xfs_rmap_intent	*ri)
{
	xfs_perag_intent_put(ri->ri_pag);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*ri;
	int				error;

	ri = container_of(item, struct xfs_rmap_intent, ri_list);

	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done), ri,
			state);

	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*ri;

	ri = container_of(item, struct xfs_rmap_intent, ri_list);

	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	struct xfs_map_extent		*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	rudp = xfs_trans_get_rud(tp, ruip);
	xlog_recover_transfer_intent(tp, dfp);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		struct xfs_rmap_intent	fake = { };
		struct xfs_map_extent	*map;

		map = &ruip->rui_format.rui_extents[i];
		switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			fake.ri_type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			fake.ri_type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			fake.ri_type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			fake.ri_type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			fake.ri_type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			fake.ri_type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			fake.ri_type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			fake.ri_type = XFS_RMAP_FREE;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			error = -EFSCORRUPTED;
			goto abort_error;
		}

		fake.ri_owner = map->me_owner;
		fake.ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		fake.ri_bmap.br_startblock = map->me_startblock;
		fake.ri_bmap.br_startoff = map->me_startoff;
		fake.ri_bmap.br_blockcount = map->me_len;
		fake.ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;

		xfs_rmap_update_get_group(mp, &fake);
		error = xfs_trans_log_finish_rmap_update(tp, rudp, &fake,
				&rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					map, sizeof(*map));
		xfs_rmap_update_put_group(&fake);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

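/* Match an RUI log item against a given intent id. */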
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

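/* Copy an RUI format structure: the header first, then each extent record. */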
static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			XFS_DEFER_OPS_TYPE_RMAP);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};