// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
#include "xfs_error.h"

/*
 * Invalidate any incore buffers associated with this remote attribute value
 * extent.  We never log remote attribute value buffers, which means that they
 * won't be attached to a transaction and are therefore safe to mark stale.
 * The actual bunmapi will be taken care of later.
 */
STATIC int
xfs_attr3_rmt_stale(
	struct xfs_inode	*dp,
	xfs_dablk_t		blkno,
	int			blkcnt)
{
	struct xfs_bmbt_irec	map;
	int			nmap;
	int			error;

	/*
	 * Roll through the "value", invalidating the attribute value's
	 * blocks.
	 */
	while (blkcnt > 0) {
		/*
		 * Map the next extent of the remote value so we know which
		 * blocks to invalidate.
		 */
		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
				       &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(dp->i_mount, nmap != 1))
			return -EFSCORRUPTED;

		/*
		 * Mark any incore buffers for the remote value as stale.  We
		 * never log remote attr value buffers, so the buffer should be
		 * easy to kill.
		 */
		error = xfs_attr_rmtval_stale(dp, &map, 0);
		if (error)
			return error;

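		/* Advance past the extent we just invalidated. */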
		blkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}

	return 0;
}

/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 */
STATIC int
xfs_attr3_leaf_inactive(
	struct xfs_trans		**trans,
	struct xfs_inode		*dp,
	struct xfs_buf			*bp)
{
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_mount		*mp = bp->b_mount;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	int				error = 0;
	int				i;

	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

	/*
	 * Find the remote value extents for this leaf and invalidate their
	 * incore buffers.
	 */
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		int		blkcnt;

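		/*
		 * Skip entries with no name index and entries whose value is
		 * stored locally in the leaf; only remote values have
		 * external blocks to invalidate.
		 */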
		if (!entry->nameidx || (entry->flags & XFS_ATTR_LOCAL))
			continue;

		name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
		if (!name_rmt->valueblk)
			continue;

		blkcnt = xfs_attr3_rmt_blocks(dp->i_mount,
				be32_to_cpu(name_rmt->valuelen));
		error = xfs_attr3_rmt_stale(dp,
				be32_to_cpu(name_rmt->valueblk), blkcnt);
		if (error)
			goto err;
	}

	xfs_trans_brelse(*trans, bp);
err:
	return error;
}

/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
STATIC int
xfs_attr3_node_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			level)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		child_fsb;
	xfs_daddr_t		parent_blkno, child_blkno;
	struct xfs_buf		*child_bp;
	struct xfs_da3_icnode_hdr ichdr;
	int			error, i;

	/*
	 * Since this code is recursive (gasp!) we must protect ourselves.
	 */
	if (level > XFS_DA_NODE_MAXDEPTH) {
		xfs_buf_mark_corrupt(bp);
		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
		return -EFSCORRUPTED;
	}

	xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
	parent_blkno = xfs_buf_daddr(bp);
	if (!ichdr.count) {
		xfs_trans_brelse(*trans, bp);
		return 0;
	}
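	/*
	 * Remember the first child block, then release the parent buffer so
	 * we are not holding buffer locks across the transaction rolls below.
	 */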
	child_fsb = be32_to_cpu(ichdr.btree[0].before);
	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
	bp = NULL;

	/*
	 * If this is the node level just above the leaves, simply loop
	 * over the leaves removing all of them.  If this is higher up
	 * in the tree, recurse downward.
	 */
	for (i = 0; i < ichdr.count; i++) {
		/*
		 * Read the subsidiary block to see what we have to work with.
		 * Don't do this in a transaction.  This is a depth-first
		 * traversal of the tree so we may deal with many blocks
		 * before we come back to this one.
		 */
		error = xfs_da3_node_read(*trans, dp, child_fsb, &child_bp,
					  XFS_ATTR_FORK);
		if (error)
			return error;

		/* save for re-read later */
		child_blkno = xfs_buf_daddr(child_bp);

		/*
		 * Invalidate the subtree: recurse into interior nodes, or
		 * invalidate the remote values of a leaf directly.
		 */
		info = child_bp->b_addr;
		switch (info->magic) {
		case cpu_to_be16(XFS_DA_NODE_MAGIC):
		case cpu_to_be16(XFS_DA3_NODE_MAGIC):
			error = xfs_attr3_node_inactive(trans, dp, child_bp,
							level + 1);
			break;
		case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
		case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
			error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
			break;
		default:
			xfs_buf_mark_corrupt(child_bp);
			xfs_trans_brelse(*trans, child_bp);
			error = -EFSCORRUPTED;
			break;
		}
		if (error)
			return error;

		/*
		 * Remove the subsidiary block from the cache and from the log.
		 */
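		/*
		 * xfs_trans_get_buf() is sufficient here; there is no need to
		 * read the block contents since we only mark the buffer stale.
		 */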
		error = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
				child_blkno,
				XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0,
				&child_bp);
		if (error)
			return error;
		xfs_trans_binval(*trans, child_bp);
		child_bp = NULL;

		/*
		 * If we're not done, re-read the parent to get the next
		 * child block number.
		 */
		if (i + 1 < ichdr.count) {
			struct xfs_da3_icnode_hdr phdr;

			error = xfs_da3_node_read_mapped(*trans, dp,
					parent_blkno, &bp, XFS_ATTR_FORK);
			if (error)
				return error;
			xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
						  bp->b_addr);
			child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
			xfs_trans_brelse(*trans, bp);
			bp = NULL;
		}
		/*
		 * Roll the transaction to commit the invalidations done so
		 * far and start a fresh transaction for the next child.
		 */
		error = xfs_trans_roll_inode(trans, dp);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Indiscriminately delete the entire attribute fork
 *
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
static int
xfs_attr3_root_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_blkinfo	*info;
	struct xfs_buf		*bp;
	xfs_daddr_t		blkno;
	int			error;

	/*
	 * Read block 0 to see what we have to work with.
	 * We only get here if we have extents; since we remove the extents
	 * in reverse order, the extent containing block 0 must still be
	 * there.
	 */
	error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
	if (error)
		return error;
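	/*
	 * Remember the root's disk address so we can grab its buffer again
	 * for invalidation after the subtree walk below has released it.
	 */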
	blkno = xfs_buf_daddr(bp);

	/*
	 * Invalidate the tree, even if the "tree" is only a single leaf block.
	 * This is a depth-first traversal!
	 */
	info = bp->b_addr;
	switch (info->magic) {
	case cpu_to_be16(XFS_DA_NODE_MAGIC):
	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
		break;
	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
		error = xfs_attr3_leaf_inactive(trans, dp, bp);
		break;
	default:
		error = -EFSCORRUPTED;
		xfs_buf_mark_corrupt(bp);
		xfs_trans_brelse(*trans, bp);
		break;
	}
	if (error)
		return error;

	/*
	 * Invalidate the incore copy of the root block.
	 */
	error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
			XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
	if (error)
		return error;
	error = bp->b_error;
	if (error) {
		xfs_trans_brelse(*trans, bp);
		return error;
	}
	xfs_trans_binval(*trans, bp);	/* remove from cache */
	/*
	 * Commit the invalidate and start the next transaction.
	 */
	error = xfs_trans_roll_inode(trans, dp);

	return error;
}

/*
 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
 * removes both the on-disk and in-memory attribute fork. Note that it also has
 * to handle the case of inodes without attributes but with an attribute fork
 * configured, so we can't use xfs_inode_hasattr() here.
 *
 * The in-memory attribute fork is removed even on error.
 */
int
xfs_attr_inactive(
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_mount	*mp;
	int			lock_mode = XFS_ILOCK_SHARED;
	int			error = 0;

	mp = dp->i_mount;

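	/*
	 * Check for an attribute fork under a shared lock first so the common
	 * no-attr-fork case avoids allocating a transaction; recheck once we
	 * hold the exclusive lock for the actual removal.
	 */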
	xfs_ilock(dp, lock_mode);
	if (!xfs_inode_has_attr_fork(dp))
		goto out_destroy_fork;
	xfs_iunlock(dp, lock_mode);

	lock_mode = 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
	if (error)
		goto out_destroy_fork;

	lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(dp, lock_mode);

	if (!xfs_inode_has_attr_fork(dp))
		goto out_cancel;

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
	xfs_trans_ijoin(trans, dp, 0);

	/*
	 * Invalidate and truncate the attribute fork extents. Make sure the
	 * fork actually has xattr blocks; otherwise the invalidation has no
	 * blocks to read and returns an error. In that case, just do the fork
	 * removal below.
	 */
	if (dp->i_af.if_nextents > 0) {
		error = xfs_attr3_root_inactive(&trans, dp);
		if (error)
			goto out_cancel;

		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
		if (error)
			goto out_cancel;
	}

	/* Reset the attribute fork - this also destroys the in-core fork */
	xfs_attr_fork_remove(dp, trans);

	error = xfs_trans_commit(trans);
	xfs_iunlock(dp, lock_mode);
	return error;

out_cancel:
	xfs_trans_cancel(trans);
out_destroy_fork:
	/* kill the in-core attr fork before we drop the inode lock */
	xfs_ifork_zap_attr(dp);
	if (lock_mode)
		xfs_iunlock(dp, lock_mode);
	return error;
}