xref: /openbmc/linux/fs/xfs/xfs_itable.c (revision 2f190ac2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;
	struct xfs_ibulk	*breq;
	struct xfs_bulkstat	*buf;
};
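
/*
 * A minimal sketch (illustration only, not built) of what a
 * bulkstat_one_fmt_pf formatter might look like.  It assumes the xfs_ibulk
 * cursor fields named below (ubuffer, icount, ocount) and that the caller
 * sized ubuffer for icount records; the function name is hypothetical.  It
 * copies one record to userspace and returns -ECANCELED once the buffer is
 * full so the walk stops with the cursor already advanced.
 */
#if 0
static int
example_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	char __user	*ubuf = breq->ubuffer;

	if (copy_to_user(ubuf, bstat, sizeof(*bstat)))
		return -EFAULT;

	breq->ubuffer = ubuf + sizeof(*bstat);
	breq->ocount++;

	/* Abort the walk once we have formatted icount records. */
	return breq->ocount >= breq->icount ? -ECANCELED : 0;
}
#endif
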
/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->startino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct user_namespace	*sb_userns = mp->m_super->s_user_ns;
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bulkstat	*buf = bc->buf;
	xfs_extnum_t		nextents;
	int			error = -EINVAL;

	if (xfs_internal_inum(mp, ino))
		goto out_advance;

	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	/* xfs_iget returns the following without needing further change. */
	buf->bs_projectid = ip->i_projid;
	buf->bs_ino = ino;
	buf->bs_uid = from_kuid(sb_userns, i_uid_into_mnt(mnt_userns, inode));
	buf->bs_gid = from_kgid(sb_userns, i_gid_into_mnt(mnt_userns, inode));
	buf->bs_size = ip->i_disk_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime = inode->i_atime.tv_sec;
	buf->bs_atime_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime = inode->i_mtime.tv_sec;
	buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime = inode->i_ctime.tv_sec;
	buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize_blks = ip->i_extsize;

	nextents = xfs_ifork_nextents(&ip->i_df);
	if (!(bc->breq->flags & XFS_IBULK_NREXT64))
		buf->bs_extents = min(nextents, XFS_MAX_EXTCNT_DATA_FORK_SMALL);
	else
		buf->bs_extents64 = nextents;

	xfs_bulkstat_health(ip, buf);
	buf->bs_aextents = xfs_ifork_nextents(&ip->i_af);
	buf->bs_forkoff = xfs_inode_fork_boff(ip);
	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

	if (xfs_has_v3inodes(mp)) {
		buf->bs_btime = ip->i_crtime.tv_sec;
		buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize_blks = ip->i_cowextsize;
	}

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = bc->formatter(bc->breq, buf);
	if (error == -ECANCELED)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}

	ASSERT(breq->icount == 1);

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_bulkstat_one_int(breq->mp, breq->mnt_userns, tp,
			breq->startino, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * If we reported one inode to userspace then we abort the walk
	 * because we hit the end of the buffer.  Don't leak -ECANCELED back
	 * to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}

static int
xfs_bulkstat_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_bstat_chunk	*bc = data;
	int			error;

	error = xfs_bulkstat_one_int(mp, bc->breq->mnt_userns, tp, ino, data);
	/* bulkstat just skips over missing inodes */
	if (error == -ENOENT || error == -EINVAL)
		return 0;
	return error;
}

/*
 * Check the incoming startino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us to
 * the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
	struct xfs_mount	*mp,
	xfs_ino_t		startino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);

	return agno >= mp->m_sb.sb_agcount ||
	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
}

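/*
 * Worked example of the check above, with an assumed geometry of 24 agino
 * bits (sb_agblklog + sb_inopblog == 24) and sb_agcount == 4:
 *
 *   startino = 0x05000000: agno = 0x05000000 >> 24 = 5, which is >= the AG
 *   count, so the cursor already points past the last AG and the walk ends
 *   immediately.
 *
 *   startino = (1ULL << 60) | 0x123: the cast to xfs_agnumber_t truncates
 *   the stray high bit, so agno = 0 and agino = 0x123, and
 *   XFS_AGINO_TO_INO() reassembles 0x123 != startino; the cursor does not
 *   map to real inode space, so we are likewise done.
 */
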
/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	unsigned int		iwalk_flags = 0;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	if (breq->flags & XFS_IBULK_SAME_AG)
		iwalk_flags |= XFS_IWALK_SAME_AG;

	error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,
			xfs_bulkstat_iwalk, breq->icount, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The startino cursor will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

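/*
 * A minimal sketch (illustration only, not built) of the cursor contract
 * seen by a caller of xfs_bulkstat(): fill in startino/icount/ubuffer,
 * invoke the walk with a formatter such as the example_bulkstat_fmt sketch
 * earlier in this file, and resume a later batch from the updated startino.
 * The function and parameter names here are hypothetical, not the in-tree
 * ioctl plumbing.
 */
#if 0
static int
example_bulkstat_batch(
	struct xfs_mount	*mp,
	void __user		*ubuffer,	/* room for nr records */
	xfs_ino_t		*cursor,	/* in/out: resume point */
	unsigned int		nr,
	unsigned int		*nr_out)
{
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.mnt_userns	= &init_user_ns,
		.ubuffer	= ubuffer,
		.startino	= *cursor,
		.icount		= nr,
	};
	int			error;

	error = xfs_bulkstat(&breq, example_bulkstat_fmt);

	/* Even on error, startino already points past the last inode seen. */
	*cursor = breq.startino;
	*nr_out = breq.ocount;
	return error;
}
#endif
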
/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
	struct xfs_mount		*mp,
	struct xfs_bstat		*bs1,
	const struct xfs_bulkstat	*bstat)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(bs1, 0, sizeof(struct xfs_bstat));
	bs1->bs_ino = bstat->bs_ino;
	bs1->bs_mode = bstat->bs_mode;
	bs1->bs_nlink = bstat->bs_nlink;
	bs1->bs_uid = bstat->bs_uid;
	bs1->bs_gid = bstat->bs_gid;
	bs1->bs_rdev = bstat->bs_rdev;
	bs1->bs_blksize = bstat->bs_blksize;
	bs1->bs_size = bstat->bs_size;
	bs1->bs_atime.tv_sec = bstat->bs_atime;
	bs1->bs_mtime.tv_sec = bstat->bs_mtime;
	bs1->bs_ctime.tv_sec = bstat->bs_ctime;
	bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
	bs1->bs_blocks = bstat->bs_blocks;
	bs1->bs_xflags = bstat->bs_xflags;
	bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
	bs1->bs_extents = bstat->bs_extents;
	bs1->bs_gen = bstat->bs_gen;
	bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bstat->bs_forkoff;
	bs1->bs_projid_hi = bstat->bs_projectid >> 16;
	bs1->bs_sick = bstat->bs_sick;
	bs1->bs_checked = bstat->bs_checked;
	bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bstat->bs_aextents;
}

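/*
 * Worked example of the bs_projectid split above: the 32-bit v5 project ID
 * is carried in the v1 structure as a 16-bit lo/hi pair, so a hypothetical
 * bs_projectid of 0x00012345 becomes bs_projid_lo = 0x2345 and
 * bs_projid_hi = 0x0001, and a consumer reassembles it as
 * ((__u32)bs_projid_hi << 16) | bs_projid_lo.
 */
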
struct xfs_inumbers_chunk {
	inumbers_fmt_pf		formatter;
	struct xfs_ibulk	*breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, breq->startino is the inode cursor as we
 * walk through the filesystem, so we move it forward unless there was a
 * runtime error.  If the formatter tells us the buffer is now full we also
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	const struct xfs_inobt_rec_incore *irec,
	void			*data)
{
	struct xfs_inumbers	inogrp = {
		.xi_startino	= XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
		.xi_alloccount	= irec->ir_count - irec->ir_freecount,
		.xi_allocmask	= ~irec->ir_free,
		.xi_version	= XFS_INUMBERS_VERSION_V5,
	};
	struct xfs_inumbers_chunk *ic = data;
	int			error;

	error = ic->formatter(ic->breq, &inogrp);
	if (error && error != -ECANCELED)
		return error;

	ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
			XFS_INODES_PER_CHUNK;
	return error;
}

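/*
 * A minimal sketch (illustration only, not built, userspace-flavoured) of
 * how a consumer might decode one struct xfs_inumbers record produced
 * above: bit i of xi_allocmask corresponds to inode xi_startino + i, and
 * the number of set bits equals xi_alloccount.  The function name is
 * hypothetical and the printf() stands in for whatever the tool does with
 * each allocated inode.
 */
#if 0
static void
example_decode_inumbers(
	const struct xfs_inumbers	*ig)
{
	unsigned int	i;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		if (ig->xi_allocmask & (1ULL << i))
			printf("allocated inode %llu\n",
			       (unsigned long long)(ig->xi_startino + i));
	}
}
#endif
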
/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_inumbers_chunk ic = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error = 0;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
			xfs_inumbers_walk, breq->icount, &ic);
	xfs_trans_cancel(tp);
out:

	/*
	 * We found some inode groups, so clear the error status and return
	 * them.  The startino cursor will point directly at the inode that
	 * triggered any error that occurred, so on the next call the error
	 * will be triggered again and propagated to userspace as there will
	 * be no formatted inode groups in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
	struct xfs_inogrp		*ig1,
	const struct xfs_inumbers	*ig)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(ig1, 0, sizeof(struct xfs_inogrp));
	ig1->xi_startino = ig->xi_startino;
	ig1->xi_alloccount = ig->xi_alloccount;
	ig1->xi_allocmask = ig->xi_allocmask;
}
453