xref: /openbmc/linux/fs/xfs/xfs_itable.c (revision f3d7c2cd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;
	struct xfs_ibulk	*breq;
	struct xfs_bulkstat	*buf;
};
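
/*
 * A minimal sketch of a bulkstat formatter (hypothetical; the real
 * formatters live with the ioctl code, e.g. xfs_bulkstat_fmt() in
 * xfs_ioctl.c).  The contract assumed here: copy one struct xfs_bulkstat
 * into the request's user buffer, account for it in breq->ocount, and
 * return -ECANCELED once the buffer fills so the walk stops cleanly.
 */
#if 0	/* illustrative only, not built */
static int
example_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(*bstat)))
		return -EFAULT;
	breq->ubuffer += sizeof(*bstat);

	/*
	 * Signal "out of space" only after this record has been consumed,
	 * so the advanced cursor never skips an unreported inode.
	 */
	return ++breq->ocount == breq->icount ? -ECANCELED : 0;
}
#endif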

/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->startino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct user_namespace	*sb_userns = mp->m_super->s_user_ns;
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bulkstat	*buf = bc->buf;
	int			error = -EINVAL;

	if (xfs_internal_inum(mp, ino))
		goto out_advance;

	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);
	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projectid = ip->i_projid;
	buf->bs_ino = ino;
	buf->bs_uid = from_kuid(sb_userns, i_uid_into_mnt(mnt_userns, inode));
	buf->bs_gid = from_kgid(sb_userns, i_gid_into_mnt(mnt_userns, inode));
	buf->bs_size = ip->i_disk_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime = inode->i_atime.tv_sec;
	buf->bs_atime_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime = inode->i_mtime.tv_sec;
	buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime = inode->i_ctime.tv_sec;
	buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize_blks = ip->i_extsize;
	buf->bs_extents = xfs_ifork_nextents(&ip->i_df);
	xfs_bulkstat_health(ip, buf);
	buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);
	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

	if (xfs_has_v3inodes(mp)) {
		buf->bs_btime = ip->i_crtime.tv_sec;
		buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize_blks = ip->i_cowextsize;
	}

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = bc->formatter(bc->breq, buf);
	if (error == -ECANCELED)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}
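
/*
 * A hedged sketch (hypothetical helper, not part of this file) of how a
 * caller disposes of xfs_bulkstat_one_int's return values; compare
 * xfs_bulkstat_iwalk() below, which does exactly this for the "missing
 * inode" cases.
 */
#if 0	/* illustrative only, not built */
static int
example_dispose(int error)
{
	switch (error) {
	case 0:
		return 0;	/* stat copied out: cursor advanced, keep walking */
	case -ENOENT:
	case -EINVAL:
		return 0;	/* freed/invalid inode: cursor advanced, keep walking */
	case -ECANCELED:
		return error;	/* buffer full: cursor advanced, stop the walk */
	default:
		return error;	/* hard error: cursor not advanced, stop */
	}
}
#endif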

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}

	ASSERT(breq->icount == 1);

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_bulkstat_one_int(breq->mp, breq->mnt_userns, tp,
			breq->startino, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * If we reported one inode to userspace then the formatter filled the
	 * single-entry buffer and aborted the walk with -ECANCELED.  Don't
	 * leak that back to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}
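
/*
 * A hedged sketch of a caller of xfs_bulkstat_one(); compare the real
 * ioctl path (xfs_ioc_bulkstat() in xfs_ioctl.c).  The ubuffer value and
 * example_bulkstat_fmt are hypothetical stand-ins from the sketch above.
 */
#if 0	/* illustrative only, not built */
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.mnt_userns	= &init_user_ns,
		.ubuffer	= ubuffer,	/* void __user * destination */
		.startino	= ino,		/* the single inode to stat */
		.icount		= 1,
		.ocount		= 0,
	};

	error = xfs_bulkstat_one(&breq, example_bulkstat_fmt);
#endif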

static int
xfs_bulkstat_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_bstat_chunk	*bc = data;
	int			error;

	error = xfs_bulkstat_one_int(mp, bc->breq->mnt_userns, tp, ino, data);
	/* bulkstat just skips over missing inodes */
	if (error == -ENOENT || error == -EINVAL)
		return 0;
	return error;
}

/*
 * Check the incoming lastino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us to
 * the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
	struct xfs_mount	*mp,
	xfs_ino_t		startino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);

	return agno >= mp->m_sb.sb_agcount ||
	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
}
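
/*
 * Worked example of the round-trip check above, using a hypothetical
 * geometry where XFS_INO_AGINO_BITS is 20: startino = 2^52 + 5 splits
 * into agno = (2^52 + 5) >> 20, which truncates to 0 in 32 bits, and
 * agino = 5.  Rejoining gives XFS_AGINO_TO_INO(mp, 0, 5) = 5, which does
 * not equal the original startino, so that value cannot name real inode
 * space and the walk is already done.  A startino whose decoded agno is
 * merely past sb_agcount is caught by the first clause instead.
 */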

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_iwalk(breq->mp, tp, breq->startino, breq->flags,
			xfs_bulkstat_iwalk, breq->icount, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The cursor (breq->startino) will point directly at the inode that
	 * triggered any error that occurred, so on the next call the error
	 * will be triggered again and propagated to userspace as there will
	 * be no formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}
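
/*
 * A hedged sketch of driving a batched walk; compare xfs_ioc_bulkstat()
 * in xfs_ioctl.c.  breq and example_bulkstat_fmt are the hypothetical
 * pieces from the sketches above; user_cursor is a stand-in for wherever
 * the caller keeps the userspace-visible resume cursor.
 */
#if 0	/* illustrative only, not built */
	error = xfs_bulkstat(&breq, example_bulkstat_fmt);
	if (error)
		return error;

	/*
	 * breq.startino is now the resume point and breq.ocount says how
	 * many records were copied out; hand the cursor back so the next
	 * call picks up where this one stopped.
	 */
	if (copy_to_user(user_cursor, &breq.startino, sizeof(breq.startino)))
		return -EFAULT;
#endif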

/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
	struct xfs_mount		*mp,
	struct xfs_bstat		*bs1,
	const struct xfs_bulkstat	*bstat)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(bs1, 0, sizeof(struct xfs_bstat));
	bs1->bs_ino = bstat->bs_ino;
	bs1->bs_mode = bstat->bs_mode;
	bs1->bs_nlink = bstat->bs_nlink;
	bs1->bs_uid = bstat->bs_uid;
	bs1->bs_gid = bstat->bs_gid;
	bs1->bs_rdev = bstat->bs_rdev;
	bs1->bs_blksize = bstat->bs_blksize;
	bs1->bs_size = bstat->bs_size;
	bs1->bs_atime.tv_sec = bstat->bs_atime;
	bs1->bs_mtime.tv_sec = bstat->bs_mtime;
	bs1->bs_ctime.tv_sec = bstat->bs_ctime;
	bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
	bs1->bs_blocks = bstat->bs_blocks;
	bs1->bs_xflags = bstat->bs_xflags;
	bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
	bs1->bs_extents = bstat->bs_extents;
	bs1->bs_gen = bstat->bs_gen;
	bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bstat->bs_forkoff;
	bs1->bs_projid_hi = bstat->bs_projectid >> 16;
	bs1->bs_sick = bstat->bs_sick;
	bs1->bs_checked = bstat->bs_checked;
	bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bstat->bs_aextents;
}
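
/*
 * Worked example of the project id split above (values hypothetical): a
 * v5 bs_projectid of 0x00050003 becomes bs_projid_lo == 0x0003 and
 * bs_projid_hi == 0x0005 in the v1 struct, and a v1 consumer reassembles
 * the 32-bit id like this:
 */
#if 0	/* illustrative only, not built */
	u32	projid = ((u32)bs1->bs_projid_hi << 16) | bs1->bs_projid_lo;
#endif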

struct xfs_inumbers_chunk {
	inumbers_fmt_pf		formatter;
	struct xfs_ibulk	*breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, breq->startino is the inode cursor as we
 * walk through the filesystem, so we move it forward unless there was a
 * runtime error.  If the formatter tells us the buffer is now full we also
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	const struct xfs_inobt_rec_incore *irec,
	void			*data)
{
	struct xfs_inumbers	inogrp = {
		.xi_startino	= XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
		.xi_alloccount	= irec->ir_count - irec->ir_freecount,
		.xi_allocmask	= ~irec->ir_free,
		.xi_version	= XFS_INUMBERS_VERSION_V5,
	};
	struct xfs_inumbers_chunk *ic = data;
	int			error;

	error = ic->formatter(ic->breq, &inogrp);
	if (error && error != -ECANCELED)
		return error;

	ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
			XFS_INODES_PER_CHUNK;
	return error;
}
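
/*
 * Worked example of the record conversion above (values hypothetical):
 * an inobt record with ir_count == 64, ir_freecount == 2, and
 * ir_free == 0x21 (inodes 0 and 5 of the chunk free) yields
 * xi_alloccount == 62 and xi_allocmask == ~0x21, i.e. every bit set
 * except bits 0 and 5.
 */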

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_inumbers_chunk ic = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error = 0;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
			xfs_inumbers_walk, breq->icount, &ic);
	xfs_trans_cancel(tp);
out:

	/*
	 * We found some inode groups, so clear the error status and return
	 * them.  The cursor (breq->startino) will point directly at the inode
	 * group that triggered any error that occurred, so on the next call
	 * the error will be triggered again and propagated to userspace as
	 * there will be no formatted inode groups in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}
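
/*
 * A minimal sketch of an inumbers formatter (hypothetical; compare
 * xfs_inumbers_fmt() in xfs_ioctl.c), following the same copy-then-
 * signal-full contract as the bulkstat sketch above.
 */
#if 0	/* illustrative only, not built */
static int
example_inumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(*igrp)))
		return -EFAULT;
	breq->ubuffer += sizeof(*igrp);
	return ++breq->ocount == breq->icount ? -ECANCELED : 0;
}
#endif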

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
	struct xfs_inogrp		*ig1,
	const struct xfs_inumbers	*ig)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(ig1, 0, sizeof(struct xfs_inogrp));
	ig1->xi_startino = ig->xi_startino;
	ig1->xi_alloccount = ig->xi_alloccount;
	ig1->xi_allocmask = ig->xi_allocmask;
}