xref: /openbmc/linux/fs/xfs/xfs_fsops.c (revision 06f3ef6e1705612b88aa0b6991e2ac3b8ed3f8ec)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. These are non-transactional, but must be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	struct xfs_perag	*last_pag,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
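	/*
	 * Initialise headers for each new AG, walking from the last new AG
	 * down to the first AG beyond the old AG count. Only the very last
	 * AG may be shorter than sb_agblocks.
	 */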
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

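	/*
	 * Any delta left over after carving out whole new AGs is space being
	 * added to the old last AG; extend it to cover that remainder.
	 */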
	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(last_pag, tp, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended;
	xfs_agnumber_t		oagcount;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};
	struct xfs_perag	*last_pag;

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

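	/*
	 * If we are growing, read back the last sector of the proposed new
	 * size to verify the underlying device is actually that large before
	 * committing to anything.
	 */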
	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

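	/*
	 * Work out the new AG count. If the blocks left over for the last AG
	 * would be too small to form a valid AG, round the new size down to
	 * a whole number of AGs.
	 */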
	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	nagcount = nb_div + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
	}
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	oagcount = mp->m_sb.sb_agcount;
	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
		if (error)
			return error;
	} else if (nagcount < oagcount) {
		/* TODO: shrinking the filesystem by whole AGs is not yet supported */
		return -EINVAL;
	}

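	/*
	 * A grow uses the growdata log reservation and may dip into the
	 * reserved block pool (XFS_TRANS_RESERVE); a shrink instead reserves
	 * the blocks that are being removed from the data device.
	 */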
	if (delta > 0)
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
				0, &tp);
	if (error)
		return error;

	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	} else {
		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	}
	xfs_perag_put(last_pag);
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

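	/* Commit synchronously so the size change is durable before we return. */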
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			error = xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
			if (error)
				return error;
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	struct xfs_growfs_log	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard: we would need new interfaces to sync the
	 * log first and to hold off all activity while moving it. We could
	 * end up with a shorter or longer log in the same space, or transform
	 * an internal log into an external one (or vice versa).
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Protected versions of the growfs functions; these acquire and release locks
 * on the mount point. Exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately from the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post-growfs calculations needed to reflect the new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally; the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is already live.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

void
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
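	/*
	 * These are lockless point-in-time samples of the per-cpu counters;
	 * freedata excludes blocks that are set aside and unavailable for
	 * allocation.
	 */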
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	cnt->freertx = percpu_counter_read_positive(&mp->m_frextents);
}

/*
 * Exported through the ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS.
 *
 * xfs_reserve_blocks is called to set m_resblks in the in-core mount
 * structure. The number of unused reserved blocks is kept in
 * m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise reserve
 * as many as possible to satisfy the request. The actual number reserved
 * is returned in outval.
 *
 * A null inval pointer indicates that only the currently reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	uint64_t		request;
	int64_t			free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out whether we are freeing or allocating blocks first, then
	 * we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the new request, then
	 * move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock.  Set the reserve size even if there's not enough free
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
	free = percpu_counter_sum(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  Don't set the reserved flag
		 * here - we don't want to reserve the extra reserve blocks
		 * from the reserve.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use mod_fdblocks to put the space into the reserve or into
		 * fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		if (!error)
			xfs_mod_fdblocks(mp, fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
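		/*
		 * Freezing the bdev flushes dirty data first; if the freeze
		 * succeeds, force the shutdown and then thaw the device.
		 */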
		if (!freeze_bdev(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; we just shut the shop, making sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * The shutdown state change is atomic, so only the first shutdown call
 * processes the shutdown. This means we only shut down the log once, as it
 * requires, and we don't spam the logs when multiple concurrent shutdowns
 * race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;

	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
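		/*
		 * Someone else already shut us down; wait for the log
		 * shutdown to complete before returning.
		 */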
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of on-disk metadata";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
			why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_free(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}