/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"

/*
 * File system operations
 */

int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{

	memset(geo, 0, sizeof(*geo));

	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasnlink(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasshared(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
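
/*
 * The geometry filled in above reaches userspace through the
 * XFS_IOC_FSGEOMETRY ioctl. A minimal sketch of a caller, assuming the
 * xfsprogs <xfs/xfs.h> header for the ioctl number and structure
 * definitions (illustrative only; print_geometry() is not part of this
 * file or of any real API):
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int print_geometry(const char *path)
 *	{
 *		struct xfs_fsop_geom geo;
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		printf("bsize=%u agcount=%u\n", geo.blocksize, geo.agcount);
 *		close(fd);
 *		return 0;
 *	}
 */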

static struct xfs_buf *
xfs_growfs_get_hdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}

static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	struct xfs_agfl		*agfl;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp)
		return EIO;
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
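	/*
	 * Worked example (illustrative numbers only): with sb_agblocks = 1000
	 * and nb = 4500, do_div() leaves new = 4 and returns nb_mod = 500,
	 * so nagcount = 5 and the last AG is a short one of 500 blocks.
	 * The check below trims that runt off instead when it is smaller
	 * than XFS_MIN_AG_BLOCKS.
	 */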
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freespace header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agf_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agf = XFS_BUF_TO_AGF(bp);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize =
				nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG freelist header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agfl_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agfl = XFS_BUF_TO_AGFL(bp);
		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
			agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG inode header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agi_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		agi = XFS_BUF_TO_AGI(bp);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * BNO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);

		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1, 0);
		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * CNT btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1, 0);
		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * INO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_inobt_buf_ops);
		if (!bp) {
			error = ENOMEM;
			goto error0;
		}

		xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error) {
			goto error0;
		}
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	xfs_set_low_space_thresholds(mp);

	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = 0;
		/*
		 * New secondary superblocks need to be zeroed, not read from
		 * disk, as the contents of the new area we are growing into
		 * are completely unknown.
		 */
		if (agno < oagcount) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
				  &xfs_sb_buf_ops);
		} else {
			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0);
			if (bp) {
				bp->b_ops = &xfs_sb_buf_ops;
				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
			} else
				error = ENOMEM;
		}

		if (error) {
			xfs_warn(mp,
		"error %d reading secondary superblock for ag %d",
				error, agno);
			break;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);

		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error) {
			xfs_warn(mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return error;

 error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}

static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard: it needs new interfaces to sync the log
	 * first and to hold off all activity while it moves. We could have
	 * a shorter or longer log in the same space, or transform an
	 * internal log to an external one or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}

/*
 * The protected versions of the growfs functions acquire and release locks
 * on the mount point. They are exported through the ioctls
 * XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */

int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}
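/*
 * A minimal userspace sketch of driving the data-grow path above via
 * XFS_IOC_FSGROWFSDATA, assuming the xfsprogs <xfs/xfs.h> header
 * (illustrative only; xfs_growfs(8) is the real consumer, and grow_data()
 * is a hypothetical helper). Pass the current imaxpct back in unchanged
 * unless you mean to modify it:
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *
 *	int grow_data(int fd, __u64 newblocks, __u32 cur_imaxpct)
 *	{
 *		xfs_growfs_data_t in = {
 *			.newblocks = newblocks,
 *			.imaxpct = cur_imaxpct,
 *		};
 *
 *		return ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
 *	}
 */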
/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}
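/*
 * A minimal userspace sketch of XFS_IOC_FSCOUNTS, assuming the xfsprogs
 * <xfs/xfs.h> header (illustrative only; print_counts() is a hypothetical
 * helper):
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *
 *	int print_counts(int fd)
 *	{
 *		xfs_fsop_counts_t cnt;
 *
 *		if (ioctl(fd, XFS_IOC_FSCOUNTS, &cnt) < 0)
 *			return -1;
 *		printf("free data blocks: %llu, free inodes: %llu\n",
 *			(unsigned long long)cnt.freedata,
 *			(unsigned long long)cnt.freeino);
 *		return 0;
 *	}
 */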
/*
 * Exported through the ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS.
 *
 * xfs_reserve_blocks is called to set m_resblks in the in-core mount
 * structure. The number of unused reserved blocks is kept in
 * m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise reserve
 * as many as possible to satisfy the request. The actual number reserved
 * is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */
int
xfs_reserve_blocks(
	xfs_mount_t             *mp,
	__uint64_t              *inval,
	xfs_fsop_resblks_t      *outval)
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters this becomes an interesting problem: we need
	 * to work out whether we are freeing or allocating blocks first, then
	 * we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we
	 * will hold out any changes while we work out what to do. This means
	 * that the amount of free space can change while we do this, so we
	 * need to retry if we end up trying to reserve more space than is
	 * available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we don't have
	 * to care about whether per-cpu counters are enabled, disabled or
	 * even compiled in....
	 */
retry:
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		__int64_t	free;

		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out; /* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
		} else {
			fdblks_delta = -delta;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	spin_unlock(&mp->m_sb_lock);

	if (fdblks_delta) {
		/*
		 * If we are putting blocks back here, m_resblks_avail is
		 * already at its max so this will put it in the free pool.
		 *
		 * If we need space, we'll either succeed in getting it from
		 * the free block count or we'll get an ENOSPC. If we get
		 * ENOSPC, it means things changed while we were calculating
		 * fdblks_delta and so we should try again to see if there is
		 * anything left to reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve pool.
		 */
		int error;
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}
	return 0;
}
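/*
 * A minimal userspace sketch of the reserved-blocks ioctls, assuming the
 * xfsprogs <xfs/xfs.h> header and that the kernel copies the resulting
 * reservation back into the passed structure (illustrative only;
 * set_resblks() is a hypothetical helper):
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *
 *	int set_resblks(int fd, __u64 blocks)
 *	{
 *		xfs_fsop_resblks_t res = { .resblks = blocks };
 *
 *		if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0)
 *			return -1;
 *		return 0;	// res now reflects the actual reservation
 *	}
 */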
/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead and use a
 * synchronous transaction to ensure the superblock is immediately unpinned
 * and can be written back.
 */
int
xfs_fs_log_dummy(
	xfs_mount_t	*mp)
{
	xfs_trans_t	*tp;
	int		error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
					XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the UUID because it is an unchanging field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	return 0;
}
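/*
 * A minimal userspace sketch of triggering the shutdown path above for
 * testing, assuming the xfsprogs <xfs/xfs.h> header (illustrative only;
 * this ioctl is what xfs_io's "shutdown" command drives, and shut_down_fs()
 * is a hypothetical helper):
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *
 *	int shut_down_fs(int fd, int flush_log)
 *	{
 *		__u32 flags = flush_log ? XFS_FSOP_GOING_FLAGS_LOGFLUSH
 *					: XFS_FSOP_GOING_FLAGS_NOLOGFLUSH;
 *
 *		return ioctl(fd, XFS_IOC_GOINGDOWN, &flags);
 *	}
 */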
/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; we just shut down the shop and
 * make sure that absolutely nothing persistent happens to this filesystem
 * after this point.
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		logerror;

	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
			__func__, flags, lnnum, fname, __return_address);
	}
	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
    "Corruption of in-memory data detected.  Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		if (logerror) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
		"Log I/O Error Detected.  Shutting down filesystem");
		} else if (flags & SHUTDOWN_DEVICE_REQ) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"All device paths lost.  Shutting down filesystem");
		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"I/O Error Detected. Shutting down filesystem");
		}
	}
	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_alert(mp,
	"Please umount the filesystem and rectify the problem(s)");
	}
}
814