// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots, gdquots
 * and/or pdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. It is therefore an
 * error, for example, to call this twice without purging the cache.
 */
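/*
 * Rough order of operations below: log the QUOTAOFF start item and update
 * the superblock, clear the *ACTIVE flags so new dqget()s stop attaching
 * dquots, release the dquot references held by every incore inode, clear
 * the accounting/enforcement flags, purge the dquot cache, and finally log
 * the QUOTAOFF end item so that log recovery can tell which earlier dquot
 * changes belong to a quota that was being turned off.
 */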
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error? Revert back to old vals incore? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off and clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;
	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go through every single incore inode in this file system,
	 * and do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before the ACTIVE state bit was
	 * cleared could have logged many dquots, so they'd have higher LSNs
	 * than the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		IRELE(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		IRELE(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		IRELE(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}

int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

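/*
 * Illustrative note (an assumption about the callers, not taken from this
 * file): the truncation path above is what typically backs the Q_XQUOTARM
 * quotactl(2) command, used by xfs_quota's "remove" subcommand to free the
 * space held by the on-disk quota inodes once the corresponding accounting
 * has been turned off.
 */
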
/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota accounting enabled ondisk without m_qflags knowing about it.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up-to-date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;
	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}
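
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * typical userspace trigger for the enforcement switch above is
 * quotactl(2) with the XFS-specific Q_XQUOTAON command, roughly:
 *
 *	unsigned int qflags = XFS_QUOTA_UDQ_ENFD;	// user enforcement
 *	quotactl(QCMD(Q_XQUOTAON, USRQUOTA), "/dev/sdb1", 0, (void *)&qflags);
 *
 * The command and flag names come from the userspace ABI headers
 * (dqblk_xfs.h); treat the snippet as a sketch of the caller, not as a
 * definition of this kernel interface.
 */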

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
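/*
 * Illustrative note (assumption, not part of the original file): the VFS
 * quota layer passes in a struct qc_dqblk whose d_spc_* fields are byte
 * counts, which is why the space limits below go through XFS_B_TO_FSB()
 * before being stored in the on-disk dquot.  A userspace caller would
 * typically get here via quotactl(2) with Q_XSETQLIM and a struct
 * fs_disk_quota, roughly:
 *
 *	struct fs_disk_quota d = {
 *		.d_version	 = FS_DQUOT_VERSION,
 *		.d_flags	 = FS_USER_QUOTA,
 *		.d_id		 = uid,
 *		.d_fieldmask	 = FS_DQ_BHARD,
 *		.d_blk_hardlimit = limit_in_512byte_blocks,
 *	};
 *	quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/sdb1", uid, (void *)&d);
 *
 * Field and command names are from the userspace ABI (dqblk_xfs.h); the
 * snippet is only a sketch of the expected caller.
 */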
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the meantime.
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

	defq = xfs_get_defquota(dqp, q);
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
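	/*
	 * Note on the checks below: a hard limit of zero means "no hard
	 * limit", so any soft value is accepted in that case; otherwise a
	 * soft limit larger than the hard limit is quietly ignored apart
	 * from the xfs_debug() message.
	 */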
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			defq->bhardlimit = hard;
			defq->bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->rtbhardlimit = hard;
			defq->rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->ihardlimit = hard;
			defq->isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero, a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}


STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t	       *mp,
	xfs_qoff_logitem_t     **qoffstartp,
	uint		       flags)
{
	xfs_trans_t	       *tp;
	int			error;
	xfs_qoff_logitem_t     *qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}


/* Fill out the quota context. */
615 xfs_qm_scall_getquota_fill_qc(
616 	struct xfs_mount	*mp,
617 	uint			type,
618 	const struct xfs_dquot	*dqp,
619 	struct qc_dqblk		*dst)
620 {
621 	memset(dst, 0, sizeof(*dst));
622 	dst->d_spc_hardlimit =
623 		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
624 	dst->d_spc_softlimit =
625 		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
626 	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
627 	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
628 	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
629 	dst->d_ino_count = dqp->q_res_icount;
630 	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
631 	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
632 	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
633 	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
634 	dst->d_rt_spc_hardlimit =
635 		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
636 	dst->d_rt_spc_softlimit =
637 		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
638 	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
639 	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
640 	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
641 
642 	/*
643 	 * Internally, we don't reset all the timers when quota enforcement
644 	 * gets turned off. No need to confuse the user level code,
645 	 * so return zeroes in that case.
646 	 */
647 	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
648 	     dqp->q_core.d_flags == XFS_DQ_USER) ||
649 	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
650 	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
651 	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
652 	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
653 		dst->d_spc_timer = 0;
654 		dst->d_ino_timer = 0;
655 		dst->d_rt_spc_timer = 0;
656 	}
657 
658 #ifdef DEBUG
659 	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
660 	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
661 	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
662 	    dqp->q_core.d_id != 0) {
663 		if ((dst->d_space > dst->d_spc_softlimit) &&
664 		    (dst->d_spc_softlimit > 0)) {
665 			ASSERT(dst->d_spc_timer != 0);
666 		}
667 		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
668 		    (dst->d_ino_softlimit > 0)) {
669 			ASSERT(dst->d_ino_timer != 0);
670 		}
671 	}
672 #endif
673 }
674 
/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so don't
	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
	xfs_qm_dqput(dqp);
	return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
	if (error)
		return error;

	/* Fill in the ID we actually read from disk */
	*id = be32_to_cpu(dqp->q_core.d_id);

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

	xfs_qm_dqput(dqp);
	return error;
}

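/*
 * Illustrative sketch (assumption, not part of the original file): the two
 * lookups above back the Q_XGETQUOTA and Q_XGETNEXTQUOTA quotactl(2)
 * commands, e.g. roughly:
 *
 *	struct fs_disk_quota d;
 *	quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), "/dev/sdb1", uid, (void *)&d);
 *
 * with the byte-based qc_dqblk values filled in above converted back to
 * the 512-byte block units of struct fs_disk_quota by the VFS quota layer.
 */
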
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}


/*
 * Go through all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount *mp,
	uint		 flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
				    XFS_AGITER_INEW_WAIT);
}
