xref: /openbmc/linux/fs/xfs/xfs_qm_syscalls.c (revision 8dda2eac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 
8 #include "xfs.h"
9 #include "xfs_fs.h"
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
14 #include "xfs_sb.h"
15 #include "xfs_mount.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_quota.h"
19 #include "xfs_qm.h"
20 #include "xfs_icache.h"
21 
22 STATIC int
23 xfs_qm_log_quotaoff(
24 	struct xfs_mount	*mp,
25 	struct xfs_qoff_logitem	**qoffstartp,
26 	uint			flags)
27 {
28 	struct xfs_trans	*tp;
29 	int			error;
30 	struct xfs_qoff_logitem	*qoffi;
31 
32 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
33 	if (error)
34 		goto out;
35 
36 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
37 	xfs_trans_log_quotaoff_item(tp, qoffi);
38 
39 	spin_lock(&mp->m_sb_lock);
40 	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
41 	spin_unlock(&mp->m_sb_lock);
42 
43 	xfs_log_sb(tp);
44 
45 	/*
46 	 * We have to make sure that the transaction is secure on disk before we
47 	 * return and actually stop quota accounting. So, make it synchronous.
48 	 * We don't care about quotoff's performance.
49 	 */
50 	xfs_trans_set_sync(tp);
51 	error = xfs_trans_commit(tp);
52 	if (error)
53 		goto out;
54 
55 	*qoffstartp = qoffi;
56 out:
57 	return error;
58 }
59 
60 STATIC int
61 xfs_qm_log_quotaoff_end(
62 	struct xfs_mount	*mp,
63 	struct xfs_qoff_logitem	**startqoff,
64 	uint			flags)
65 {
66 	struct xfs_trans	*tp;
67 	int			error;
68 	struct xfs_qoff_logitem	*qoffi;
69 
70 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
71 	if (error)
72 		return error;
73 
74 	qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
75 					flags & XFS_ALL_QUOTA_ACCT);
76 	xfs_trans_log_quotaoff_item(tp, qoffi);
77 	*startqoff = NULL;
78 
79 	/*
80 	 * We have to make sure that the transaction is secure on disk before we
81 	 * return and actually stop quota accounting. So, make it synchronous.
82 	 * We don't care about quotoff's performance.
83 	 */
84 	xfs_trans_set_sync(tp);
85 	return xfs_trans_commit(tp);
86 }
87 
88 /*
89  * Turn off quota accounting and/or enforcement for all udquots and/or
90  * gdquots. Called only at unmount time.
91  *
92  * This assumes that there are no dquots of this file system cached
93  * incore, and modifies the ondisk dquot directly. Therefore, for example,
94  * it is an error to call this twice, without purging the cache.
95  */
96 int
97 xfs_qm_scall_quotaoff(
98 	xfs_mount_t		*mp,
99 	uint			flags)
100 {
101 	struct xfs_quotainfo	*q = mp->m_quotainfo;
102 	uint			dqtype;
103 	int			error;
104 	uint			inactivate_flags;
105 	struct xfs_qoff_logitem	*qoffstart = NULL;
106 
107 	/*
108 	 * No file system can have quotas enabled on disk but not in core.
109 	 * Note that quota utilities (like quotaoff) _expect_
110 	 * errno == -EEXIST here.
111 	 */
112 	if ((mp->m_qflags & flags) == 0)
113 		return -EEXIST;
114 	error = 0;
115 
116 	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
117 
118 	/*
119 	 * We don't want to deal with two quotaoffs messing up each other,
120 	 * so we're going to serialize it. quotaoff isn't exactly a performance
121 	 * critical thing.
122 	 * If quotaoff, then we must be dealing with the root filesystem.
123 	 */
124 	ASSERT(q);
125 	mutex_lock(&q->qi_quotaofflock);
126 
127 	/*
128 	 * If we're just turning off quota enforcement, change mp and go.
129 	 */
130 	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
131 		mp->m_qflags &= ~(flags);
132 
133 		spin_lock(&mp->m_sb_lock);
134 		mp->m_sb.sb_qflags = mp->m_qflags;
135 		spin_unlock(&mp->m_sb_lock);
136 		mutex_unlock(&q->qi_quotaofflock);
137 
138 		/* XXX what to do if error ? Revert back to old vals incore ? */
139 		return xfs_sync_sb(mp, false);
140 	}
141 
142 	dqtype = 0;
143 	inactivate_flags = 0;
144 	/*
145 	 * If accounting is off, we must turn enforcement off, clear the
146 	 * quota 'CHKD' certificate to make it known that we have to
147 	 * do a quotacheck the next time this quota is turned on.
148 	 */
149 	if (flags & XFS_UQUOTA_ACCT) {
150 		dqtype |= XFS_QMOPT_UQUOTA;
151 		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
152 		inactivate_flags |= XFS_UQUOTA_ACTIVE;
153 	}
154 	if (flags & XFS_GQUOTA_ACCT) {
155 		dqtype |= XFS_QMOPT_GQUOTA;
156 		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
157 		inactivate_flags |= XFS_GQUOTA_ACTIVE;
158 	}
159 	if (flags & XFS_PQUOTA_ACCT) {
160 		dqtype |= XFS_QMOPT_PQUOTA;
161 		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
162 		inactivate_flags |= XFS_PQUOTA_ACTIVE;
163 	}
164 
165 	/*
166 	 * Nothing to do?  Don't complain. This happens when we're just
167 	 * turning off quota enforcement.
168 	 */
169 	if ((mp->m_qflags & flags) == 0)
170 		goto out_unlock;
171 
172 	/*
173 	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
174 	 * and synchronously. If we fail to write, we should abort the
175 	 * operation as it cannot be recovered safely if we crash.
176 	 */
177 	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
178 	if (error)
179 		goto out_unlock;
180 
181 	/*
182 	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
183 	 * to take care of the race between dqget and quotaoff. We don't take
184 	 * any special locks to reset these bits. All processes need to check
185 	 * these bits *after* taking inode lock(s) to see if the particular
186 	 * quota type is in the process of being turned off. If *ACTIVE, it is
187 	 * guaranteed that all dquot structures and all quotainode ptrs will all
188 	 * stay valid as long as that inode is kept locked.
189 	 *
190 	 * There is no turning back after this.
191 	 */
192 	mp->m_qflags &= ~inactivate_flags;
193 
194 	/*
195 	 * Give back all the dquot reference(s) held by inodes.
196 	 * Here we go thru every single incore inode in this file system, and
197 	 * do a dqrele on the i_udquot/i_gdquot that it may have.
198 	 * Essentially, as long as somebody has an inode locked, this guarantees
199 	 * that quotas will not be turned off. This is handy because in a
200 	 * transaction once we lock the inode(s) and check for quotaon, we can
201 	 * depend on the quota inodes (and other things) being valid as long as
202 	 * we keep the lock(s).
203 	 */
204 	error = xfs_dqrele_all_inodes(mp, flags);
205 	ASSERT(!error);
206 
207 	/*
208 	 * Next we make the changes in the quota flag in the mount struct.
209 	 * This isn't protected by a particular lock directly, because we
210 	 * don't want to take a mrlock every time we depend on quotas being on.
211 	 */
212 	mp->m_qflags &= ~flags;
213 
214 	/*
215 	 * Go through all the dquots of this file system and purge them,
216 	 * according to what was turned off.
217 	 */
218 	xfs_qm_dqpurge_all(mp, dqtype);
219 
220 	/*
221 	 * Transactions that had started before ACTIVE state bit was cleared
222 	 * could have logged many dquots, so they'd have higher LSNs than
223 	 * the first QUOTAOFF log record does. If we happen to crash when
224 	 * the tail of the log has gone past the QUOTAOFF record, but
225 	 * before the last dquot modification, those dquots __will__
226 	 * recover, and that's not good.
227 	 *
228 	 * So, we have QUOTAOFF start and end logitems; the start
229 	 * logitem won't get overwritten until the end logitem appears...
230 	 */
231 	error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
232 	if (error) {
233 		/* We're screwed now. Shutdown is the only option. */
234 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
235 		goto out_unlock;
236 	}
237 
238 	/*
239 	 * If all quotas are completely turned off, close shop.
240 	 */
241 	if (mp->m_qflags == 0) {
242 		mutex_unlock(&q->qi_quotaofflock);
243 		xfs_qm_destroy_quotainfo(mp);
244 		return 0;
245 	}
246 
247 	/*
248 	 * Release our quotainode references if we don't need them anymore.
249 	 */
250 	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
251 		xfs_irele(q->qi_uquotaip);
252 		q->qi_uquotaip = NULL;
253 	}
254 	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
255 		xfs_irele(q->qi_gquotaip);
256 		q->qi_gquotaip = NULL;
257 	}
258 	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
259 		xfs_irele(q->qi_pquotaip);
260 		q->qi_pquotaip = NULL;
261 	}
262 
263 out_unlock:
264 	if (error && qoffstart)
265 		xfs_qm_qoff_logitem_relse(qoffstart);
266 	mutex_unlock(&q->qi_quotaofflock);
267 	return error;
268 }
269 
270 STATIC int
271 xfs_qm_scall_trunc_qfile(
272 	struct xfs_mount	*mp,
273 	xfs_ino_t		ino)
274 {
275 	struct xfs_inode	*ip;
276 	struct xfs_trans	*tp;
277 	int			error;
278 
279 	if (ino == NULLFSINO)
280 		return 0;
281 
282 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
283 	if (error)
284 		return error;
285 
286 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
287 
288 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
289 	if (error) {
290 		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
291 		goto out_put;
292 	}
293 
294 	xfs_ilock(ip, XFS_ILOCK_EXCL);
295 	xfs_trans_ijoin(tp, ip, 0);
296 
297 	ip->i_disk_size = 0;
298 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
299 
300 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
301 	if (error) {
302 		xfs_trans_cancel(tp);
303 		goto out_unlock;
304 	}
305 
306 	ASSERT(ip->i_df.if_nextents == 0);
307 
308 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
309 	error = xfs_trans_commit(tp);
310 
311 out_unlock:
312 	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
313 out_put:
314 	xfs_irele(ip);
315 	return error;
316 }
317 
318 int
319 xfs_qm_scall_trunc_qfiles(
320 	xfs_mount_t	*mp,
321 	uint		flags)
322 {
323 	int		error = -EINVAL;
324 
325 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
326 	    (flags & ~XFS_QMOPT_QUOTALL)) {
327 		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
328 			__func__, flags, mp->m_qflags);
329 		return -EINVAL;
330 	}
331 
332 	if (flags & XFS_QMOPT_UQUOTA) {
333 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
334 		if (error)
335 			return error;
336 	}
337 	if (flags & XFS_QMOPT_GQUOTA) {
338 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
339 		if (error)
340 			return error;
341 	}
342 	if (flags & XFS_QMOPT_PQUOTA)
343 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
344 
345 	return error;
346 }
347 
348 /*
349  * Switch on (a given) quota enforcement for a filesystem.  This takes
350  * effect immediately.
351  * (Switching on quota accounting must be done at mount time.)
352  */
353 int
354 xfs_qm_scall_quotaon(
355 	xfs_mount_t	*mp,
356 	uint		flags)
357 {
358 	int		error;
359 	uint		qf;
360 
361 	/*
362 	 * Switching on quota accounting must be done at mount time,
363 	 * only consider quota enforcement stuff here.
364 	 */
365 	flags &= XFS_ALL_QUOTA_ENFD;
366 
367 	if (flags == 0) {
368 		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
369 			__func__, mp->m_qflags);
370 		return -EINVAL;
371 	}
372 
373 	/*
374 	 * Can't enforce without accounting. We check the superblock
375 	 * qflags here instead of m_qflags because rootfs can have
376 	 * quota acct on ondisk without m_qflags' knowing.
377 	 */
378 	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
379 	     (flags & XFS_UQUOTA_ENFD)) ||
380 	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
381 	     (flags & XFS_GQUOTA_ENFD)) ||
382 	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
383 	     (flags & XFS_PQUOTA_ENFD))) {
384 		xfs_debug(mp,
385 			"%s: Can't enforce without acct, flags=%x sbflags=%x",
386 			__func__, flags, mp->m_sb.sb_qflags);
387 		return -EINVAL;
388 	}
389 	/*
390 	 * If everything's up to-date incore, then don't waste time.
391 	 */
392 	if ((mp->m_qflags & flags) == flags)
393 		return -EEXIST;
394 
395 	/*
396 	 * Change sb_qflags on disk but not incore mp->qflags
397 	 * if this is the root filesystem.
398 	 */
399 	spin_lock(&mp->m_sb_lock);
400 	qf = mp->m_sb.sb_qflags;
401 	mp->m_sb.sb_qflags = qf | flags;
402 	spin_unlock(&mp->m_sb_lock);
403 
404 	/*
405 	 * There's nothing to change if it's the same.
406 	 */
407 	if ((qf & flags) == flags)
408 		return -EEXIST;
409 
410 	error = xfs_sync_sb(mp, false);
411 	if (error)
412 		return error;
413 	/*
414 	 * If we aren't trying to switch on quota enforcement, we are done.
415 	 */
416 	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
417 	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
418 	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
419 	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
420 	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
421 	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
422 		return 0;
423 
424 	if (! XFS_IS_QUOTA_RUNNING(mp))
425 		return -ESRCH;
426 
427 	/*
428 	 * Switch on quota enforcement in core.
429 	 */
430 	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
431 	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
432 	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
433 
434 	return 0;
435 }
436 
437 #define XFS_QC_MASK \
438 	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
439 
440 /*
441  * Adjust limits of this quota, and the defaults if passed in.  Returns true
442  * if the new limits made sense and were applied, false otherwise.
443  */
444 static inline bool
445 xfs_setqlim_limits(
446 	struct xfs_mount	*mp,
447 	struct xfs_dquot_res	*res,
448 	struct xfs_quota_limits	*qlim,
449 	xfs_qcnt_t		hard,
450 	xfs_qcnt_t		soft,
451 	const char		*tag)
452 {
453 	/* The hard limit can't be less than the soft limit. */
454 	if (hard != 0 && hard < soft) {
455 		xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag,
456 				soft);
457 		return false;
458 	}
459 
460 	res->hardlimit = hard;
461 	res->softlimit = soft;
462 	if (qlim) {
463 		qlim->hard = hard;
464 		qlim->soft = soft;
465 	}
466 
467 	return true;
468 }
469 
470 static inline void
471 xfs_setqlim_warns(
472 	struct xfs_dquot_res	*res,
473 	struct xfs_quota_limits	*qlim,
474 	int			warns)
475 {
476 	res->warnings = warns;
477 	if (qlim)
478 		qlim->warn = warns;
479 }
480 
481 static inline void
482 xfs_setqlim_timer(
483 	struct xfs_mount	*mp,
484 	struct xfs_dquot_res	*res,
485 	struct xfs_quota_limits	*qlim,
486 	s64			timer)
487 {
488 	if (qlim) {
489 		/* Set the length of the default grace period. */
490 		res->timer = xfs_dquot_set_grace_period(timer);
491 		qlim->time = res->timer;
492 	} else {
493 		/* Set the grace period expiration on a quota. */
494 		res->timer = xfs_dquot_set_timeout(mp, timer);
495 	}
496 }
497 
498 /*
499  * Adjust quota limits, and start/stop timers accordingly.
500  */
501 int
502 xfs_qm_scall_setqlim(
503 	struct xfs_mount	*mp,
504 	xfs_dqid_t		id,
505 	xfs_dqtype_t		type,
506 	struct qc_dqblk		*newlim)
507 {
508 	struct xfs_quotainfo	*q = mp->m_quotainfo;
509 	struct xfs_dquot	*dqp;
510 	struct xfs_trans	*tp;
511 	struct xfs_def_quota	*defq;
512 	struct xfs_dquot_res	*res;
513 	struct xfs_quota_limits	*qlim;
514 	int			error;
515 	xfs_qcnt_t		hard, soft;
516 
517 	if (newlim->d_fieldmask & ~XFS_QC_MASK)
518 		return -EINVAL;
519 	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
520 		return 0;
521 
522 	/*
523 	 * We don't want to race with a quotaoff so take the quotaoff lock.
524 	 * We don't hold an inode lock, so there's nothing else to stop
525 	 * a quotaoff from happening.
526 	 */
527 	mutex_lock(&q->qi_quotaofflock);
528 
529 	/*
530 	 * Get the dquot (locked) before we start, as we need to do a
531 	 * transaction to allocate it if it doesn't exist. Once we have the
532 	 * dquot, unlock it so we can start the next transaction safely. We hold
533 	 * a reference to the dquot, so it's safe to do this unlock/lock without
534 	 * it being reclaimed in the mean time.
535 	 */
536 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
537 	if (error) {
538 		ASSERT(error != -ENOENT);
539 		goto out_unlock;
540 	}
541 
542 	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
543 	xfs_dqunlock(dqp);
544 
545 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
546 	if (error)
547 		goto out_rele;
548 
549 	xfs_dqlock(dqp);
550 	xfs_trans_dqjoin(tp, dqp);
551 
552 	/*
553 	 * Update quota limits, warnings, and timers, and the defaults
554 	 * if we're touching id == 0.
555 	 *
556 	 * Make sure that hardlimits are >= soft limits before changing.
557 	 *
558 	 * Update warnings counter(s) if requested.
559 	 *
560 	 * Timelimits for the super user set the relative time the other users
561 	 * can be over quota for this file system. If it is zero a default is
562 	 * used.  Ditto for the default soft and hard limit values (already
563 	 * done, above), and for warnings.
564 	 *
565 	 * For other IDs, userspace can bump out the grace period if over
566 	 * the soft limit.
567 	 */
568 
569 	/* Blocks on the data device. */
570 	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
571 		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
572 			dqp->q_blk.hardlimit;
573 	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
574 		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
575 			dqp->q_blk.softlimit;
576 	res = &dqp->q_blk;
577 	qlim = id == 0 ? &defq->blk : NULL;
578 
579 	if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk"))
580 		xfs_dquot_set_prealloc_limits(dqp);
581 	if (newlim->d_fieldmask & QC_SPC_WARNS)
582 		xfs_setqlim_warns(res, qlim, newlim->d_spc_warns);
583 	if (newlim->d_fieldmask & QC_SPC_TIMER)
584 		xfs_setqlim_timer(mp, res, qlim, newlim->d_spc_timer);
585 
586 	/* Blocks on the realtime device. */
587 	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
588 		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
589 			dqp->q_rtb.hardlimit;
590 	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
591 		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
592 			dqp->q_rtb.softlimit;
593 	res = &dqp->q_rtb;
594 	qlim = id == 0 ? &defq->rtb : NULL;
595 
596 	xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb");
597 	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
598 		xfs_setqlim_warns(res, qlim, newlim->d_rt_spc_warns);
599 	if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
600 		xfs_setqlim_timer(mp, res, qlim, newlim->d_rt_spc_timer);
601 
602 	/* Inodes */
603 	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
604 		(xfs_qcnt_t) newlim->d_ino_hardlimit :
605 			dqp->q_ino.hardlimit;
606 	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
607 		(xfs_qcnt_t) newlim->d_ino_softlimit :
608 			dqp->q_ino.softlimit;
609 	res = &dqp->q_ino;
610 	qlim = id == 0 ? &defq->ino : NULL;
611 
612 	xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino");
613 	if (newlim->d_fieldmask & QC_INO_WARNS)
614 		xfs_setqlim_warns(res, qlim, newlim->d_ino_warns);
615 	if (newlim->d_fieldmask & QC_INO_TIMER)
616 		xfs_setqlim_timer(mp, res, qlim, newlim->d_ino_timer);
617 
618 	if (id != 0) {
619 		/*
620 		 * If the user is now over quota, start the timelimit.
621 		 * The user will not be 'warned'.
622 		 * Note that we keep the timers ticking, whether enforcement
623 		 * is on or off. We don't really want to bother with iterating
624 		 * over all ondisk dquots and turning the timers on/off.
625 		 */
626 		xfs_qm_adjust_dqtimers(dqp);
627 	}
628 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
629 	xfs_trans_log_dquot(tp, dqp);
630 
631 	error = xfs_trans_commit(tp);
632 
633 out_rele:
634 	xfs_qm_dqrele(dqp);
635 out_unlock:
636 	mutex_unlock(&q->qi_quotaofflock);
637 	return error;
638 }
639 
640 /* Fill out the quota context. */
641 static void
642 xfs_qm_scall_getquota_fill_qc(
643 	struct xfs_mount	*mp,
644 	xfs_dqtype_t		type,
645 	const struct xfs_dquot	*dqp,
646 	struct qc_dqblk		*dst)
647 {
648 	memset(dst, 0, sizeof(*dst));
649 	dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit);
650 	dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit);
651 	dst->d_ino_hardlimit = dqp->q_ino.hardlimit;
652 	dst->d_ino_softlimit = dqp->q_ino.softlimit;
653 	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved);
654 	dst->d_ino_count = dqp->q_ino.reserved;
655 	dst->d_spc_timer = dqp->q_blk.timer;
656 	dst->d_ino_timer = dqp->q_ino.timer;
657 	dst->d_ino_warns = dqp->q_ino.warnings;
658 	dst->d_spc_warns = dqp->q_blk.warnings;
659 	dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit);
660 	dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit);
661 	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved);
662 	dst->d_rt_spc_timer = dqp->q_rtb.timer;
663 	dst->d_rt_spc_warns = dqp->q_rtb.warnings;
664 
665 	/*
666 	 * Internally, we don't reset all the timers when quota enforcement
667 	 * gets turned off. No need to confuse the user level code,
668 	 * so return zeroes in that case.
669 	 */
670 	if (!xfs_dquot_is_enforced(dqp)) {
671 		dst->d_spc_timer = 0;
672 		dst->d_ino_timer = 0;
673 		dst->d_rt_spc_timer = 0;
674 	}
675 
676 #ifdef DEBUG
677 	if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) {
678 		if ((dst->d_space > dst->d_spc_softlimit) &&
679 		    (dst->d_spc_softlimit > 0)) {
680 			ASSERT(dst->d_spc_timer != 0);
681 		}
682 		if ((dst->d_ino_count > dqp->q_ino.softlimit) &&
683 		    (dqp->q_ino.softlimit > 0)) {
684 			ASSERT(dst->d_ino_timer != 0);
685 		}
686 	}
687 #endif
688 }
689 
690 /* Return the quota information for the dquot matching id. */
691 int
692 xfs_qm_scall_getquota(
693 	struct xfs_mount	*mp,
694 	xfs_dqid_t		id,
695 	xfs_dqtype_t		type,
696 	struct qc_dqblk		*dst)
697 {
698 	struct xfs_dquot	*dqp;
699 	int			error;
700 
701 	/*
702 	 * Try to get the dquot. We don't want it allocated on disk, so don't
703 	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
704 	 */
705 	error = xfs_qm_dqget(mp, id, type, false, &dqp);
706 	if (error)
707 		return error;
708 
709 	/*
710 	 * If everything's NULL, this dquot doesn't quite exist as far as
711 	 * our utility programs are concerned.
712 	 */
713 	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
714 		error = -ENOENT;
715 		goto out_put;
716 	}
717 
718 	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
719 
720 out_put:
721 	xfs_qm_dqput(dqp);
722 	return error;
723 }
724 
725 /*
726  * Return the quota information for the first initialized dquot whose id
727  * is at least as high as id.
728  */
729 int
730 xfs_qm_scall_getquota_next(
731 	struct xfs_mount	*mp,
732 	xfs_dqid_t		*id,
733 	xfs_dqtype_t		type,
734 	struct qc_dqblk		*dst)
735 {
736 	struct xfs_dquot	*dqp;
737 	int			error;
738 
739 	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
740 	if (error)
741 		return error;
742 
743 	/* Fill in the ID we actually read from disk */
744 	*id = dqp->q_id;
745 
746 	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
747 
748 	xfs_qm_dqput(dqp);
749 	return error;
750 }
751