/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);
STATIC uint	xfs_qm_export_flags(uint);
STATIC uint	xfs_qm_export_qtype_flags(uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return XFS_ERROR(EEXIST);
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
		return (error);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return (0);
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		IRELE(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		IRELE(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		IRELE(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
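
/*
 * Usage sketch (not part of this file): xfs_qm_scall_quotaoff() is normally
 * reached from userspace through quotactl(2) with the Q_XQUOTAOFF subcommand.
 * A minimal caller might look like the fragment below, assuming the flag and
 * header names documented in the quotactl(2) man page (<xfs/xqm.h>); the
 * device path and error handling are illustrative only.
 *
 *	unsigned int qflags = XFS_QUOTA_UDQ_ENFD;	/@ stop user enforcement,
 *							   leave accounting on @/
 *	if (quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), "/dev/sdb1", 0,
 *		     (caddr_t)&qflags) < 0)
 *		perror("Q_XQUOTAOFF");
 *
 * ("/@ ... @/" stands in for a nested comment, which C does not allow.)
 */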

STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				     XFS_TRANS_ABORT);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}

int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = 0, error2 = 0;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
			__func__, flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if (flags & XFS_DQ_USER)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
	if (flags & XFS_DQ_GROUP)
		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
	if (flags & XFS_DQ_PROJ)
		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error ? error : error2;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;
	__int64_t	sbflags;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	sbflags = 0;

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
			__func__, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	/* No fs can turn on quotas with a delayed effect */
	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota accounting enabled ondisk without m_qflags knowing it.
	 */
	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
	     (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((flags & XFS_GQUOTA_ACCT) == 0 &&
	     (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
	     (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
			__func__, flags, mp->m_sb.sb_qflags);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * If everything's up to date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return XFS_ERROR(EEXIST);

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags && sbflags == 0)
		return XFS_ERROR(EEXIST);
	sbflags |= XFS_SB_QFLAGS;

	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
		return (error);
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
		return (0);

	if (! XFS_IS_QUOTA_RUNNING(mp))
		return XFS_ERROR(ESRCH);

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return (0);
}
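
/*
 * Usage sketch (not part of this file): enforcement is switched back on from
 * userspace with the Q_XQUOTAON quotactl(2) subcommand. The fragment below is
 * illustrative only and assumes the flag names documented in quotactl(2)
 * (<xfs/xqm.h>); accounting itself must already have been enabled at mount
 * time, e.g. with the "uquota" mount option.
 *
 *	unsigned int qflags = XFS_QUOTA_UDQ_ENFD;
 *	if (quotactl(QCMD(Q_XQUOTAON, USRQUOTA), "/dev/sdb1", 0,
 *		     (caddr_t)&qflags) < 0)
 *		perror("Q_XQUOTAON");
 */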


/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 * for Q_XGETQSTAT command.
 */
int
xfs_qm_scall_getqstat(
	struct xfs_mount	*mp,
	struct fs_quota_stat	*out)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	bool                    tempuqip = false;
	bool                    tempgqip = false;
	bool                    temppqip = false;

	memset(out, 0, sizeof(fs_quota_stat_t));

	out->qs_version = FS_QSTAT_VERSION;
	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
		out->qs_uquota.qfs_ino = NULLFSINO;
		out->qs_gquota.qfs_ino = NULLFSINO;
		return (0);
	}

	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
							(XFS_ALL_QUOTA_ACCT|
							 XFS_ALL_QUOTA_ENFD));
	if (q) {
		uip = q->qi_uquotaip;
		gip = q->qi_gquotaip;
		pip = q->qi_pquotaip;
	}
	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					0, 0, &uip) == 0)
			tempuqip = true;
	}
	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					0, 0, &gip) == 0)
			tempgqip = true;
	}
	/*
	 * Q_XGETQSTAT doesn't have room for both group and project quotas.
	 * So, allow the project quota values to be copied out only if
	 * there is no group quota information available.
	 */
	if (!gip) {
		if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
			if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
						0, 0, &pip) == 0)
				temppqip = true;
		}
	} else
		pip = NULL;
	if (uip) {
		out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
		if (tempuqip)
			IRELE(uip);
	}

	if (gip) {
		out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
		if (tempgqip)
			IRELE(gip);
	}
	if (pip) {
		/*
		 * Project quota is reported in the group slot of the old
		 * fs_quota_stat structure; export the project quota inode
		 * number here, not the group one.
		 */
		out->qs_gquota.qfs_ino = mp->m_sb.sb_pquotino;
		out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = pip->i_d.di_nextents;
		if (temppqip)
			IRELE(pip);
	}
	if (q) {
		out->qs_incoredqs = q->qi_dquots;
		out->qs_btimelimit = q->qi_btimelimit;
		out->qs_itimelimit = q->qi_itimelimit;
		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
		out->qs_bwarnlimit = q->qi_bwarnlimit;
		out->qs_iwarnlimit = q->qi_iwarnlimit;
	}
	return 0;
}
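
/*
 * Usage sketch (not part of this file): a userspace caller retrieves this
 * status through quotactl(2) with Q_XGETQSTAT. Illustrative only; structure
 * and flag names are assumed from the quotactl(2) man page.
 *
 *	struct fs_quota_stat qstat;
 *	if (quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), "/dev/sdb1", 0,
 *		     (caddr_t)&qstat) == 0)
 *		printf("user quota accounting %s\n",
 *		       (qstat.qs_flags & XFS_QUOTA_UDQ_ACCT) ? "on" : "off");
 */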

/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 * for Q_XGETQSTATV command, to support separate project quota field.
 */
int
xfs_qm_scall_getqstatv(
	struct xfs_mount	*mp,
	struct fs_quota_statv	*out)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	bool                    tempuqip = false;
	bool                    tempgqip = false;
	bool                    temppqip = false;

	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
		out->qs_uquota.qfs_ino = NULLFSINO;
		out->qs_gquota.qfs_ino = NULLFSINO;
		out->qs_pquota.qfs_ino = NULLFSINO;
		return (0);
	}

	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
							(XFS_ALL_QUOTA_ACCT|
							 XFS_ALL_QUOTA_ENFD));
	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
	out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;

	if (q) {
		uip = q->qi_uquotaip;
		gip = q->qi_gquotaip;
		pip = q->qi_pquotaip;
	}
	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					0, 0, &uip) == 0)
			tempuqip = true;
	}
	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					0, 0, &gip) == 0)
			tempgqip = true;
	}
	if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					0, 0, &pip) == 0)
			temppqip = true;
	}
	if (uip) {
		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
		if (tempuqip)
			IRELE(uip);
	}

	if (gip) {
		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
		if (tempgqip)
			IRELE(gip);
	}
	if (pip) {
		out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks;
		out->qs_pquota.qfs_nextents = pip->i_d.di_nextents;
		if (temppqip)
			IRELE(pip);
	}
	if (q) {
		out->qs_incoredqs = q->qi_dquots;
		out->qs_btimelimit = q->qi_btimelimit;
		out->qs_itimelimit = q->qi_itimelimit;
		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
		out->qs_bwarnlimit = q->qi_bwarnlimit;
		out->qs_iwarnlimit = q->qi_iwarnlimit;
	}
	return 0;
}
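
/*
 * Usage sketch (not part of this file): the newer Q_XGETQSTATV interface lets
 * the caller declare which structure version it understands and has a
 * separate project quota slot. Illustrative only; names are assumed from the
 * quotactl(2) man page.
 *
 *	struct fs_quota_statv qstatv;
 *	memset(&qstatv, 0, sizeof(qstatv));
 *	qstatv.qs_version = FS_QSTATV_VERSION1;
 *	if (quotactl(QCMD(Q_XGETQSTATV, USRQUOTA), "/dev/sdb1", 0,
 *		     (caddr_t)&qstatv) == 0)
 *		printf("project quota inode: %llu\n",
 *		       (unsigned long long)qstatv.qs_pquota.qfs_ino);
 */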

#define XFS_DQ_MASK \
	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the meantime.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
	if (error) {
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_dqunlock(dqp);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_rele;
	}

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
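
/*
 * Usage sketch (not part of this file): limits arrive here from userspace via
 * quotactl(2) Q_XSETQLIM as a struct fs_disk_quota, with the block limits in
 * 512-byte basic blocks (hence the XFS_BB_TO_FSB() conversions above). The
 * fragment is illustrative only; names are assumed from the quotactl(2) man
 * page, and the 900 MiB / 1 GiB limits for uid 1000 are arbitrary examples.
 *
 *	struct fs_disk_quota d;
 *	memset(&d, 0, sizeof(d));
 *	d.d_version = FS_DQUOT_VERSION;
 *	d.d_flags = FS_USER_QUOTA;
 *	d.d_id = 1000;
 *	d.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
 *	d.d_blk_softlimit = (900ULL << 20) / 512;
 *	d.d_blk_hardlimit = (1ULL << 30) / 512;
 *	if (quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/sdb1", 1000,
 *		     (caddr_t)&d) < 0)
 *		perror("Q_XSETQLIM");
 */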

STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return (error);
}


STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t	       *mp,
	xfs_qoff_logitem_t     **qoffstartp,
	uint		       flags)
{
	xfs_trans_t	       *tp;
	int			error;
	xfs_qoff_logitem_t     *qoffi=NULL;
	uint			oldsbqflag=0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
	if (error)
		goto error0;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	oldsbqflag = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_mod_sb(tp, XFS_SB_QFLAGS);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);

error0:
	if (error) {
		xfs_trans_cancel(tp, 0);
		/*
		 * No one else is modifying sb_qflags, so this is OK.
		 * We still hold the quotaofflock.
		 */
		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = oldsbqflag;
		spin_unlock(&mp->m_sb_lock);
	}
	*qoffstartp = qoffi;
	return (error);
}


int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct fs_disk_quota	*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = XFS_ERROR(ENOENT);
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
	dst->d_icount = dqp->q_res_icount;
	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
	    dst->d_id != 0) {
		if ((dst->d_bcount > dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if ((dst->d_icount > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}
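
/*
 * Usage sketch (not part of this file): individual limits and usage are read
 * back with quotactl(2) Q_XGETQUOTA; d_bcount and the block limits come back
 * in 512-byte basic blocks (see the XFS_FSB_TO_BB() conversions above).
 * Illustrative only; names are assumed from the quotactl(2) man page.
 *
 *	struct fs_disk_quota d;
 *	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), "/dev/sdb1", 1000,
 *		     (caddr_t)&d) == 0)
 *		printf("uid 1000 uses %llu bytes\n",
 *		       (unsigned long long)d.d_bcount * 512);
 */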

STATIC uint
xfs_qm_export_qtype_flags(
	uint flags)
{
	/*
	 * Can't be more than one, or none.
	 */
	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
		(FS_PROJ_QUOTA | FS_USER_QUOTA));
	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
		(FS_USER_QUOTA | FS_GROUP_QUOTA));
	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);

	return (flags & XFS_DQ_USER) ?
		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
}

STATIC uint
xfs_qm_export_flags(
	uint flags)
{
	uint uflags;

	uflags = 0;
	if (flags & XFS_UQUOTA_ACCT)
		uflags |= FS_QUOTA_UDQ_ACCT;
	if (flags & XFS_GQUOTA_ACCT)
		uflags |= FS_QUOTA_GDQ_ACCT;
	if (flags & XFS_PQUOTA_ACCT)
		uflags |= FS_QUOTA_PDQ_ACCT;
	if (flags & XFS_UQUOTA_ENFD)
		uflags |= FS_QUOTA_UDQ_ENFD;
	if (flags & XFS_GQUOTA_ENFD)
		uflags |= FS_QUOTA_GDQ_ENFD;
	if (flags & XFS_PQUOTA_ENFD)
		uflags |= FS_QUOTA_PDQ_ENFD;
	return (uflags);
}


STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}


/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount *mp,
	uint		 flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
}