/* xref: /openbmc/linux/fs/xfs/xfs_qm_syscalls.c (revision 9d749629) */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);
STATIC uint	xfs_qm_export_flags(uint);
STATIC uint	xfs_qm_export_qtype_flags(uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	xfs_mount_t		*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return XFS_ERROR(EEXIST);
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error ? Revert back to old vals incore ? */
		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
		return (error);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If we're turning accounting off, we must also turn enforcement off
	 * and clear the quota 'CHKD' certificate so that it is known that a
	 * quotacheck is needed the next time this quota type is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	} else if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will all
	 * stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If quota is now completely disabled, close shop.
	 */
	if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
	    ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return (0);
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		IRELE(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
		IRELE(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}

out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

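/*
 * Truncate a single quota inode back to zero length: grab the inode, set
 * its size to zero and remove all data fork extents in one permanent
 * transaction. A NULLFSINO argument means there is no quota inode of this
 * type, in which case there is nothing to do.
 */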
STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				     XFS_TRANS_ABORT);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}

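/*
 * Truncate the on-disk quota inodes selected by the XFS_DQ_* flags. The
 * user quota inode and the group/project quota inode are handled
 * independently; if both truncations fail, the first error is returned.
 */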
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = 0, error2 = 0;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
			__func__, flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if (flags & XFS_DQ_USER)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
	if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);

	return error ? error : error2;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;
	__int64_t	sbflags;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	sbflags = 0;

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
			__func__, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	/* No fs can turn on quotas with a delayed effect */
	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	    (flags & XFS_UQUOTA_ENFD))
	    ||
	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	    (flags & XFS_GQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	    (flags & XFS_OQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
			__func__, flags, mp->m_sb.sb_qflags);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * If everything's already up to date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return XFS_ERROR(EEXIST);

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags && sbflags == 0)
		return XFS_ERROR(EEXIST);
	sbflags |= XFS_SB_QFLAGS;

	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
		return (error);
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
		return (0);

	if (! XFS_IS_QUOTA_RUNNING(mp))
		return XFS_ERROR(ESRCH);

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return (0);
}


/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 */
int
xfs_qm_scall_getqstat(
	struct xfs_mount	*mp,
	struct fs_quota_stat	*out)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_inode	*uip, *gip;
	bool                    tempuqip, tempgqip;

	uip = gip = NULL;
	tempuqip = tempgqip = false;
	memset(out, 0, sizeof(fs_quota_stat_t));

	out->qs_version = FS_QSTAT_VERSION;
	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
		out->qs_uquota.qfs_ino = NULLFSINO;
		out->qs_gquota.qfs_ino = NULLFSINO;
		return (0);
	}
	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
							(XFS_ALL_QUOTA_ACCT|
							 XFS_ALL_QUOTA_ENFD));
	out->qs_pad = 0;
	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;

	if (q) {
		uip = q->qi_uquotaip;
		gip = q->qi_gquotaip;
	}
	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					0, 0, &uip) == 0)
			tempuqip = true;
	}
	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					0, 0, &gip) == 0)
			tempgqip = true;
	}
	if (uip) {
		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
		if (tempuqip)
			IRELE(uip);
	}
	if (gip) {
		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
		if (tempgqip)
			IRELE(gip);
	}
	if (q) {
		out->qs_incoredqs = q->qi_dquots;
		out->qs_btimelimit = q->qi_btimelimit;
		out->qs_itimelimit = q->qi_itimelimit;
		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
		out->qs_bwarnlimit = q->qi_bwarnlimit;
		out->qs_iwarnlimit = q->qi_iwarnlimit;
	}
	return 0;
}

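/*
 * The only fields of fs_disk_quota that xfs_qm_scall_setqlim is prepared
 * to change: the block/inode/rt-block limits, the grace timers and the
 * warning counts. Anything else in d_fieldmask is rejected with EINVAL.
 */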
#define XFS_DQ_MASK \
	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXX This doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

 out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

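/*
 * Log the QUOTAOFF end record in its own synchronous transaction. This
 * pairs with the start record written by xfs_qm_log_quotaoff(); once the
 * end item is safely on disk, the start item may be retired from the log.
 */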
STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

	error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return (error);
}


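/*
 * Log the QUOTAOFF start record and the superblock quota-flag update in a
 * single synchronous transaction. On success the new start log item is
 * handed back through qoffstartp so that quotaoff processing can later log
 * the matching end record.
 */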
STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t	       *mp,
	xfs_qoff_logitem_t     **qoffstartp,
	uint		       flags)
{
	xfs_trans_t	       *tp;
	int			error;
	xfs_qoff_logitem_t     *qoffi=NULL;
	uint			oldsbqflag=0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
	error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp),
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error)
		goto error0;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	oldsbqflag = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_mod_sb(tp, XFS_SB_QFLAGS);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);

error0:
	if (error) {
		xfs_trans_cancel(tp, 0);
		/*
		 * No one else is modifying sb_qflags, so this is OK.
		 * We still hold the quotaofflock.
		 */
		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = oldsbqflag;
		spin_unlock(&mp->m_sb_lock);
	}
	*qoffstartp = qoffi;
	return (error);
}


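/*
 * Report the limits, usage counters, timers and warning counts of a single
 * dquot to userspace in fs_disk_quota format. ENOENT is returned if the
 * dquot does not exist on disk or has never been initialized.
 */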
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct fs_disk_quota	*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = XFS_ERROR(ENOENT);
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
	dst->d_icount = dqp->q_res_icount;
	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
	    dst->d_id != 0) {
		if ((dst->d_bcount > dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if ((dst->d_icount > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}

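/*
 * Translate the internal XFS_DQ_{USER,PROJ,GROUP} dquot type bit into the
 * FS_*_QUOTA value reported to userspace. Exactly one type must be set.
 */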
STATIC uint
xfs_qm_export_qtype_flags(
	uint flags)
{
	/*
	 * Can't be more than one, or none.
	 */
	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
		(FS_PROJ_QUOTA | FS_USER_QUOTA));
	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
		(FS_USER_QUOTA | FS_GROUP_QUOTA));
	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);

	return (flags & XFS_DQ_USER) ?
		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
}

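/*
 * Translate the internal XFS_*QUOTA_ACCT/ENFD mount quota flags into the
 * FS_QUOTA_* flags reported in fs_quota_stat.qs_flags.
 */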
STATIC uint
xfs_qm_export_flags(
	uint flags)
{
	uint uflags;

	uflags = 0;
	if (flags & XFS_UQUOTA_ACCT)
		uflags |= FS_QUOTA_UDQ_ACCT;
	if (flags & XFS_PQUOTA_ACCT)
		uflags |= FS_QUOTA_PDQ_ACCT;
	if (flags & XFS_GQUOTA_ACCT)
		uflags |= FS_QUOTA_GDQ_ACCT;
	if (flags & XFS_UQUOTA_ENFD)
		uflags |= FS_QUOTA_UDQ_ENFD;
	if (flags & (XFS_OQUOTA_ENFD)) {
		uflags |= (flags & XFS_GQUOTA_ACCT) ?
			FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
	}
	return (uflags);
}


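/*
 * Per-inode callback for xfs_qm_dqrele_all_inodes(): drop the inode's
 * references to the dquot types selected by 'flags'. The quota inodes
 * themselves never hold dquot references and are skipped.
 */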
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}


/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount *mp,
	uint		 flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
}
886