xref: /openbmc/linux/fs/xfs/xfs_qm.c (revision b34081f1)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_alloc.h"
27 #include "xfs_quota.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_ialloc.h"
34 #include "xfs_itable.h"
35 #include "xfs_rtalloc.h"
36 #include "xfs_error.h"
37 #include "xfs_bmap.h"
38 #include "xfs_attr.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_trans_space.h"
41 #include "xfs_qm.h"
42 #include "xfs_trace.h"
43 #include "xfs_icache.h"
44 #include "xfs_cksum.h"
45 
46 /*
47  * The global quota manager. There is only one of these for the entire
48  * system, _not_ one per file system. XQM keeps track of the overall
49  * quota functionality, including maintaining the freelist and radix
50  * trees of dquots.
51  */
52 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
53 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
54 STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
55 
56 /*
57  * We use the batch lookup interface to iterate over the dquots as it
58  * currently is the only interface into the radix tree code that allows
59  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
60  * operations is fine as all callers run only during mount/umount
61  * or quotaoff.
62  */
63 #define XFS_DQ_LOOKUP_BATCH	32
64 
65 STATIC int
66 xfs_qm_dquot_walk(
67 	struct xfs_mount	*mp,
68 	int			type,
69 	int			(*execute)(struct xfs_dquot *dqp, void *data),
70 	void			*data)
71 {
72 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
73 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
74 	uint32_t		next_index;
75 	int			last_error = 0;
76 	int			skipped;
77 	int			nr_found;
78 
79 restart:
80 	skipped = 0;
81 	next_index = 0;
82 	nr_found = 0;
83 
84 	while (1) {
85 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
86 		int		error = 0;
87 		int		i;
88 
89 		mutex_lock(&qi->qi_tree_lock);
90 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
91 					next_index, XFS_DQ_LOOKUP_BATCH);
92 		if (!nr_found) {
93 			mutex_unlock(&qi->qi_tree_lock);
94 			break;
95 		}
96 
97 		for (i = 0; i < nr_found; i++) {
98 			struct xfs_dquot *dqp = batch[i];
99 
100 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
101 
102 			error = execute(batch[i], data);
103 			if (error == EAGAIN) {
104 				skipped++;
105 				continue;
106 			}
107 			if (error && last_error != EFSCORRUPTED)
108 				last_error = error;
109 		}
110 
111 		mutex_unlock(&qi->qi_tree_lock);
112 
113 		/* Bail out if the filesystem is corrupted. */
114 		if (last_error == EFSCORRUPTED) {
115 			skipped = 0;
116 			break;
117 		}
118 	}
119 
120 	if (skipped) {
121 		delay(1);
122 		goto restart;
123 	}
124 
125 	return last_error;
126 }
127 
128 
129 /*
130  * Purge a dquot from all tracking data structures and free it.
131  */
132 STATIC int
133 xfs_qm_dqpurge(
134 	struct xfs_dquot	*dqp,
135 	void			*data)
136 {
137 	struct xfs_mount	*mp = dqp->q_mount;
138 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
139 	struct xfs_dquot	*gdqp = NULL;
140 	struct xfs_dquot	*pdqp = NULL;
141 
142 	xfs_dqlock(dqp);
143 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
144 		xfs_dqunlock(dqp);
145 		return EAGAIN;
146 	}
147 
148 	/*
149 	 * If this quota has a hint attached, prepare for releasing it now.
150 	 */
151 	gdqp = dqp->q_gdquot;
152 	if (gdqp) {
153 		xfs_dqlock(gdqp);
154 		dqp->q_gdquot = NULL;
155 	}
156 
157 	pdqp = dqp->q_pdquot;
158 	if (pdqp) {
159 		xfs_dqlock(pdqp);
160 		dqp->q_pdquot = NULL;
161 	}
162 
163 	dqp->dq_flags |= XFS_DQ_FREEING;
164 
165 	xfs_dqflock(dqp);
166 
167 	/*
168 	 * If we are turning this type of quota off, we don't care
169 	 * about the dirty metadata sitting in this dquot. OTOH, if
170 	 * we're unmounting, we do care, so we flush it and wait.
171 	 */
172 	if (XFS_DQ_IS_DIRTY(dqp)) {
173 		struct xfs_buf	*bp = NULL;
174 		int		error;
175 
176 		/*
177 		 * We don't care about getting disk errors here. We need
178 		 * to purge this dquot anyway, so we go ahead regardless.
179 		 */
180 		error = xfs_qm_dqflush(dqp, &bp);
181 		if (error) {
182 			xfs_warn(mp, "%s: dquot %p flush failed",
183 				__func__, dqp);
184 		} else {
185 			error = xfs_bwrite(bp);
186 			xfs_buf_relse(bp);
187 		}
188 		xfs_dqflock(dqp);
189 	}
190 
191 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
192 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
193 	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
194 
195 	xfs_dqfunlock(dqp);
196 	xfs_dqunlock(dqp);
197 
198 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
199 			  be32_to_cpu(dqp->q_core.d_id));
200 	qi->qi_dquots--;
201 
202 	/*
203 	 * We move dquots to the freelist as soon as their reference count
204 	 * hits zero, so it really should be on the freelist here.
205 	 */
206 	mutex_lock(&qi->qi_lru_lock);
207 	ASSERT(!list_empty(&dqp->q_lru));
208 	list_del_init(&dqp->q_lru);
209 	qi->qi_lru_count--;
210 	XFS_STATS_DEC(xs_qm_dquot_unused);
211 	mutex_unlock(&qi->qi_lru_lock);
212 
213 	xfs_qm_dqdestroy(dqp);
214 
215 	if (gdqp)
216 		xfs_qm_dqput(gdqp);
217 	if (pdqp)
218 		xfs_qm_dqput(pdqp);
219 	return 0;
220 }
221 
222 /*
223  * Purge the dquot cache.
224  */
225 void
226 xfs_qm_dqpurge_all(
227 	struct xfs_mount	*mp,
228 	uint			flags)
229 {
230 	if (flags & XFS_QMOPT_UQUOTA)
231 		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
232 	if (flags & XFS_QMOPT_GQUOTA)
233 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
234 	if (flags & XFS_QMOPT_PQUOTA)
235 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
236 }
237 
238 /*
239  * Just destroy the quotainfo structure.
240  */
241 void
242 xfs_qm_unmount(
243 	struct xfs_mount	*mp)
244 {
245 	if (mp->m_quotainfo) {
246 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
247 		xfs_qm_destroy_quotainfo(mp);
248 	}
249 }
250 
251 
252 /*
253  * This is called from xfs_mountfs to start quotas and initialize all
254  * necessary data structures like quotainfo.  This is also responsible for
255  * running a quotacheck as necessary.  We are guaranteed that the superblock
256  * is consistently read in at this point.
257  *
258  * If we fail here, the mount will continue with quota turned off. We don't
259  * need to indicate success or failure at all.
260  */
261 void
262 xfs_qm_mount_quotas(
263 	xfs_mount_t	*mp)
264 {
265 	int		error = 0;
266 	uint		sbf;
267 
268 	/*
269 	 * If quotas on realtime volumes are not supported, we disable
270 	 * quotas immediately.
271 	 */
272 	if (mp->m_sb.sb_rextents) {
273 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
274 		mp->m_qflags = 0;
275 		goto write_changes;
276 	}
277 
278 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
279 
280 	/*
281 	 * Allocate the quotainfo structure inside the mount struct,
282 	 * create the quota inode(s), and change/rev the superblock if necessary.
283 	 */
284 	error = xfs_qm_init_quotainfo(mp);
285 	if (error) {
286 		/*
287 		 * We must turn off quotas.
288 		 */
289 		ASSERT(mp->m_quotainfo == NULL);
290 		mp->m_qflags = 0;
291 		goto write_changes;
292 	}
293 	/*
294 	 * If any of the quotas are not consistent, do a quotacheck.
295 	 */
296 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
297 		error = xfs_qm_quotacheck(mp);
298 		if (error) {
299 			/* Quotacheck failed and disabled quotas. */
300 			return;
301 		}
302 	}
303 	/*
304 	 * If one type of quota is off, then it will lose its
305 	 * quotachecked status, since we won't be doing accounting for
306 	 * that type anymore.
307 	 */
308 	if (!XFS_IS_UQUOTA_ON(mp))
309 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
310 	if (!XFS_IS_GQUOTA_ON(mp))
311 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
312 	if (!XFS_IS_PQUOTA_ON(mp))
313 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
314 
315  write_changes:
316 	/*
317 	 * We actually don't have to acquire the m_sb_lock at all.
318 	 * This can only be called from mount, and that's single threaded. XXX
319 	 */
320 	spin_lock(&mp->m_sb_lock);
321 	sbf = mp->m_sb.sb_qflags;
322 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
323 	spin_unlock(&mp->m_sb_lock);
324 
325 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
326 		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
327 			/*
328 			 * We could only have been turning quotas off.
329 			 * We aren't in very good shape actually because
330 			 * the incore structures are convinced that quotas are
331 			 * off, but the on-disk superblock doesn't know that!
332 			 */
333 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
334 			xfs_alert(mp, "%s: Superblock update failed!",
335 				__func__);
336 		}
337 	}
338 
339 	if (error) {
340 		xfs_warn(mp, "Failed to initialize disk quotas.");
341 		return;
342 	}
343 }
344 
345 /*
346  * Called from the vfsops layer.
347  */
348 void
349 xfs_qm_unmount_quotas(
350 	xfs_mount_t	*mp)
351 {
352 	/*
353 	 * Release the dquots that the root inode, et al. might be holding,
354 	 * before we flush quotas and blow away the quotainfo structure.
355 	 */
356 	ASSERT(mp->m_rootip);
357 	xfs_qm_dqdetach(mp->m_rootip);
358 	if (mp->m_rbmip)
359 		xfs_qm_dqdetach(mp->m_rbmip);
360 	if (mp->m_rsumip)
361 		xfs_qm_dqdetach(mp->m_rsumip);
362 
363 	/*
364 	 * Release the quota inodes.
365 	 */
366 	if (mp->m_quotainfo) {
367 		if (mp->m_quotainfo->qi_uquotaip) {
368 			IRELE(mp->m_quotainfo->qi_uquotaip);
369 			mp->m_quotainfo->qi_uquotaip = NULL;
370 		}
371 		if (mp->m_quotainfo->qi_gquotaip) {
372 			IRELE(mp->m_quotainfo->qi_gquotaip);
373 			mp->m_quotainfo->qi_gquotaip = NULL;
374 		}
375 		if (mp->m_quotainfo->qi_pquotaip) {
376 			IRELE(mp->m_quotainfo->qi_pquotaip);
377 			mp->m_quotainfo->qi_pquotaip = NULL;
378 		}
379 	}
380 }
381 
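/*
 * Attach one dquot of the given type to the inode, looking it up via the
 * udquot hint if one is supplied, or via xfs_qm_dqget() otherwise.  The
 * attached dquot is returned through IO_idqpp with a reference held.
 */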
382 STATIC int
383 xfs_qm_dqattach_one(
384 	xfs_inode_t	*ip,
385 	xfs_dqid_t	id,
386 	uint		type,
387 	uint		doalloc,
388 	xfs_dquot_t	*udqhint, /* hint */
389 	xfs_dquot_t	**IO_idqpp)
390 {
391 	xfs_dquot_t	*dqp;
392 	int		error;
393 
394 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
395 	error = 0;
396 
397 	/*
398 	 * See if we already have it in the inode itself. IO_idqpp is
399 	 * &i_udquot or &i_gdquot. This made the code look weird, but
400 	 * made the logic a lot simpler.
401 	 */
402 	dqp = *IO_idqpp;
403 	if (dqp) {
404 		trace_xfs_dqattach_found(dqp);
405 		return 0;
406 	}
407 
408 	/*
409 	 * udqhint is the i_udquot field in the inode, and is non-NULL only
410 	 * when the type arg is group/project. Its purpose is to save a
411 	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
412 	 * the user dquot.
413 	 */
414 	if (udqhint) {
415 		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
416 		xfs_dqlock(udqhint);
417 
418 		/*
419 		 * No need to take dqlock to look at the id.
420 		 *
421 		 * The ID can't change until it gets reclaimed, and it won't
422 		 * be reclaimed as long as we have a ref from inode and we
423 		 * hold the ilock.
424 		 */
425 		if (type == XFS_DQ_GROUP)
426 			dqp = udqhint->q_gdquot;
427 		else
428 			dqp = udqhint->q_pdquot;
429 		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
430 			ASSERT(*IO_idqpp == NULL);
431 
432 			*IO_idqpp = xfs_qm_dqhold(dqp);
433 			xfs_dqunlock(udqhint);
434 			return 0;
435 		}
436 
437 		/*
438 		 * We can't hold a dquot lock when we call the dqget code.
439 		 * We'll deadlock in no time, because of (not conforming to)
440 		 * lock ordering - the inode lock comes before any dquot lock,
441 		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
442 		 */
443 		xfs_dqunlock(udqhint);
444 	}
445 
446 	/*
447 	 * Find the dquot from somewhere. This bumps the
448 	 * reference count of the dquot and returns it locked.
449 	 * This can return ENOENT if the dquot didn't exist on
450 	 * disk and we didn't ask it to allocate;
451 	 * ESRCH if quotas got turned off suddenly.
452 	 */
453 	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
454 			     doalloc | XFS_QMOPT_DOWARN, &dqp);
455 	if (error)
456 		return error;
457 
458 	trace_xfs_dqattach_get(dqp);
459 
460 	/*
461 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
462 	 * that the dquot returned is the one that should go in the inode.
463 	 */
464 	*IO_idqpp = dqp;
465 	xfs_dqunlock(dqp);
466 	return 0;
467 }
468 
469 
470 /*
471  * Given a udquot and group/project type, attach the group/project
472  * dquot pointer to the udquot as a hint for future lookups.
473  */
474 STATIC void
475 xfs_qm_dqattach_hint(
476 	struct xfs_inode	*ip,
477 	int			type)
478 {
479 	struct xfs_dquot **dqhintp;
480 	struct xfs_dquot *dqp;
481 	struct xfs_dquot *udq = ip->i_udquot;
482 
483 	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
484 
485 	xfs_dqlock(udq);
486 
487 	if (type == XFS_DQ_GROUP) {
488 		dqp = ip->i_gdquot;
489 		dqhintp = &udq->q_gdquot;
490 	} else {
491 		dqp = ip->i_pdquot;
492 		dqhintp = &udq->q_pdquot;
493 	}
494 
495 	if (*dqhintp) {
496 		struct xfs_dquot *tmp;
497 
498 		if (*dqhintp == dqp)
499 			goto done;
500 
501 		tmp = *dqhintp;
502 		*dqhintp = NULL;
503 		xfs_qm_dqrele(tmp);
504 	}
505 
506 	*dqhintp = xfs_qm_dqhold(dqp);
507 done:
508 	xfs_dqunlock(udq);
509 }
510 
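/*
 * Check whether this inode actually needs dquots attached: quota must be
 * running and on, the inode must not already have its dquots attached, and
 * the quota inodes themselves are never given dquots.
 */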
511 static bool
512 xfs_qm_need_dqattach(
513 	struct xfs_inode	*ip)
514 {
515 	struct xfs_mount	*mp = ip->i_mount;
516 
517 	if (!XFS_IS_QUOTA_RUNNING(mp))
518 		return false;
519 	if (!XFS_IS_QUOTA_ON(mp))
520 		return false;
521 	if (!XFS_NOT_DQATTACHED(mp, ip))
522 		return false;
523 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
524 		return false;
525 	return true;
526 }
527 
528 /*
529  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
530  * into account.
531  * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
532  * Inode may get unlocked and relocked in here, and the caller must deal with
533  * the consequences.
534  */
535 int
536 xfs_qm_dqattach_locked(
537 	xfs_inode_t	*ip,
538 	uint		flags)
539 {
540 	xfs_mount_t	*mp = ip->i_mount;
541 	uint		nquotas = 0;
542 	int		error = 0;
543 
544 	if (!xfs_qm_need_dqattach(ip))
545 		return 0;
546 
547 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
548 
549 	if (XFS_IS_UQUOTA_ON(mp)) {
550 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
551 						flags & XFS_QMOPT_DQALLOC,
552 						NULL, &ip->i_udquot);
553 		if (error)
554 			goto done;
555 		nquotas++;
556 	}
557 
558 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
559 	if (XFS_IS_GQUOTA_ON(mp)) {
560 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
561 						flags & XFS_QMOPT_DQALLOC,
562 						ip->i_udquot, &ip->i_gdquot);
563 		/*
564 		 * Don't worry about the udquot that we may have
565 		 * attached above. It'll get detached, if not already.
566 		 */
567 		if (error)
568 			goto done;
569 		nquotas++;
570 	}
571 
572 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
573 	if (XFS_IS_PQUOTA_ON(mp)) {
574 		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
575 						flags & XFS_QMOPT_DQALLOC,
576 						ip->i_udquot, &ip->i_pdquot);
577 		/*
578 		 * Don't worry about the udquot that we may have
579 		 * attached above. It'll get detached, if not already.
580 		 */
581 		if (error)
582 			goto done;
583 		nquotas++;
584 	}
585 
586 	/*
587 	 * Attach this group/project quota to the user quota as a hint.
588 	 * This WON'T, in general, result in thrashing.
589 	 */
590 	if (nquotas > 1 && ip->i_udquot) {
591 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
592 		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
593 		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
594 
595 		/*
596 		 * We do not have i_udquot locked at this point, but this check
597 		 * is OK since we don't depend on the i_gdquot to be accurate
598 		 * 100% all the time. It is just a hint, and this will
599 		 * succeed in general.
600 		 */
601 		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
602 			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
603 
604 		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
605 			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
606 	}
607 
608  done:
609 #ifdef DEBUG
610 	if (!error) {
611 		if (XFS_IS_UQUOTA_ON(mp))
612 			ASSERT(ip->i_udquot);
613 		if (XFS_IS_GQUOTA_ON(mp))
614 			ASSERT(ip->i_gdquot);
615 		if (XFS_IS_PQUOTA_ON(mp))
616 			ASSERT(ip->i_pdquot);
617 	}
618 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
619 #endif
620 	return error;
621 }
622 
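/*
 * Lock the inode and attach its dquots.  Convenience wrapper around
 * xfs_qm_dqattach_locked() for callers that don't already hold the ilock.
 */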
623 int
624 xfs_qm_dqattach(
625 	struct xfs_inode	*ip,
626 	uint			flags)
627 {
628 	int			error;
629 
630 	if (!xfs_qm_need_dqattach(ip))
631 		return 0;
632 
633 	xfs_ilock(ip, XFS_ILOCK_EXCL);
634 	error = xfs_qm_dqattach_locked(ip, flags);
635 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
636 
637 	return error;
638 }
639 
640 /*
641  * Release dquots (and their references) if any.
642  * The inode should be locked EXCL except when this is called by
643  * xfs_ireclaim.
644  */
645 void
646 xfs_qm_dqdetach(
647 	xfs_inode_t	*ip)
648 {
649 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
650 		return;
651 
652 	trace_xfs_dquot_dqdetach(ip);
653 
654 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
655 	if (ip->i_udquot) {
656 		xfs_qm_dqrele(ip->i_udquot);
657 		ip->i_udquot = NULL;
658 	}
659 	if (ip->i_gdquot) {
660 		xfs_qm_dqrele(ip->i_gdquot);
661 		ip->i_gdquot = NULL;
662 	}
663 	if (ip->i_pdquot) {
664 		xfs_qm_dqrele(ip->i_pdquot);
665 		ip->i_pdquot = NULL;
666 	}
667 }
668 
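/*
 * Return the number of on-disk dquots that fit in a chunk of the given
 * size in basic blocks.
 */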
669 int
670 xfs_qm_calc_dquots_per_chunk(
671 	struct xfs_mount	*mp,
672 	unsigned int		nbblks)	/* basic block units */
673 {
674 	unsigned int	ndquots;
675 
676 	ASSERT(nbblks > 0);
677 	ndquots = BBTOB(nbblks);
678 	do_div(ndquots, sizeof(xfs_dqblk_t));
679 
680 	return ndquots;
681 }
682 
683 /*
684  * This initializes all the quota information that's kept in the
685  * mount structure.
686  */
687 STATIC int
688 xfs_qm_init_quotainfo(
689 	xfs_mount_t	*mp)
690 {
691 	xfs_quotainfo_t *qinf;
692 	int		error;
693 	xfs_dquot_t	*dqp;
694 
695 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
696 
697 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
698 
699 	/*
700 	 * See if the quota inodes are set up, and if not, allocate them,
701 	 * and change the superblock accordingly.
702 	 */
703 	if ((error = xfs_qm_init_quotainos(mp))) {
704 		kmem_free(qinf);
705 		mp->m_quotainfo = NULL;
706 		return error;
707 	}
708 
709 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
710 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
711 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
712 	mutex_init(&qinf->qi_tree_lock);
713 
714 	INIT_LIST_HEAD(&qinf->qi_lru_list);
715 	qinf->qi_lru_count = 0;
716 	mutex_init(&qinf->qi_lru_lock);
717 
718 	/* mutex used to serialize quotaoffs */
719 	mutex_init(&qinf->qi_quotaofflock);
720 
721 	/* Precalc some constants */
722 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
723 	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
724 							qinf->qi_dqchunklen);
725 
726 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
727 
728 	/*
729 	 * We try to get the limits from the superuser's limits fields.
730 	 * This is quite hacky, but it is standard quota practice.
731 	 *
732 	 * We look at the USR dquot with id == 0 first, but if user quotas
733 	 * are not enabled we go to the GRP dquot with id == 0.
734 	 * We don't really care to keep separate default limits for user
735 	 * and group quotas, at least not at this point.
736 	 *
737 	 * Since we may not have done a quotacheck by this point, just read
738 	 * the dquot without attaching it to any hashtables or lists.
739 	 */
740 	error = xfs_qm_dqread(mp, 0,
741 			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
742 			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
743 			  XFS_DQ_PROJ),
744 			XFS_QMOPT_DOWARN, &dqp);
745 	if (!error) {
746 		xfs_disk_dquot_t	*ddqp = &dqp->q_core;
747 
748 		/*
749 		 * The warnings and timers set the grace period given to
750 		 * a user or group before further writes are disallowed.
751 		 * If the value is zero, a default is used.
752 		 */
753 		qinf->qi_btimelimit = ddqp->d_btimer ?
754 			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
755 		qinf->qi_itimelimit = ddqp->d_itimer ?
756 			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
757 		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
758 			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
759 		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
760 			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
761 		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
762 			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
763 		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
764 			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
765 		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
766 		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
767 		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
768 		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
769 		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
770 		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
771 
772 		xfs_qm_dqdestroy(dqp);
773 	} else {
774 		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
775 		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
776 		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
777 		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
778 		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
779 		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
780 	}
781 
782 	qinf->qi_shrinker.shrink = xfs_qm_shake;
783 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
784 	register_shrinker(&qinf->qi_shrinker);
785 	return 0;
786 }
787 
788 
789 /*
790  * Gets called when unmounting a filesystem or when all quotas get
791  * turned off.
792  * This purges the quota inodes, destroys locks and frees the quotainfo structure.
793  */
794 void
795 xfs_qm_destroy_quotainfo(
796 	xfs_mount_t	*mp)
797 {
798 	xfs_quotainfo_t *qi;
799 
800 	qi = mp->m_quotainfo;
801 	ASSERT(qi != NULL);
802 
803 	unregister_shrinker(&qi->qi_shrinker);
804 
805 	if (qi->qi_uquotaip) {
806 		IRELE(qi->qi_uquotaip);
807 		qi->qi_uquotaip = NULL; /* paranoia */
808 	}
809 	if (qi->qi_gquotaip) {
810 		IRELE(qi->qi_gquotaip);
811 		qi->qi_gquotaip = NULL;
812 	}
813 	if (qi->qi_pquotaip) {
814 		IRELE(qi->qi_pquotaip);
815 		qi->qi_pquotaip = NULL;
816 	}
817 	mutex_destroy(&qi->qi_quotaofflock);
818 	kmem_free(qi);
819 	mp->m_quotainfo = NULL;
820 }
821 
822 /*
823  * Create an inode and return with a reference already taken, but unlocked.
824  * This is how we create the quota inodes.
825  */
826 STATIC int
827 xfs_qm_qino_alloc(
828 	xfs_mount_t	*mp,
829 	xfs_inode_t	**ip,
830 	__int64_t	sbfields,
831 	uint		flags)
832 {
833 	xfs_trans_t	*tp;
834 	int		error;
835 	int		committed;
836 
837 	*ip = NULL;
838 	/*
839 	 * With a superblock that doesn't have a separate pquotino, we
840 	 * share an inode between gquota and pquota. If the on-disk
841 	 * superblock has GQUOTA and the filesystem is now mounted
842 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
843 	 * vice-versa.
844 	 */
845 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
846 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
847 		xfs_ino_t ino = NULLFSINO;
848 
849 		if ((flags & XFS_QMOPT_PQUOTA) &&
850 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
851 			ino = mp->m_sb.sb_gquotino;
852 			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
853 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
854 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
855 			ino = mp->m_sb.sb_pquotino;
856 			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
857 		}
858 		if (ino != NULLFSINO) {
859 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
860 			if (error)
861 				return error;
862 			mp->m_sb.sb_gquotino = NULLFSINO;
863 			mp->m_sb.sb_pquotino = NULLFSINO;
864 		}
865 	}
866 
867 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
868 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
869 				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
870 	if (error) {
871 		xfs_trans_cancel(tp, 0);
872 		return error;
873 	}
874 
875 	if (!*ip) {
876 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
877 								&committed);
878 		if (error) {
879 			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
880 					 XFS_TRANS_ABORT);
881 			return error;
882 		}
883 	}
884 
885 	/*
886 	 * Make the changes in the superblock, and log those too.
887 	 * sbfields arg may contain fields other than *QUOTINO;
888 	 * VERSIONNUM for example.
889 	 */
890 	spin_lock(&mp->m_sb_lock);
891 	if (flags & XFS_QMOPT_SBVERSION) {
892 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
893 		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
894 			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
895 				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
896 				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
897 				 XFS_SB_QFLAGS));
898 
899 		xfs_sb_version_addquota(&mp->m_sb);
900 		mp->m_sb.sb_uquotino = NULLFSINO;
901 		mp->m_sb.sb_gquotino = NULLFSINO;
902 		mp->m_sb.sb_pquotino = NULLFSINO;
903 
904 		/* qflags will get updated fully _after_ quotacheck */
905 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
906 	}
907 	if (flags & XFS_QMOPT_UQUOTA)
908 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
909 	else if (flags & XFS_QMOPT_GQUOTA)
910 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
911 	else
912 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
913 	spin_unlock(&mp->m_sb_lock);
914 	xfs_mod_sb(tp, sbfields);
915 
916 	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
917 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
918 		return error;
919 	}
920 	return 0;
921 }
922 
923 
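/*
 * Zero the counters, timers and warning counts in every dquot of a dquot
 * buffer so that quotacheck can rebuild them, recomputing the CRC on
 * CRC-enabled filesystems.
 */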
924 STATIC void
925 xfs_qm_reset_dqcounts(
926 	xfs_mount_t	*mp,
927 	xfs_buf_t	*bp,
928 	xfs_dqid_t	id,
929 	uint		type)
930 {
931 	struct xfs_dqblk	*dqb;
932 	int			j;
933 
934 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
935 
936 	/*
937 	 * Reset all counters and timers. They'll be
938 	 * started afresh by xfs_qm_quotacheck.
939 	 */
940 #ifdef DEBUG
941 	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
942 	do_div(j, sizeof(xfs_dqblk_t));
943 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
944 #endif
945 	dqb = bp->b_addr;
946 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
947 		struct xfs_disk_dquot	*ddq;
948 
949 		ddq = (struct xfs_disk_dquot *)&dqb[j];
950 
951 		/*
952 		 * Do a sanity check, and if needed, repair the dqblk. Don't
953 		 * output any warnings because it's perfectly possible to
954 		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
955 		 */
956 		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
957 				      "xfs_quotacheck");
958 		ddq->d_bcount = 0;
959 		ddq->d_icount = 0;
960 		ddq->d_rtbcount = 0;
961 		ddq->d_btimer = 0;
962 		ddq->d_itimer = 0;
963 		ddq->d_rtbtimer = 0;
964 		ddq->d_bwarns = 0;
965 		ddq->d_iwarns = 0;
966 		ddq->d_rtbwarns = 0;
967 
968 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
969 			xfs_update_cksum((char *)&dqb[j],
970 					 sizeof(struct xfs_dqblk),
971 					 XFS_DQUOT_CRC_OFF);
972 		}
973 	}
974 }
975 
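/*
 * Read each dquot buffer in the given block range, reset the dquot counters
 * in it and queue it for delayed write.  Corrupt buffers are re-read without
 * verifiers so their contents can be repaired in place.
 */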
976 STATIC int
977 xfs_qm_dqiter_bufs(
978 	struct xfs_mount	*mp,
979 	xfs_dqid_t		firstid,
980 	xfs_fsblock_t		bno,
981 	xfs_filblks_t		blkcnt,
982 	uint			flags,
983 	struct list_head	*buffer_list)
984 {
985 	struct xfs_buf		*bp;
986 	int			error;
987 	int			type;
988 
989 	ASSERT(blkcnt > 0);
990 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
991 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
992 	error = 0;
993 
994 	/*
995 	 * Blkcnt arg can be a very big number, and might even be
996 	 * larger than the log itself. So, we have to break it up into
997 	 * manageable-sized transactions.
998 	 * Note that we don't start a permanent transaction here; we might
999 	 * not be able to get a log reservation for the whole thing up front,
1000 	 * and we don't really care to either, because we just discard
1001 	 * everything if we were to crash in the middle of this loop.
1002 	 */
1003 	while (blkcnt--) {
1004 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1005 			      XFS_FSB_TO_DADDR(mp, bno),
1006 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1007 			      &xfs_dquot_buf_ops);
1008 
1009 		/*
1010 		 * CRC and validation errors will return an EFSCORRUPTED here. If
1011 		 * this occurs, re-read without CRC validation so that we can
1012 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
1013 		 * will leave a trace in the log indicating corruption has
1014 		 * been detected.
1015 		 */
1016 		if (error == EFSCORRUPTED) {
1017 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1018 				      XFS_FSB_TO_DADDR(mp, bno),
1019 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1020 				      NULL);
1021 		}
1022 
1023 		if (error)
1024 			break;
1025 
1026 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1027 		xfs_buf_delwri_queue(bp, buffer_list);
1028 		xfs_buf_relse(bp);
1029 
1030 		/* go to the next block. */
1031 		bno++;
1032 		firstid += mp->m_quotainfo->qi_dqperchunk;
1033 	}
1034 
1035 	return error;
1036 }
1037 
1038 /*
1039  * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
1040  * caller supplied function for every chunk of dquots that we find.
1041  */
1042 STATIC int
1043 xfs_qm_dqiterate(
1044 	struct xfs_mount	*mp,
1045 	struct xfs_inode	*qip,
1046 	uint			flags,
1047 	struct list_head	*buffer_list)
1048 {
1049 	struct xfs_bmbt_irec	*map;
1050 	int			i, nmaps;	/* number of map entries */
1051 	int			error;		/* return value */
1052 	xfs_fileoff_t		lblkno;
1053 	xfs_filblks_t		maxlblkcnt;
1054 	xfs_dqid_t		firstid;
1055 	xfs_fsblock_t		rablkno;
1056 	xfs_filblks_t		rablkcnt;
1057 
1058 	error = 0;
1059 	/*
1060 	 * This looks racy, but we can't keep an inode lock across a
1061 	 * trans_reserve. But this gets called during quotacheck, and that
1062 	 * happens only at mount time, which is single threaded.
1063 	 */
1064 	if (qip->i_d.di_nblocks == 0)
1065 		return 0;
1066 
1067 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
1068 
1069 	lblkno = 0;
1070 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1071 	do {
1072 		nmaps = XFS_DQITER_MAP_SIZE;
1073 		/*
1074 		 * We aren't changing the inode itself. Just changing
1075 		 * some of its data. No new blocks are added here, and
1076 		 * the inode is never added to the transaction.
1077 		 */
1078 		xfs_ilock(qip, XFS_ILOCK_SHARED);
1079 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1080 				       map, &nmaps, 0);
1081 		xfs_iunlock(qip, XFS_ILOCK_SHARED);
1082 		if (error)
1083 			break;
1084 
1085 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1086 		for (i = 0; i < nmaps; i++) {
1087 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1088 			ASSERT(map[i].br_blockcount);
1089 
1090 
1091 			lblkno += map[i].br_blockcount;
1092 
1093 			if (map[i].br_startblock == HOLESTARTBLOCK)
1094 				continue;
1095 
1096 			firstid = (xfs_dqid_t) map[i].br_startoff *
1097 				mp->m_quotainfo->qi_dqperchunk;
1098 			/*
1099 			 * Do a read-ahead on the next extent.
1100 			 */
1101 			if ((i+1 < nmaps) &&
1102 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1103 				rablkcnt =  map[i+1].br_blockcount;
1104 				rablkno = map[i+1].br_startblock;
1105 				while (rablkcnt--) {
1106 					xfs_buf_readahead(mp->m_ddev_targp,
1107 					       XFS_FSB_TO_DADDR(mp, rablkno),
1108 					       mp->m_quotainfo->qi_dqchunklen,
1109 					       NULL);
1110 					rablkno++;
1111 				}
1112 			}
1113 			/*
1114 			 * Iterate thru all the blks in the extent and
1115 			 * reset the counters of all the dquots inside them.
1116 			 */
1117 			error = xfs_qm_dqiter_bufs(mp, firstid,
1118 						   map[i].br_startblock,
1119 						   map[i].br_blockcount,
1120 						   flags, buffer_list);
1121 			if (error)
1122 				goto out;
1123 		}
1124 	} while (nmaps > 0);
1125 
1126 out:
1127 	kmem_free(map);
1128 	return error;
1129 }
1130 
1131 /*
1132  * Called by dqusage_adjust in doing a quotacheck.
1133  *
1134  * Given the inode and a dquot id, this updates both the incore dquot as well
1135  * as the buffer copy. This is so that once the quotacheck is done, we can
1136  * just log all the buffers, as opposed to logging numerous updates to
1137  * individual dquots.
1138  */
1139 STATIC int
1140 xfs_qm_quotacheck_dqadjust(
1141 	struct xfs_inode	*ip,
1142 	xfs_dqid_t		id,
1143 	uint			type,
1144 	xfs_qcnt_t		nblks,
1145 	xfs_qcnt_t		rtblks)
1146 {
1147 	struct xfs_mount	*mp = ip->i_mount;
1148 	struct xfs_dquot	*dqp;
1149 	int			error;
1150 
1151 	error = xfs_qm_dqget(mp, ip, id, type,
1152 			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1153 	if (error) {
1154 		/*
1155 		 * Shouldn't be able to turn off quotas here.
1156 		 */
1157 		ASSERT(error != ESRCH);
1158 		ASSERT(error != ENOENT);
1159 		return error;
1160 	}
1161 
1162 	trace_xfs_dqadjust(dqp);
1163 
1164 	/*
1165 	 * Adjust the inode count and the block count to reflect this inode's
1166 	 * resource usage.
1167 	 */
1168 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1169 	dqp->q_res_icount++;
1170 	if (nblks) {
1171 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1172 		dqp->q_res_bcount += nblks;
1173 	}
1174 	if (rtblks) {
1175 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1176 		dqp->q_res_rtbcount += rtblks;
1177 	}
1178 
1179 	/*
1180 	 * Set default limits, adjust timers (since we changed usages)
1181 	 *
1182 	 * There are no timers for the default values set in the root dquot.
1183 	 */
1184 	if (dqp->q_core.d_id) {
1185 		xfs_qm_adjust_dqlimits(mp, dqp);
1186 		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1187 	}
1188 
1189 	dqp->dq_flags |= XFS_DQ_DIRTY;
1190 	xfs_qm_dqput(dqp);
1191 	return 0;
1192 }
1193 
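/*
 * Count the realtime blocks allocated to a realtime inode by walking its
 * data fork extent list.
 */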
1194 STATIC int
1195 xfs_qm_get_rtblks(
1196 	xfs_inode_t	*ip,
1197 	xfs_qcnt_t	*O_rtblks)
1198 {
1199 	xfs_filblks_t	rtblks;			/* total rt blks */
1200 	xfs_extnum_t	idx;			/* extent record index */
1201 	xfs_ifork_t	*ifp;			/* inode fork pointer */
1202 	xfs_extnum_t	nextents;		/* number of extent entries */
1203 	int		error;
1204 
1205 	ASSERT(XFS_IS_REALTIME_INODE(ip));
1206 	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1207 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1208 		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1209 			return error;
1210 	}
1211 	rtblks = 0;
1212 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1213 	for (idx = 0; idx < nextents; idx++)
1214 		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1215 	*O_rtblks = (xfs_qcnt_t)rtblks;
1216 	return 0;
1217 }
1218 
1219 /*
1220  * callback routine supplied to bulkstat(). Given an inumber, find its
1221  * dquots and update them to account for resources taken by that inode.
1222  */
1223 /* ARGSUSED */
1224 STATIC int
1225 xfs_qm_dqusage_adjust(
1226 	xfs_mount_t	*mp,		/* mount point for filesystem */
1227 	xfs_ino_t	ino,		/* inode number to get data for */
1228 	void		__user *buffer,	/* not used */
1229 	int		ubsize,		/* not used */
1230 	int		*ubused,	/* not used */
1231 	int		*res)		/* result code value */
1232 {
1233 	xfs_inode_t	*ip;
1234 	xfs_qcnt_t	nblks, rtblks = 0;
1235 	int		error;
1236 
1237 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1238 
1239 	/*
1240 	 * rootino must have its resources accounted for, not so with the quota
1241 	 * inodes.
1242 	 */
1243 	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1244 		*res = BULKSTAT_RV_NOTHING;
1245 		return XFS_ERROR(EINVAL);
1246 	}
1247 
1248 	/*
1249 	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1250 	 * interface expects the inode to be exclusively locked because that's
1251 	 * the case in all other instances. It's OK that we do this because
1252 	 * quotacheck is done only at mount time.
1253 	 */
1254 	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1255 	if (error) {
1256 		*res = BULKSTAT_RV_NOTHING;
1257 		return error;
1258 	}
1259 
1260 	ASSERT(ip->i_delayed_blks == 0);
1261 
1262 	if (XFS_IS_REALTIME_INODE(ip)) {
1263 		/*
1264 		 * Walk thru the extent list and count the realtime blocks.
1265 		 */
1266 		error = xfs_qm_get_rtblks(ip, &rtblks);
1267 		if (error)
1268 			goto error0;
1269 	}
1270 
1271 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1272 
1273 	/*
1274 	 * Add the (disk blocks and inode) resources occupied by this
1275 	 * inode to its dquots. We do this adjustment in the incore dquot,
1276 	 * and also copy the changes to its buffer.
1277 	 * We don't care about putting these changes in a transaction
1278 	 * envelope because if we crash in the middle of a 'quotacheck'
1279 	 * we have to start from the beginning anyway.
1280 	 * Once we're done, we'll log all the dquot bufs.
1281 	 *
1282 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1283 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1284 	 */
1285 	if (XFS_IS_UQUOTA_ON(mp)) {
1286 		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1287 						   XFS_DQ_USER, nblks, rtblks);
1288 		if (error)
1289 			goto error0;
1290 	}
1291 
1292 	if (XFS_IS_GQUOTA_ON(mp)) {
1293 		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1294 						   XFS_DQ_GROUP, nblks, rtblks);
1295 		if (error)
1296 			goto error0;
1297 	}
1298 
1299 	if (XFS_IS_PQUOTA_ON(mp)) {
1300 		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1301 						   XFS_DQ_PROJ, nblks, rtblks);
1302 		if (error)
1303 			goto error0;
1304 	}
1305 
1306 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1307 	IRELE(ip);
1308 	*res = BULKSTAT_RV_DIDONE;
1309 	return 0;
1310 
1311 error0:
1312 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1313 	IRELE(ip);
1314 	*res = BULKSTAT_RV_GIVEUP;
1315 	return error;
1316 }
1317 
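/*
 * Write a single dirty dquot back to its buffer and add that buffer to the
 * caller's delwri list.  Dquots that are being freed or are already clean
 * are skipped.
 */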
1318 STATIC int
1319 xfs_qm_flush_one(
1320 	struct xfs_dquot	*dqp,
1321 	void			*data)
1322 {
1323 	struct list_head	*buffer_list = data;
1324 	struct xfs_buf		*bp = NULL;
1325 	int			error = 0;
1326 
1327 	xfs_dqlock(dqp);
1328 	if (dqp->dq_flags & XFS_DQ_FREEING)
1329 		goto out_unlock;
1330 	if (!XFS_DQ_IS_DIRTY(dqp))
1331 		goto out_unlock;
1332 
1333 	xfs_dqflock(dqp);
1334 	error = xfs_qm_dqflush(dqp, &bp);
1335 	if (error)
1336 		goto out_unlock;
1337 
1338 	xfs_buf_delwri_queue(bp, buffer_list);
1339 	xfs_buf_relse(bp);
1340 out_unlock:
1341 	xfs_dqunlock(dqp);
1342 	return error;
1343 }
1344 
1345 /*
1346  * Walk thru all the filesystem inodes and construct a consistent view
1347  * of the disk quota world. If the quotacheck fails, disable quotas.
1348  */
1349 int
1350 xfs_qm_quotacheck(
1351 	xfs_mount_t	*mp)
1352 {
1353 	int			done, count, error, error2;
1354 	xfs_ino_t		lastino;
1355 	size_t			structsz;
1356 	uint			flags;
1357 	LIST_HEAD		(buffer_list);
1358 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1359 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1360 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1361 
1362 	count = INT_MAX;
1363 	structsz = 1;
1364 	lastino = 0;
1365 	flags = 0;
1366 
1367 	ASSERT(uip || gip || pip);
1368 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1369 
1370 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1371 
1372 	/*
1373 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1374 	 * their counters to zero. We need a clean slate.
1375 	 * We don't log our changes till later.
1376 	 */
1377 	if (uip) {
1378 		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1379 					 &buffer_list);
1380 		if (error)
1381 			goto error_return;
1382 		flags |= XFS_UQUOTA_CHKD;
1383 	}
1384 
1385 	if (gip) {
1386 		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1387 					 &buffer_list);
1388 		if (error)
1389 			goto error_return;
1390 		flags |= XFS_GQUOTA_CHKD;
1391 	}
1392 
1393 	if (pip) {
1394 		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1395 					 &buffer_list);
1396 		if (error)
1397 			goto error_return;
1398 		flags |= XFS_PQUOTA_CHKD;
1399 	}
1400 
1401 	do {
1402 		/*
1403 		 * Iterate thru all the inodes in the file system,
1404 		 * adjusting the corresponding dquot counters in core.
1405 		 */
1406 		error = xfs_bulkstat(mp, &lastino, &count,
1407 				     xfs_qm_dqusage_adjust,
1408 				     structsz, NULL, &done);
1409 		if (error)
1410 			break;
1411 
1412 	} while (!done);
1413 
1414 	/*
1415 	 * We've made all the changes that we need to make incore.  Flush them
1416 	 * down to disk buffers if everything was updated successfully.
1417 	 */
1418 	if (XFS_IS_UQUOTA_ON(mp)) {
1419 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1420 					  &buffer_list);
1421 	}
1422 	if (XFS_IS_GQUOTA_ON(mp)) {
1423 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1424 					   &buffer_list);
1425 		if (!error)
1426 			error = error2;
1427 	}
1428 	if (XFS_IS_PQUOTA_ON(mp)) {
1429 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1430 					   &buffer_list);
1431 		if (!error)
1432 			error = error2;
1433 	}
1434 
1435 	error2 = xfs_buf_delwri_submit(&buffer_list);
1436 	if (!error)
1437 		error = error2;
1438 
1439 	/*
1440 	 * We can get this error if we couldn't do a dquot allocation inside
1441 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1442 	 * dirty dquots that might be cached, we just want to get rid of them
1443 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1444 	 * at this point (because we intentionally didn't in dqget_noattach).
1445 	 */
1446 	if (error) {
1447 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1448 		goto error_return;
1449 	}
1450 
1451 	/*
1452 	 * If one type of quota is off, then it will lose its
1453 	 * quotachecked status, since we won't be doing accounting for
1454 	 * that type anymore.
1455 	 */
1456 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1457 	mp->m_qflags |= flags;
1458 
1459  error_return:
1460 	while (!list_empty(&buffer_list)) {
1461 		struct xfs_buf *bp =
1462 			list_first_entry(&buffer_list, struct xfs_buf, b_list);
1463 		list_del_init(&bp->b_list);
1464 		xfs_buf_relse(bp);
1465 	}
1466 
1467 	if (error) {
1468 		xfs_warn(mp,
1469 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1470 			error);
1471 		/*
1472 		 * We must turn off quotas.
1473 		 */
1474 		ASSERT(mp->m_quotainfo != NULL);
1475 		xfs_qm_destroy_quotainfo(mp);
1476 		if (xfs_mount_reset_sbqflags(mp)) {
1477 			xfs_warn(mp,
1478 				"Quotacheck: Failed to reset quota flags.");
1479 		}
1480 	} else
1481 		xfs_notice(mp, "Quotacheck: Done.");
1482 	return (error);
1483 }
1484 
1485 /*
1486  * This is called after the superblock has been read in and we're ready to
1487  * iget the quota inodes.
1488  */
1489 STATIC int
1490 xfs_qm_init_quotainos(
1491 	xfs_mount_t	*mp)
1492 {
1493 	struct xfs_inode	*uip = NULL;
1494 	struct xfs_inode	*gip = NULL;
1495 	struct xfs_inode	*pip = NULL;
1496 	int			error;
1497 	__int64_t		sbflags = 0;
1498 	uint			flags = 0;
1499 
1500 	ASSERT(mp->m_quotainfo);
1501 
1502 	/*
1503 	 * Get the uquota, gquota and pquota inodes
1504 	 */
1505 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1506 		if (XFS_IS_UQUOTA_ON(mp) &&
1507 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1508 			ASSERT(mp->m_sb.sb_uquotino > 0);
1509 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1510 					     0, 0, &uip);
1511 			if (error)
1512 				return XFS_ERROR(error);
1513 		}
1514 		if (XFS_IS_GQUOTA_ON(mp) &&
1515 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1516 			ASSERT(mp->m_sb.sb_gquotino > 0);
1517 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1518 					     0, 0, &gip);
1519 			if (error)
1520 				goto error_rele;
1521 		}
1522 		if (XFS_IS_PQUOTA_ON(mp) &&
1523 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1524 			ASSERT(mp->m_sb.sb_pquotino > 0);
1525 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1526 					     0, 0, &pip);
1527 			if (error)
1528 				goto error_rele;
1529 		}
1530 	} else {
1531 		flags |= XFS_QMOPT_SBVERSION;
1532 		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1533 			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1534 			    XFS_SB_QFLAGS);
1535 	}
1536 
1537 	/*
1538 	 * Create the three inodes, if they don't exist already. The changes
1539 	 * made above will get added to a transaction and logged in one of
1540 	 * the qino_alloc calls below.  If the device is readonly,
1541 	 * temporarily switch to read-write to do this.
1542 	 */
1543 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1544 		error = xfs_qm_qino_alloc(mp, &uip,
1545 					      sbflags | XFS_SB_UQUOTINO,
1546 					      flags | XFS_QMOPT_UQUOTA);
1547 		if (error)
1548 			goto error_rele;
1549 
1550 		flags &= ~XFS_QMOPT_SBVERSION;
1551 	}
1552 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1553 		error = xfs_qm_qino_alloc(mp, &gip,
1554 					  sbflags | XFS_SB_GQUOTINO,
1555 					  flags | XFS_QMOPT_GQUOTA);
1556 		if (error)
1557 			goto error_rele;
1558 
1559 		flags &= ~XFS_QMOPT_SBVERSION;
1560 	}
1561 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1562 		error = xfs_qm_qino_alloc(mp, &pip,
1563 					  sbflags | XFS_SB_PQUOTINO,
1564 					  flags | XFS_QMOPT_PQUOTA);
1565 		if (error)
1566 			goto error_rele;
1567 	}
1568 
1569 	mp->m_quotainfo->qi_uquotaip = uip;
1570 	mp->m_quotainfo->qi_gquotaip = gip;
1571 	mp->m_quotainfo->qi_pquotaip = pip;
1572 
1573 	return 0;
1574 
1575 error_rele:
1576 	if (uip)
1577 		IRELE(uip);
1578 	if (gip)
1579 		IRELE(gip);
1580 	if (pip)
1581 		IRELE(pip);
1582 	return XFS_ERROR(error);
1583 }
1584 
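/*
 * Remove a dquot from the radix tree and free its memory.  Called for
 * dquots on the dispose list built up by the shrinker.
 */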
1585 STATIC void
1586 xfs_qm_dqfree_one(
1587 	struct xfs_dquot	*dqp)
1588 {
1589 	struct xfs_mount	*mp = dqp->q_mount;
1590 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1591 
1592 	mutex_lock(&qi->qi_tree_lock);
1593 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1594 			  be32_to_cpu(dqp->q_core.d_id));
1595 
1596 	qi->qi_dquots--;
1597 	mutex_unlock(&qi->qi_tree_lock);
1598 
1599 	xfs_qm_dqdestroy(dqp);
1600 }
1601 
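/*
 * Attempt to reclaim a single dquot from the LRU: referenced dquots are
 * taken off the list, clean unreferenced dquots are moved to the dispose
 * list, dirty ones are flushed for a later pass, and busy ones are moved
 * to the tail of the LRU so we don't spin on them.
 */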
1602 STATIC void
1603 xfs_qm_dqreclaim_one(
1604 	struct xfs_dquot	*dqp,
1605 	struct list_head	*buffer_list,
1606 	struct list_head	*dispose_list)
1607 {
1608 	struct xfs_mount	*mp = dqp->q_mount;
1609 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1610 	int			error;
1611 
1612 	if (!xfs_dqlock_nowait(dqp))
1613 		goto out_move_tail;
1614 
1615 	/*
1616 	 * This dquot has acquired a reference in the meantime; remove it from
1617 	 * the freelist and try again.
1618 	 */
1619 	if (dqp->q_nrefs) {
1620 		xfs_dqunlock(dqp);
1621 
1622 		trace_xfs_dqreclaim_want(dqp);
1623 		XFS_STATS_INC(xs_qm_dqwants);
1624 
1625 		list_del_init(&dqp->q_lru);
1626 		qi->qi_lru_count--;
1627 		XFS_STATS_DEC(xs_qm_dquot_unused);
1628 		return;
1629 	}
1630 
1631 	/*
1632 	 * Try to grab the flush lock. If this dquot is in the process of
1633 	 * getting flushed to disk, we don't want to reclaim it.
1634 	 */
1635 	if (!xfs_dqflock_nowait(dqp))
1636 		goto out_unlock_move_tail;
1637 
1638 	if (XFS_DQ_IS_DIRTY(dqp)) {
1639 		struct xfs_buf	*bp = NULL;
1640 
1641 		trace_xfs_dqreclaim_dirty(dqp);
1642 
1643 		error = xfs_qm_dqflush(dqp, &bp);
1644 		if (error) {
1645 			xfs_warn(mp, "%s: dquot %p flush failed",
1646 				 __func__, dqp);
1647 			goto out_unlock_move_tail;
1648 		}
1649 
1650 		xfs_buf_delwri_queue(bp, buffer_list);
1651 		xfs_buf_relse(bp);
1652 		/*
1653 		 * Give the dquot another try on the freelist, as the
1654 		 * flushing will take some time.
1655 		 */
1656 		goto out_unlock_move_tail;
1657 	}
1658 	xfs_dqfunlock(dqp);
1659 
1660 	/*
1661 	 * Prevent lookups now that we are past the point of no return.
1662 	 */
1663 	dqp->dq_flags |= XFS_DQ_FREEING;
1664 	xfs_dqunlock(dqp);
1665 
1666 	ASSERT(dqp->q_nrefs == 0);
1667 	list_move_tail(&dqp->q_lru, dispose_list);
1668 	qi->qi_lru_count--;
1669 	XFS_STATS_DEC(xs_qm_dquot_unused);
1670 
1671 	trace_xfs_dqreclaim_done(dqp);
1672 	XFS_STATS_INC(xs_qm_dqreclaims);
1673 	return;
1674 
1675 	/*
1676 	 * Move the dquot to the tail of the list so that we don't spin on it.
1677 	 */
1678 out_unlock_move_tail:
1679 	xfs_dqunlock(dqp);
1680 out_move_tail:
1681 	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
1682 	trace_xfs_dqreclaim_busy(dqp);
1683 	XFS_STATS_INC(xs_qm_dqreclaim_misses);
1684 }
1685 
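/*
 * Shrinker callback: walk the dquot LRU, reclaim up to nr_to_scan entries,
 * and report the remaining LRU size scaled by vfs_cache_pressure.
 */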
1686 STATIC int
1687 xfs_qm_shake(
1688 	struct shrinker		*shrink,
1689 	struct shrink_control	*sc)
1690 {
1691 	struct xfs_quotainfo	*qi =
1692 		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
1693 	int			nr_to_scan = sc->nr_to_scan;
1694 	LIST_HEAD		(buffer_list);
1695 	LIST_HEAD		(dispose_list);
1696 	struct xfs_dquot	*dqp;
1697 	int			error;
1698 
1699 	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1700 		return 0;
1701 	if (!nr_to_scan)
1702 		goto out;
1703 
1704 	mutex_lock(&qi->qi_lru_lock);
1705 	while (!list_empty(&qi->qi_lru_list)) {
1706 		if (nr_to_scan-- <= 0)
1707 			break;
1708 		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
1709 				       q_lru);
1710 		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
1711 	}
1712 	mutex_unlock(&qi->qi_lru_lock);
1713 
1714 	error = xfs_buf_delwri_submit(&buffer_list);
1715 	if (error)
1716 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
1717 
1718 	while (!list_empty(&dispose_list)) {
1719 		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
1720 		list_del_init(&dqp->q_lru);
1721 		xfs_qm_dqfree_one(dqp);
1722 	}
1723 
1724 out:
1725 	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
1726 }
1727 
1728 /*
1729  * Start a transaction and write the incore superblock changes to
1730  * disk. The flags parameter indicates which fields have changed.
1731  */
1732 int
1733 xfs_qm_write_sb_changes(
1734 	xfs_mount_t	*mp,
1735 	__int64_t	flags)
1736 {
1737 	xfs_trans_t	*tp;
1738 	int		error;
1739 
1740 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1741 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
1742 	if (error) {
1743 		xfs_trans_cancel(tp, 0);
1744 		return error;
1745 	}
1746 
1747 	xfs_mod_sb(tp, flags);
1748 	error = xfs_trans_commit(tp, 0);
1749 
1750 	return error;
1751 }
1752 
1753 
1754 /* --------------- utility functions for vnodeops ---------------- */
1755 
1756 
1757 /*
1758  * Given an inode, a uid, gid and prid, make sure that we have
1759  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1760  * quotas by creating this file.
1761  * This also attaches dquot(s) to the given inode after locking it,
1762  * and returns the dquots corresponding to the uid and/or gid.
1763  *
1764  * in	: inode (unlocked)
1765  * out	: udquot, gdquot with references taken and unlocked
1766  */
1767 int
1768 xfs_qm_vop_dqalloc(
1769 	struct xfs_inode	*ip,
1770 	xfs_dqid_t		uid,
1771 	xfs_dqid_t		gid,
1772 	prid_t			prid,
1773 	uint			flags,
1774 	struct xfs_dquot	**O_udqpp,
1775 	struct xfs_dquot	**O_gdqpp,
1776 	struct xfs_dquot	**O_pdqpp)
1777 {
1778 	struct xfs_mount	*mp = ip->i_mount;
1779 	struct xfs_dquot	*uq = NULL;
1780 	struct xfs_dquot	*gq = NULL;
1781 	struct xfs_dquot	*pq = NULL;
1782 	int			error;
1783 	uint			lockflags;
1784 
1785 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1786 		return 0;
1787 
1788 	lockflags = XFS_ILOCK_EXCL;
1789 	xfs_ilock(ip, lockflags);
1790 
1791 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1792 		gid = ip->i_d.di_gid;
1793 
1794 	/*
1795 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1796 	 * if necessary. The dquot(s) will not be locked.
1797 	 */
1798 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1799 		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1800 		if (error) {
1801 			xfs_iunlock(ip, lockflags);
1802 			return error;
1803 		}
1804 	}
1805 
1806 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1807 		if (ip->i_d.di_uid != uid) {
1808 			/*
1809 			 * What we need is the dquot that has this uid, and
1810 			 * if we send the inode to dqget, the uid of the inode
1811 			 * takes priority over what's sent in the uid argument.
1812 			 * We must unlock the inode here before calling dqget if
1813 			 * we're not sending the inode, because otherwise
1814 			 * we'll deadlock by doing trans_reserve while
1815 			 * holding ilock.
1816 			 */
1817 			xfs_iunlock(ip, lockflags);
1818 			error = xfs_qm_dqget(mp, NULL, uid,
1819 						 XFS_DQ_USER,
1820 						 XFS_QMOPT_DQALLOC |
1821 						 XFS_QMOPT_DOWARN,
1822 						 &uq);
1823 			if (error) {
1824 				ASSERT(error != ENOENT);
1825 				return error;
1826 			}
1827 			/*
1828 			 * Get the ilock in the right order.
1829 			 */
1830 			xfs_dqunlock(uq);
1831 			lockflags = XFS_ILOCK_SHARED;
1832 			xfs_ilock(ip, lockflags);
1833 		} else {
1834 			/*
1835 			 * Take an extra reference, because we'll return
1836 			 * this to the caller.
1837 			 */
1838 			ASSERT(ip->i_udquot);
1839 			uq = xfs_qm_dqhold(ip->i_udquot);
1840 		}
1841 	}
1842 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1843 		if (ip->i_d.di_gid != gid) {
1844 			xfs_iunlock(ip, lockflags);
1845 			error = xfs_qm_dqget(mp, NULL, gid,
1846 						 XFS_DQ_GROUP,
1847 						 XFS_QMOPT_DQALLOC |
1848 						 XFS_QMOPT_DOWARN,
1849 						 &gq);
1850 			if (error) {
1851 				ASSERT(error != ENOENT);
1852 				goto error_rele;
1853 			}
1854 			xfs_dqunlock(gq);
1855 			lockflags = XFS_ILOCK_SHARED;
1856 			xfs_ilock(ip, lockflags);
1857 		} else {
1858 			ASSERT(ip->i_gdquot);
1859 			gq = xfs_qm_dqhold(ip->i_gdquot);
1860 		}
1861 	}
1862 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1863 		if (xfs_get_projid(ip) != prid) {
1864 			xfs_iunlock(ip, lockflags);
1865 			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1866 						 XFS_DQ_PROJ,
1867 						 XFS_QMOPT_DQALLOC |
1868 						 XFS_QMOPT_DOWARN,
1869 						 &pq);
1870 			if (error) {
1871 				ASSERT(error != ENOENT);
1872 				goto error_rele;
1873 			}
1874 			xfs_dqunlock(pq);
1875 			lockflags = XFS_ILOCK_SHARED;
1876 			xfs_ilock(ip, lockflags);
1877 		} else {
1878 			ASSERT(ip->i_pdquot);
1879 			pq = xfs_qm_dqhold(ip->i_pdquot);
1880 		}
1881 	}
1882 	if (uq)
1883 		trace_xfs_dquot_dqalloc(ip);
1884 
1885 	xfs_iunlock(ip, lockflags);
1886 	if (O_udqpp)
1887 		*O_udqpp = uq;
1888 	else if (uq)
1889 		xfs_qm_dqrele(uq);
1890 	if (O_gdqpp)
1891 		*O_gdqpp = gq;
1892 	else if (gq)
1893 		xfs_qm_dqrele(gq);
1894 	if (O_pdqpp)
1895 		*O_pdqpp = pq;
1896 	else if (pq)
1897 		xfs_qm_dqrele(pq);
1898 	return 0;
1899 
1900 error_rele:
1901 	if (gq)
1902 		xfs_qm_dqrele(gq);
1903 	if (uq)
1904 		xfs_qm_dqrele(uq);
1905 	return error;
1906 }
1907 
1908 /*
1909  * Actually transfer ownership, and do dquot modifications.
1910  * These were already reserved.
1911  */
1912 xfs_dquot_t *
1913 xfs_qm_vop_chown(
1914 	xfs_trans_t	*tp,
1915 	xfs_inode_t	*ip,
1916 	xfs_dquot_t	**IO_olddq,
1917 	xfs_dquot_t	*newdq)
1918 {
1919 	xfs_dquot_t	*prevdq;
1920 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1921 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1922 
1923 
1924 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1925 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1926 
1927 	/* old dquot */
1928 	prevdq = *IO_olddq;
1929 	ASSERT(prevdq);
1930 	ASSERT(prevdq != newdq);
1931 
1932 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1933 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1934 
1935 	/* the sparkling new dquot */
1936 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1937 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1938 
1939 	/*
1940 	 * Take an extra reference, because the inode is going to keep
1941 	 * this dquot pointer even after the trans_commit.
1942 	 */
1943 	*IO_olddq = xfs_qm_dqhold(newdq);
1944 
1945 	return prevdq;
1946 }
1947 
1948 /*
1949  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1950  */
1951 int
1952 xfs_qm_vop_chown_reserve(
1953 	struct xfs_trans	*tp,
1954 	struct xfs_inode	*ip,
1955 	struct xfs_dquot	*udqp,
1956 	struct xfs_dquot	*gdqp,
1957 	struct xfs_dquot	*pdqp,
1958 	uint			flags)
1959 {
1960 	struct xfs_mount	*mp = ip->i_mount;
1961 	uint			delblks, blkflags, prjflags = 0;
1962 	struct xfs_dquot	*udq_unres = NULL;
1963 	struct xfs_dquot	*gdq_unres = NULL;
1964 	struct xfs_dquot	*pdq_unres = NULL;
1965 	struct xfs_dquot	*udq_delblks = NULL;
1966 	struct xfs_dquot	*gdq_delblks = NULL;
1967 	struct xfs_dquot	*pdq_delblks = NULL;
1968 	int			error;
1969 
1970 
1971 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1972 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1973 
1974 	delblks = ip->i_delayed_blks;
1975 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1976 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1977 
1978 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1979 	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
1980 		udq_delblks = udqp;
1981 		/*
1982 		 * If there are delayed allocation blocks, then we have to
1983 		 * unreserve those from the old dquot, and add them to the
1984 		 * new dquot.
1985 		 */
1986 		if (delblks) {
1987 			ASSERT(ip->i_udquot);
1988 			udq_unres = ip->i_udquot;
1989 		}
1990 	}
1991 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1992 	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
1993 		gdq_delblks = gdqp;
1994 		if (delblks) {
1995 			ASSERT(ip->i_gdquot);
1996 			gdq_unres = ip->i_gdquot;
1997 		}
1998 	}
1999 
2000 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
2001 	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
2002 		prjflags = XFS_QMOPT_ENOSPC;
2003 		pdq_delblks = pdqp;
2004 		if (delblks) {
2005 			ASSERT(ip->i_pdquot);
2006 			pdq_unres = ip->i_pdquot;
2007 		}
2008 	}
2009 
2010 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2011 				udq_delblks, gdq_delblks, pdq_delblks,
2012 				ip->i_d.di_nblocks, 1,
2013 				flags | blkflags | prjflags);
2014 	if (error)
2015 		return error;
2016 
2017 	/*
2018 	 * Do the delayed blks reservations/unreservations now. Since these
2019 	 * are done without the help of a transaction, if a reservation fails,
2020 	 * its previous reservations won't be automatically undone by trans
2021 	 * code. So, we have to do it manually here.
2022 	 */
2023 	if (delblks) {
2024 		/*
2025 		 * Do the reservations first. Unreservation can't fail.
2026 		 */
2027 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
2028 		ASSERT(udq_unres || gdq_unres || pdq_unres);
2029 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2030 			    udq_delblks, gdq_delblks, pdq_delblks,
2031 			    (xfs_qcnt_t)delblks, 0,
2032 			    flags | blkflags | prjflags);
2033 		if (error)
2034 			return error;
2035 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2036 				udq_unres, gdq_unres, pdq_unres,
2037 				-((xfs_qcnt_t)delblks), 0, blkflags);
2038 	}
2039 
2040 	return (0);
2041 }
2042 
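/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached.
 */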
2043 int
2044 xfs_qm_vop_rename_dqattach(
2045 	struct xfs_inode	**i_tab)
2046 {
2047 	struct xfs_mount	*mp = i_tab[0]->i_mount;
2048 	int			i;
2049 
2050 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2051 		return 0;
2052 
2053 	for (i = 0; (i < 4 && i_tab[i]); i++) {
2054 		struct xfs_inode	*ip = i_tab[i];
2055 		int			error;
2056 
2057 		/*
2058 		 * Watch out for duplicate entries in the table.
2059 		 */
2060 		if (i == 0 || ip != i_tab[i-1]) {
2061 			if (XFS_NOT_DQATTACHED(mp, ip)) {
2062 				error = xfs_qm_dqattach(ip, 0);
2063 				if (error)
2064 					return error;
2065 			}
2066 		}
2067 	}
2068 	return 0;
2069 }
2070 
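/*
 * Attach the supplied dquots to a newly created inode and account the new
 * inode against each of them in the given transaction.
 */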
2071 void
2072 xfs_qm_vop_create_dqattach(
2073 	struct xfs_trans	*tp,
2074 	struct xfs_inode	*ip,
2075 	struct xfs_dquot	*udqp,
2076 	struct xfs_dquot	*gdqp,
2077 	struct xfs_dquot	*pdqp)
2078 {
2079 	struct xfs_mount	*mp = tp->t_mountp;
2080 
2081 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2082 		return;
2083 
2084 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2085 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2086 
2087 	if (udqp) {
2088 		ASSERT(ip->i_udquot == NULL);
2089 		ASSERT(XFS_IS_UQUOTA_ON(mp));
2090 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2091 
2092 		ip->i_udquot = xfs_qm_dqhold(udqp);
2093 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2094 	}
2095 	if (gdqp) {
2096 		ASSERT(ip->i_gdquot == NULL);
2097 		ASSERT(XFS_IS_GQUOTA_ON(mp));
2098 		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2099 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
2100 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2101 	}
2102 	if (pdqp) {
2103 		ASSERT(ip->i_pdquot == NULL);
2104 		ASSERT(XFS_IS_PQUOTA_ON(mp));
2105 		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2106 
2107 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
2108 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
2109 	}
2110 }
2111 
2112