1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 
27 /*
28  * The global quota manager. There is only one of these for the entire
29  * system, _not_ one per file system. XQM keeps track of the overall
30  * quota functionality, including maintaining the freelist and hash
31  * tables of dquots.
32  */
33 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
34 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
35 
36 STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38 /*
39  * We use the batch lookup interface to iterate over the dquots as it
40  * currently is the only interface into the radix tree code that allows
41  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
42  * operations is fine because all callers run only during mount/umount or
43  * quotaoff.
44  */
45 #define XFS_DQ_LOOKUP_BATCH	32
46 
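/*
 * Walk all dquots of the given type in id order, calling @execute on each.
 * The tree lock is dropped between batches; the walk resumes from the id
 * just past the last dquot processed, so entries inserted behind the
 * cursor are not revisited.
 */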
47 STATIC int
48 xfs_qm_dquot_walk(
49 	struct xfs_mount	*mp,
50 	xfs_dqtype_t		type,
51 	int			(*execute)(struct xfs_dquot *dqp, void *data),
52 	void			*data)
53 {
54 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
55 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
56 	uint32_t		next_index;
57 	int			last_error = 0;
58 	int			skipped;
59 	int			nr_found;
60 
61 restart:
62 	skipped = 0;
63 	next_index = 0;
64 	nr_found = 0;
65 
66 	while (1) {
67 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 		int		error = 0;
69 		int		i;
70 
71 		mutex_lock(&qi->qi_tree_lock);
72 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 					next_index, XFS_DQ_LOOKUP_BATCH);
74 		if (!nr_found) {
75 			mutex_unlock(&qi->qi_tree_lock);
76 			break;
77 		}
78 
79 		for (i = 0; i < nr_found; i++) {
80 			struct xfs_dquot *dqp = batch[i];
81 
82 			next_index = dqp->q_id + 1;
83 
84 			error = execute(batch[i], data);
85 			if (error == -EAGAIN) {
86 				skipped++;
87 				continue;
88 			}
89 			if (error && last_error != -EFSCORRUPTED)
90 				last_error = error;
91 		}
92 
93 		mutex_unlock(&qi->qi_tree_lock);
94 
95 		/* bail out if the filesystem is corrupted. */
96 		if (last_error == -EFSCORRUPTED) {
97 			skipped = 0;
98 			break;
99 		}
100 		/* we're done if id overflows back to zero */
101 		if (!next_index)
102 			break;
103 	}
104 
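	/*
	 * Some dquots returned -EAGAIN; back off briefly and rescan the
	 * whole tree so they get another chance.
	 */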
105 	if (skipped) {
106 		delay(1);
107 		goto restart;
108 	}
109 
110 	return last_error;
111 }
112 
113 
114 /*
115  * Purge a dquot from all tracking data structures and free it.
116  */
117 STATIC int
118 xfs_qm_dqpurge(
119 	struct xfs_dquot	*dqp,
120 	void			*data)
121 {
122 	struct xfs_mount	*mp = dqp->q_mount;
123 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
124 	int			error = -EAGAIN;
125 
126 	xfs_dqlock(dqp);
127 	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
128 		goto out_unlock;
129 
130 	dqp->q_flags |= XFS_DQFLAG_FREEING;
131 
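	/*
	 * Take the flush lock so that nobody else can flush this dquot
	 * while we tear it down.
	 */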
132 	xfs_dqflock(dqp);
133 
134 	/*
135 	 * If we are turning this type of quota off, we don't care
136 	 * about the dirty metadata sitting in this dquot. OTOH, if
137 	 * we're unmounting, we do care, so we flush it and wait.
138 	 */
139 	if (XFS_DQ_IS_DIRTY(dqp)) {
140 		struct xfs_buf	*bp = NULL;
141 
142 		/*
143 		 * We don't care about getting disk errors here. We need
144 		 * to purge this dquot anyway, so we go ahead regardless.
145 		 */
146 		error = xfs_qm_dqflush(dqp, &bp);
147 		if (!error) {
148 			error = xfs_bwrite(bp);
149 			xfs_buf_relse(bp);
150 		} else if (error == -EAGAIN) {
151 			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
152 			goto out_unlock;
153 		}
154 		xfs_dqflock(dqp);
155 	}
156 
157 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
158 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
159 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
160 
161 	xfs_dqfunlock(dqp);
162 	xfs_dqunlock(dqp);
163 
164 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
165 	qi->qi_dquots--;
166 
167 	/*
168 	 * We move dquots to the freelist as soon as their reference count
169 	 * hits zero, so it really should be on the freelist here.
170 	 */
171 	ASSERT(!list_empty(&dqp->q_lru));
172 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
173 	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
174 
175 	xfs_qm_dqdestroy(dqp);
176 	return 0;
177 
178 out_unlock:
179 	xfs_dqunlock(dqp);
180 	return error;
181 }
182 
183 /*
184  * Purge the dquot cache.
185  */
186 void
187 xfs_qm_dqpurge_all(
188 	struct xfs_mount	*mp,
189 	uint			flags)
190 {
191 	if (flags & XFS_QMOPT_UQUOTA)
192 		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
193 	if (flags & XFS_QMOPT_GQUOTA)
194 		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 	if (flags & XFS_QMOPT_PQUOTA)
196 		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
197 }
198 
199 /*
200  * Just destroy the quotainfo structure.
201  */
202 void
203 xfs_qm_unmount(
204 	struct xfs_mount	*mp)
205 {
206 	if (mp->m_quotainfo) {
207 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
208 		xfs_qm_destroy_quotainfo(mp);
209 	}
210 }
211 
212 /*
213  * Called from the vfsops layer.
214  */
215 void
216 xfs_qm_unmount_quotas(
217 	xfs_mount_t	*mp)
218 {
219 	/*
220 	 * Release the dquots that the root inode, et al. might be holding,
221 	 * before we flush quotas and blow away the quotainfo structure.
222 	 */
223 	ASSERT(mp->m_rootip);
224 	xfs_qm_dqdetach(mp->m_rootip);
225 	if (mp->m_rbmip)
226 		xfs_qm_dqdetach(mp->m_rbmip);
227 	if (mp->m_rsumip)
228 		xfs_qm_dqdetach(mp->m_rsumip);
229 
230 	/*
231 	 * Release the quota inodes.
232 	 */
233 	if (mp->m_quotainfo) {
234 		if (mp->m_quotainfo->qi_uquotaip) {
235 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
236 			mp->m_quotainfo->qi_uquotaip = NULL;
237 		}
238 		if (mp->m_quotainfo->qi_gquotaip) {
239 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
240 			mp->m_quotainfo->qi_gquotaip = NULL;
241 		}
242 		if (mp->m_quotainfo->qi_pquotaip) {
243 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
244 			mp->m_quotainfo->qi_pquotaip = NULL;
245 		}
246 	}
247 }
248 
249 STATIC int
250 xfs_qm_dqattach_one(
251 	struct xfs_inode	*ip,
252 	xfs_dqtype_t		type,
253 	bool			doalloc,
254 	struct xfs_dquot	**IO_idqpp)
255 {
256 	struct xfs_dquot	*dqp;
257 	int			error;
258 
259 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
260 	error = 0;
261 
262 	/*
263 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
264 	 * or &i_gdquot. This made the code look weird, but made the logic a lot
265 	 * simpler.
266 	 */
267 	dqp = *IO_idqpp;
268 	if (dqp) {
269 		trace_xfs_dqattach_found(dqp);
270 		return 0;
271 	}
272 
273 	/*
274 	 * Find the dquot from somewhere. This bumps the reference count of
275 	 * the dquot and returns it locked.  This can return ENOENT if the dquot
276 	 * didn't exist on disk and we didn't ask it to allocate; ESRCH if quotas got
277 	 * turned off suddenly.
278 	 */
279 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
280 	if (error)
281 		return error;
282 
283 	trace_xfs_dqattach_get(dqp);
284 
285 	/*
286 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
287 	 * that the dquot returned is the one that should go in the inode.
288 	 */
289 	*IO_idqpp = dqp;
290 	xfs_dqunlock(dqp);
291 	return 0;
292 }
293 
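/*
 * Decide whether this inode needs dquots attached: quota must be running
 * and on, the inode must not already have all of its dquots, and the
 * quota inodes themselves never get dquots attached.
 */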
294 static bool
295 xfs_qm_need_dqattach(
296 	struct xfs_inode	*ip)
297 {
298 	struct xfs_mount	*mp = ip->i_mount;
299 
300 	if (!XFS_IS_QUOTA_RUNNING(mp))
301 		return false;
302 	if (!XFS_IS_QUOTA_ON(mp))
303 		return false;
304 	if (!XFS_NOT_DQATTACHED(mp, ip))
305 		return false;
306 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
307 		return false;
308 	return true;
309 }
310 
311 /*
312  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
313  * into account.
314  * If @doalloc is true, the dquot(s) will be allocated if needed.
315  * Inode may get unlocked and relocked in here, and the caller must deal with
316  * the consequences.
317  */
318 int
319 xfs_qm_dqattach_locked(
320 	xfs_inode_t	*ip,
321 	bool		doalloc)
322 {
323 	xfs_mount_t	*mp = ip->i_mount;
324 	int		error = 0;
325 
326 	if (!xfs_qm_need_dqattach(ip))
327 		return 0;
328 
329 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
330 
331 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
332 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
333 				doalloc, &ip->i_udquot);
334 		if (error)
335 			goto done;
336 		ASSERT(ip->i_udquot);
337 	}
338 
339 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
340 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
341 				doalloc, &ip->i_gdquot);
342 		if (error)
343 			goto done;
344 		ASSERT(ip->i_gdquot);
345 	}
346 
347 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
348 		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
349 				doalloc, &ip->i_pdquot);
350 		if (error)
351 			goto done;
352 		ASSERT(ip->i_pdquot);
353 	}
354 
355 done:
356 	/*
357 	 * Don't worry about the dquots that we may have attached before any
358 	 * error - they'll get detached later if it has not already been done.
359 	 */
360 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
361 	return error;
362 }
363 
364 int
365 xfs_qm_dqattach(
366 	struct xfs_inode	*ip)
367 {
368 	int			error;
369 
370 	if (!xfs_qm_need_dqattach(ip))
371 		return 0;
372 
373 	xfs_ilock(ip, XFS_ILOCK_EXCL);
374 	error = xfs_qm_dqattach_locked(ip, false);
375 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
376 
377 	return error;
378 }
379 
380 /*
381  * Release dquots (and their references) if any.
382  * The inode should be locked EXCL except when this is called by
383  * xfs_ireclaim.
384  */
385 void
386 xfs_qm_dqdetach(
387 	xfs_inode_t	*ip)
388 {
389 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
390 		return;
391 
392 	trace_xfs_dquot_dqdetach(ip);
393 
394 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
395 	if (ip->i_udquot) {
396 		xfs_qm_dqrele(ip->i_udquot);
397 		ip->i_udquot = NULL;
398 	}
399 	if (ip->i_gdquot) {
400 		xfs_qm_dqrele(ip->i_gdquot);
401 		ip->i_gdquot = NULL;
402 	}
403 	if (ip->i_pdquot) {
404 		xfs_qm_dqrele(ip->i_pdquot);
405 		ip->i_pdquot = NULL;
406 	}
407 }
408 
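/*
 * Per-shrinker-invocation state: @buffers collects dirty dquot buffers
 * for a single delwri submission, @dispose collects clean dquots that
 * are ready to be freed.
 */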
409 struct xfs_qm_isolate {
410 	struct list_head	buffers;
411 	struct list_head	dispose;
412 };
413 
414 static enum lru_status
415 xfs_qm_dquot_isolate(
416 	struct list_head	*item,
417 	struct list_lru_one	*lru,
418 	spinlock_t		*lru_lock,
419 	void			*arg)
420 		__releases(lru_lock) __acquires(lru_lock)
421 {
422 	struct xfs_dquot	*dqp = container_of(item,
423 						struct xfs_dquot, q_lru);
424 	struct xfs_qm_isolate	*isol = arg;
425 
426 	if (!xfs_dqlock_nowait(dqp))
427 		goto out_miss_busy;
428 
429 	/*
430 	 * This dquot has acquired a reference in the meantime; remove it from
431 	 * the freelist and try again.
432 	 */
433 	if (dqp->q_nrefs) {
434 		xfs_dqunlock(dqp);
435 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
436 
437 		trace_xfs_dqreclaim_want(dqp);
438 		list_lru_isolate(lru, &dqp->q_lru);
439 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
440 		return LRU_REMOVED;
441 	}
442 
443 	/*
444 	 * If the dquot is dirty, flush it. If it's already being flushed, just
445 	 * skip it so there is time for the IO to complete before we try to
446 	 * reclaim it again on the next LRU pass.
447 	 */
448 	if (!xfs_dqflock_nowait(dqp)) {
449 		xfs_dqunlock(dqp);
450 		goto out_miss_busy;
451 	}
452 
453 	if (XFS_DQ_IS_DIRTY(dqp)) {
454 		struct xfs_buf	*bp = NULL;
455 		int		error;
456 
457 		trace_xfs_dqreclaim_dirty(dqp);
458 
459 		/* we have to drop the LRU lock to flush the dquot */
460 		spin_unlock(lru_lock);
461 
462 		error = xfs_qm_dqflush(dqp, &bp);
463 		if (error)
464 			goto out_unlock_dirty;
465 
466 		xfs_buf_delwri_queue(bp, &isol->buffers);
467 		xfs_buf_relse(bp);
468 		goto out_unlock_dirty;
469 	}
470 	xfs_dqfunlock(dqp);
471 
472 	/*
473 	 * Prevent lookups now that we are past the point of no return.
474 	 */
475 	dqp->q_flags |= XFS_DQFLAG_FREEING;
476 	xfs_dqunlock(dqp);
477 
478 	ASSERT(dqp->q_nrefs == 0);
479 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
480 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
481 	trace_xfs_dqreclaim_done(dqp);
482 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
483 	return LRU_REMOVED;
484 
485 out_miss_busy:
486 	trace_xfs_dqreclaim_busy(dqp);
487 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
488 	return LRU_SKIP;
489 
490 out_unlock_dirty:
491 	trace_xfs_dqreclaim_busy(dqp);
492 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 	xfs_dqunlock(dqp);
494 	spin_lock(lru_lock);
495 	return LRU_RETRY;
496 }
497 
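/*
 * Shrinker scan callback: walk the dquot LRU isolating reclaimable
 * dquots, write back the dirty ones through a local delwri list, then
 * free everything that made it onto the dispose list.
 */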
498 static unsigned long
499 xfs_qm_shrink_scan(
500 	struct shrinker		*shrink,
501 	struct shrink_control	*sc)
502 {
503 	struct xfs_quotainfo	*qi = container_of(shrink,
504 					struct xfs_quotainfo, qi_shrinker);
505 	struct xfs_qm_isolate	isol;
506 	unsigned long		freed;
507 	int			error;
508 
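	/*
	 * Flushing dirty dquots can issue I/O, so only proceed if the
	 * caller allows both filesystem recursion and direct reclaim.
	 */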
509 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
510 		return 0;
511 
512 	INIT_LIST_HEAD(&isol.buffers);
513 	INIT_LIST_HEAD(&isol.dispose);
514 
515 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
516 				     xfs_qm_dquot_isolate, &isol);
517 
518 	error = xfs_buf_delwri_submit(&isol.buffers);
519 	if (error)
520 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
521 
522 	while (!list_empty(&isol.dispose)) {
523 		struct xfs_dquot	*dqp;
524 
525 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
526 		list_del_init(&dqp->q_lru);
527 		xfs_qm_dqfree_one(dqp);
528 	}
529 
530 	return freed;
531 }
532 
533 static unsigned long
534 xfs_qm_shrink_count(
535 	struct shrinker		*shrink,
536 	struct shrink_control	*sc)
537 {
538 	struct xfs_quotainfo	*qi = container_of(shrink,
539 					struct xfs_quotainfo, qi_shrinker);
540 
541 	return list_lru_shrink_count(&qi->qi_lru, sc);
542 }
543 
544 STATIC void
545 xfs_qm_set_defquota(
546 	struct xfs_mount	*mp,
547 	xfs_dqtype_t		type,
548 	struct xfs_quotainfo	*qinf)
549 {
550 	struct xfs_dquot	*dqp;
551 	struct xfs_def_quota	*defq;
552 	int			error;
553 
554 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
555 	if (error)
556 		return;
557 
558 	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
559 
560 	/*
561 	 * Timers and warnings have already been set; just set the
562 	 * default limits for this quota type
563 	 */
564 	defq->blk.hard = dqp->q_blk.hardlimit;
565 	defq->blk.soft = dqp->q_blk.softlimit;
566 	defq->ino.hard = dqp->q_ino.hardlimit;
567 	defq->ino.soft = dqp->q_ino.softlimit;
568 	defq->rtb.hard = dqp->q_rtb.hardlimit;
569 	defq->rtb.soft = dqp->q_rtb.softlimit;
570 	xfs_qm_dqdestroy(dqp);
571 }
572 
573 /* Initialize quota time limits from the root dquot. */
574 static void
575 xfs_qm_init_timelimits(
576 	struct xfs_mount	*mp,
577 	xfs_dqtype_t		type)
578 {
579 	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
580 	struct xfs_def_quota	*defq;
581 	struct xfs_dquot	*dqp;
582 	int			error;
583 
584 	defq = xfs_get_defquota(qinf, type);
585 
586 	defq->blk.time = XFS_QM_BTIMELIMIT;
587 	defq->ino.time = XFS_QM_ITIMELIMIT;
588 	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
589 	defq->blk.warn = XFS_QM_BWARNLIMIT;
590 	defq->ino.warn = XFS_QM_IWARNLIMIT;
591 	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
592 
593 	/*
594 	 * We try to get the limits from the superuser's limits fields.
595 	 * This is quite hacky, but it is standard quota practice.
596 	 *
597 	 * Since we may not have done a quotacheck by this point, just read
598 	 * the dquot without attaching it to any hashtables or lists.
599 	 */
600 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
601 	if (error)
602 		return;
603 
604 	/*
605 	 * The warnings and timers set the grace period given to
606 	 * a user or group before they can no longer perform any
607 	 * more writes. If a value is zero, a default is used.
608 	 */
609 	if (dqp->q_blk.timer)
610 		defq->blk.time = dqp->q_blk.timer;
611 	if (dqp->q_ino.timer)
612 		defq->ino.time = dqp->q_ino.timer;
613 	if (dqp->q_rtb.timer)
614 		defq->rtb.time = dqp->q_rtb.timer;
615 	if (dqp->q_blk.warnings)
616 		defq->blk.warn = dqp->q_blk.warnings;
617 	if (dqp->q_ino.warnings)
618 		defq->ino.warn = dqp->q_ino.warnings;
619 	if (dqp->q_rtb.warnings)
620 		defq->rtb.warn = dqp->q_rtb.warnings;
621 
622 	xfs_qm_dqdestroy(dqp);
623 }
624 
625 /*
626  * This initializes all the quota information that's kept in the
627  * mount structure
628  */
629 STATIC int
630 xfs_qm_init_quotainfo(
631 	struct xfs_mount	*mp)
632 {
633 	struct xfs_quotainfo	*qinf;
634 	int			error;
635 
636 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
637 
638 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
639 
640 	error = list_lru_init(&qinf->qi_lru);
641 	if (error)
642 		goto out_free_qinf;
643 
644 	/*
645 	 * See if the quota inodes are set up, and if not, allocate them,
646 	 * and change the superblock accordingly.
647 	 */
648 	error = xfs_qm_init_quotainos(mp);
649 	if (error)
650 		goto out_free_lru;
651 
652 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
653 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
654 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
655 	mutex_init(&qinf->qi_tree_lock);
656 
657 	/* mutex used to serialize quotaoffs */
658 	mutex_init(&qinf->qi_quotaofflock);
659 
660 	/* Precalc some constants */
661 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
662 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
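	/*
	 * Rough worked example (assumes 4k filesystem blocks and the usual
	 * one-FSB dquot cluster): qi_dqchunklen would be 8 512-byte basic
	 * blocks, and qi_dqperchunk 4096 / sizeof(struct xfs_dqblk) dquots.
	 */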
663 	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
664 		qinf->qi_expiry_min =
665 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
666 		qinf->qi_expiry_max =
667 			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
668 	} else {
669 		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
670 		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
671 	}
672 	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
673 			qinf->qi_expiry_max);
674 
675 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
676 
677 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
678 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
679 	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
680 
681 	if (XFS_IS_UQUOTA_RUNNING(mp))
682 		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
683 	if (XFS_IS_GQUOTA_RUNNING(mp))
684 		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
685 	if (XFS_IS_PQUOTA_RUNNING(mp))
686 		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
687 
688 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
689 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
690 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
691 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
692 
693 	error = register_shrinker(&qinf->qi_shrinker);
694 	if (error)
695 		goto out_free_inos;
696 
697 	return 0;
698 
699 out_free_inos:
700 	mutex_destroy(&qinf->qi_quotaofflock);
701 	mutex_destroy(&qinf->qi_tree_lock);
702 	xfs_qm_destroy_quotainos(qinf);
703 out_free_lru:
704 	list_lru_destroy(&qinf->qi_lru);
705 out_free_qinf:
706 	kmem_free(qinf);
707 	mp->m_quotainfo = NULL;
708 	return error;
709 }
710 
711 /*
712  * Gets called when unmounting a filesystem or when all quotas get
713  * turned off.
714  * This purges the quota inodes, destroys the locks and frees the quotainfo.
715  */
716 void
717 xfs_qm_destroy_quotainfo(
718 	struct xfs_mount	*mp)
719 {
720 	struct xfs_quotainfo	*qi;
721 
722 	qi = mp->m_quotainfo;
723 	ASSERT(qi != NULL);
724 
725 	unregister_shrinker(&qi->qi_shrinker);
726 	list_lru_destroy(&qi->qi_lru);
727 	xfs_qm_destroy_quotainos(qi);
728 	mutex_destroy(&qi->qi_tree_lock);
729 	mutex_destroy(&qi->qi_quotaofflock);
730 	kmem_free(qi);
731 	mp->m_quotainfo = NULL;
732 }
733 
734 /*
735  * Create an inode and return with a reference already taken, but unlocked.
736  * This is how we create quota inodes.
737  */
738 STATIC int
739 xfs_qm_qino_alloc(
740 	struct xfs_mount	*mp,
741 	struct xfs_inode	**ipp,
742 	unsigned int		flags)
743 {
744 	struct xfs_trans	*tp;
745 	int			error;
746 	bool			need_alloc = true;
747 
748 	*ipp = NULL;
749 	/*
750 	 * With a superblock that doesn't have a separate pquotino, we
751 	 * share an inode between gquota and pquota. If the on-disk
752 	 * superblock has GQUOTA and the filesystem is now mounted
753 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
754 	 * vice-versa.
755 	 */
756 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
757 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
758 		xfs_ino_t ino = NULLFSINO;
759 
760 		if ((flags & XFS_QMOPT_PQUOTA) &&
761 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
762 			ino = mp->m_sb.sb_gquotino;
763 			if (XFS_IS_CORRUPT(mp,
764 					   mp->m_sb.sb_pquotino != NULLFSINO))
765 				return -EFSCORRUPTED;
766 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
767 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
768 			ino = mp->m_sb.sb_pquotino;
769 			if (XFS_IS_CORRUPT(mp,
770 					   mp->m_sb.sb_gquotino != NULLFSINO))
771 				return -EFSCORRUPTED;
772 		}
773 		if (ino != NULLFSINO) {
774 			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
775 			if (error)
776 				return error;
777 			mp->m_sb.sb_gquotino = NULLFSINO;
778 			mp->m_sb.sb_pquotino = NULLFSINO;
779 			need_alloc = false;
780 		}
781 	}
782 
783 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
784 			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
785 			0, 0, &tp);
786 	if (error)
787 		return error;
788 
789 	if (need_alloc) {
790 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ipp);
791 		if (error) {
792 			xfs_trans_cancel(tp);
793 			return error;
794 		}
795 	}
796 
797 	/*
798 	 * Make the changes in the superblock, and log those too.
799 	 * sbfields arg may contain fields other than *QUOTINO;
800 	 * VERSIONNUM for example.
801 	 */
802 	spin_lock(&mp->m_sb_lock);
803 	if (flags & XFS_QMOPT_SBVERSION) {
804 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
805 
806 		xfs_sb_version_addquota(&mp->m_sb);
807 		mp->m_sb.sb_uquotino = NULLFSINO;
808 		mp->m_sb.sb_gquotino = NULLFSINO;
809 		mp->m_sb.sb_pquotino = NULLFSINO;
810 
811 		/* qflags will get updated fully _after_ quotacheck */
812 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
813 	}
814 	if (flags & XFS_QMOPT_UQUOTA)
815 		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
816 	else if (flags & XFS_QMOPT_GQUOTA)
817 		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
818 	else
819 		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
820 	spin_unlock(&mp->m_sb_lock);
821 	xfs_log_sb(tp);
822 
823 	error = xfs_trans_commit(tp);
824 	if (error) {
825 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
826 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
827 	}
828 	if (need_alloc)
829 		xfs_finish_inode_setup(*ipp);
830 	return error;
831 }
832 
833 
834 STATIC void
835 xfs_qm_reset_dqcounts(
836 	struct xfs_mount	*mp,
837 	struct xfs_buf		*bp,
838 	xfs_dqid_t		id,
839 	xfs_dqtype_t		type)
840 {
841 	struct xfs_dqblk	*dqb;
842 	int			j;
843 
844 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
845 
846 	/*
847 	 * Reset all counters and timers. They'll be
848 	 * started afresh by xfs_qm_quotacheck.
849 	 */
850 #ifdef DEBUG
851 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
852 		sizeof(xfs_dqblk_t);
853 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
854 #endif
855 	dqb = bp->b_addr;
856 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
857 		struct xfs_disk_dquot	*ddq;
858 
859 		ddq = (struct xfs_disk_dquot *)&dqb[j];
860 
861 		/*
862 		 * Do a sanity check, and if needed, repair the dqblk. Don't
863 		 * output any warnings because it's perfectly possible to
864 		 * find uninitialised dquot blks. See comment in
865 		 * xfs_dquot_verify.
866 		 */
867 		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
868 		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
869 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
870 
871 		/*
872 		 * Reset type in case we are reusing group quota file for
873 		 * project quotas or vice versa
874 		 */
875 		ddq->d_type = type;
876 		ddq->d_bcount = 0;
877 		ddq->d_icount = 0;
878 		ddq->d_rtbcount = 0;
879 
880 		/*
881 		 * dquot id 0 stores the default grace period and the maximum
882 		 * warning limit that were set by the administrator, so we
883 		 * should not reset them.
884 		 */
885 		if (ddq->d_id != 0) {
886 			ddq->d_btimer = 0;
887 			ddq->d_itimer = 0;
888 			ddq->d_rtbtimer = 0;
889 			ddq->d_bwarns = 0;
890 			ddq->d_iwarns = 0;
891 			ddq->d_rtbwarns = 0;
892 			if (xfs_sb_version_hasbigtime(&mp->m_sb))
893 				ddq->d_type |= XFS_DQTYPE_BIGTIME;
894 		}
895 
896 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
897 			xfs_update_cksum((char *)&dqb[j],
898 					 sizeof(struct xfs_dqblk),
899 					 XFS_DQUOT_CRC_OFF);
900 		}
901 	}
902 }
903 
904 STATIC int
905 xfs_qm_reset_dqcounts_all(
906 	struct xfs_mount	*mp,
907 	xfs_dqid_t		firstid,
908 	xfs_fsblock_t		bno,
909 	xfs_filblks_t		blkcnt,
910 	xfs_dqtype_t		type,
911 	struct list_head	*buffer_list)
912 {
913 	struct xfs_buf		*bp;
914 	int			error = 0;
915 
916 	ASSERT(blkcnt > 0);
917 
918 	/*
919 	 * The blkcnt arg can be a very big number, and might even be
920 	 * larger than the log itself. So, we have to break it up into
921 	 * manageable-sized transactions.
922 	 * Note that we don't start a permanent transaction here; we might
923 	 * not be able to get a log reservation for the whole thing up front,
924 	 * and we don't really care to either, because we just discard
925 	 * everything if we were to crash in the middle of this loop.
926 	 */
927 	while (blkcnt--) {
928 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
929 			      XFS_FSB_TO_DADDR(mp, bno),
930 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
931 			      &xfs_dquot_buf_ops);
932 
933 		/*
934 	 * CRC and validation errors will return an EFSCORRUPTED error here. If
935 		 * this occurs, re-read without CRC validation so that we can
936 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
937 		 * will leave a trace in the log indicating corruption has
938 		 * been detected.
939 		 */
940 		if (error == -EFSCORRUPTED) {
941 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
942 				      XFS_FSB_TO_DADDR(mp, bno),
943 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
944 				      NULL);
945 		}
946 
947 		if (error)
948 			break;
949 
950 		/*
951 		 * A corrupt buffer might not have a verifier attached, so
952 		 * make sure we have the correct one attached before writeback
953 		 * occurs.
954 		 */
955 		bp->b_ops = &xfs_dquot_buf_ops;
956 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
957 		xfs_buf_delwri_queue(bp, buffer_list);
958 		xfs_buf_relse(bp);
959 
960 		/* go to the next block. */
961 		bno++;
962 		firstid += mp->m_quotainfo->qi_dqperchunk;
963 	}
964 
965 	return error;
966 }
967 
968 /*
969  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
970  * counters for every chunk of dquots that we find.
971  */
972 STATIC int
973 xfs_qm_reset_dqcounts_buf(
974 	struct xfs_mount	*mp,
975 	struct xfs_inode	*qip,
976 	xfs_dqtype_t		type,
977 	struct list_head	*buffer_list)
978 {
979 	struct xfs_bmbt_irec	*map;
980 	int			i, nmaps;	/* number of map entries */
981 	int			error;		/* return value */
982 	xfs_fileoff_t		lblkno;
983 	xfs_filblks_t		maxlblkcnt;
984 	xfs_dqid_t		firstid;
985 	xfs_fsblock_t		rablkno;
986 	xfs_filblks_t		rablkcnt;
987 
988 	error = 0;
989 	/*
990 	 * This looks racy, but we can't keep an inode lock across a
991 	 * trans_reserve. But this gets called during quotacheck, and that
992 	 * happens only at mount time, which is single threaded.
993 	 */
994 	if (qip->i_d.di_nblocks == 0)
995 		return 0;
996 
997 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
998 
999 	lblkno = 0;
1000 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1001 	do {
1002 		uint		lock_mode;
1003 
1004 		nmaps = XFS_DQITER_MAP_SIZE;
1005 		/*
1006 		 * We aren't changing the inode itself. Just changing
1007 		 * some of its data. No new blocks are added here, and
1008 		 * the inode is never added to the transaction.
1009 		 */
1010 		lock_mode = xfs_ilock_data_map_shared(qip);
1011 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1012 				       map, &nmaps, 0);
1013 		xfs_iunlock(qip, lock_mode);
1014 		if (error)
1015 			break;
1016 
1017 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1018 		for (i = 0; i < nmaps; i++) {
1019 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1020 			ASSERT(map[i].br_blockcount);
1021 
1022 
1023 			lblkno += map[i].br_blockcount;
1024 
1025 			if (map[i].br_startblock == HOLESTARTBLOCK)
1026 				continue;
1027 
1028 			firstid = (xfs_dqid_t) map[i].br_startoff *
1029 				mp->m_quotainfo->qi_dqperchunk;
1030 			/*
1031 			 * Do a read-ahead on the next extent.
1032 			 */
1033 			if ((i+1 < nmaps) &&
1034 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1035 				rablkcnt =  map[i+1].br_blockcount;
1036 				rablkno = map[i+1].br_startblock;
1037 				while (rablkcnt--) {
1038 					xfs_buf_readahead(mp->m_ddev_targp,
1039 					       XFS_FSB_TO_DADDR(mp, rablkno),
1040 					       mp->m_quotainfo->qi_dqchunklen,
1041 					       &xfs_dquot_buf_ops);
1042 					rablkno++;
1043 				}
1044 			}
1045 			/*
1046 			 * Iterate through all the blocks in the extent and
1047 			 * reset the counters of all the dquots inside them.
1048 			 */
1049 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1050 						   map[i].br_startblock,
1051 						   map[i].br_blockcount,
1052 						   type, buffer_list);
1053 			if (error)
1054 				goto out;
1055 		}
1056 	} while (nmaps > 0);
1057 
1058 out:
1059 	kmem_free(map);
1060 	return error;
1061 }
1062 
1063 /*
1064  * Called by dqusage_adjust in doing a quotacheck.
1065  *
1066  * Given the inode and a dquot id, this updates both the incore dquot as well
1067  * as the buffer copy. This is so that once the quotacheck is done, we can
1068  * just log all the buffers, as opposed to logging numerous updates to
1069  * individual dquots.
1070  */
1071 STATIC int
1072 xfs_qm_quotacheck_dqadjust(
1073 	struct xfs_inode	*ip,
1074 	xfs_dqtype_t		type,
1075 	xfs_qcnt_t		nblks,
1076 	xfs_qcnt_t		rtblks)
1077 {
1078 	struct xfs_mount	*mp = ip->i_mount;
1079 	struct xfs_dquot	*dqp;
1080 	xfs_dqid_t		id;
1081 	int			error;
1082 
1083 	id = xfs_qm_id_for_quotatype(ip, type);
1084 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1085 	if (error) {
1086 		/*
1087 		 * Shouldn't be able to turn off quotas here.
1088 		 */
1089 		ASSERT(error != -ESRCH);
1090 		ASSERT(error != -ENOENT);
1091 		return error;
1092 	}
1093 
1094 	trace_xfs_dqadjust(dqp);
1095 
1096 	/*
1097 	 * Adjust the inode count and the block count to reflect this inode's
1098 	 * resource usage.
1099 	 */
1100 	dqp->q_ino.count++;
1101 	dqp->q_ino.reserved++;
1102 	if (nblks) {
1103 		dqp->q_blk.count += nblks;
1104 		dqp->q_blk.reserved += nblks;
1105 	}
1106 	if (rtblks) {
1107 		dqp->q_rtb.count += rtblks;
1108 		dqp->q_rtb.reserved += rtblks;
1109 	}
1110 
1111 	/*
1112 	 * Set default limits, adjust timers (since we changed usages)
1113 	 *
1114 	 * There are no timers for the default values set in the root dquot.
1115 	 */
1116 	if (dqp->q_id) {
1117 		xfs_qm_adjust_dqlimits(dqp);
1118 		xfs_qm_adjust_dqtimers(dqp);
1119 	}
1120 
1121 	dqp->q_flags |= XFS_DQFLAG_DIRTY;
1122 	xfs_qm_dqput(dqp);
1123 	return 0;
1124 }
1125 
1126 /*
1127  * Callback routine supplied to bulkstat(). Given an inumber, find its
1128  * dquots and update them to account for resources taken by that inode.
1129  */
1130 /* ARGSUSED */
1131 STATIC int
1132 xfs_qm_dqusage_adjust(
1133 	struct xfs_mount	*mp,
1134 	struct xfs_trans	*tp,
1135 	xfs_ino_t		ino,
1136 	void			*data)
1137 {
1138 	struct xfs_inode	*ip;
1139 	xfs_qcnt_t		nblks;
1140 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1141 	int			error;
1142 
1143 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1144 
1145 	/*
1146 	 * rootino must have its resources accounted for, not so with the quota
1147 	 * inodes.
1148 	 */
1149 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1150 		return 0;
1151 
1152 	/*
1153 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1154 	 * at mount time and therefore nobody will be racing chown/chproj.
1155 	 */
1156 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1157 	if (error == -EINVAL || error == -ENOENT)
1158 		return 0;
1159 	if (error)
1160 		return error;
1161 
1162 	ASSERT(ip->i_delayed_blks == 0);
1163 
1164 	if (XFS_IS_REALTIME_INODE(ip)) {
1165 		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1166 
1167 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1168 			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1169 			if (error)
1170 				goto error0;
1171 		}
1172 
1173 		xfs_bmap_count_leaves(ifp, &rtblks);
1174 	}
1175 
1176 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1177 
1178 	/*
1179 	 * Add the (disk blocks and inode) resources occupied by this
1180 	 * inode to its dquots. We do this adjustment in the incore dquot,
1181 	 * and also copy the changes to its buffer.
1182 	 * We don't care about putting these changes in a transaction
1183 	 * envelope because if we crash in the middle of a 'quotacheck'
1184 	 * we have to start from the beginning anyway.
1185 	 * Once we're done, we'll log all the dquot bufs.
1186 	 *
1187 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1188 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1189 	 */
1190 	if (XFS_IS_UQUOTA_ON(mp)) {
1191 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1192 				rtblks);
1193 		if (error)
1194 			goto error0;
1195 	}
1196 
1197 	if (XFS_IS_GQUOTA_ON(mp)) {
1198 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1199 				rtblks);
1200 		if (error)
1201 			goto error0;
1202 	}
1203 
1204 	if (XFS_IS_PQUOTA_ON(mp)) {
1205 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1206 				rtblks);
1207 		if (error)
1208 			goto error0;
1209 	}
1210 
1211 error0:
1212 	xfs_irele(ip);
1213 	return error;
1214 }
1215 
1216 STATIC int
1217 xfs_qm_flush_one(
1218 	struct xfs_dquot	*dqp,
1219 	void			*data)
1220 {
1221 	struct xfs_mount	*mp = dqp->q_mount;
1222 	struct list_head	*buffer_list = data;
1223 	struct xfs_buf		*bp = NULL;
1224 	int			error = 0;
1225 
1226 	xfs_dqlock(dqp);
1227 	if (dqp->q_flags & XFS_DQFLAG_FREEING)
1228 		goto out_unlock;
1229 	if (!XFS_DQ_IS_DIRTY(dqp))
1230 		goto out_unlock;
1231 
1232 	/*
1233 	 * The only way the dquot is already flush locked by the time quotacheck
1234 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1235 	 * it for the final time. Quotacheck collects all dquot bufs in the
1236 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1237 	 * possibly queued it for I/O. The only way out is to push the buffer to
1238 	 * cycle the flush lock.
1239 	 */
1240 	if (!xfs_dqflock_nowait(dqp)) {
1241 		/* buf is pinned in-core by delwri list */
1242 		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1243 				mp->m_quotainfo->qi_dqchunklen, 0);
1244 		if (!bp) {
1245 			error = -EINVAL;
1246 			goto out_unlock;
1247 		}
1248 		xfs_buf_unlock(bp);
1249 
1250 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1251 		xfs_buf_rele(bp);
1252 
1253 		error = -EAGAIN;
1254 		goto out_unlock;
1255 	}
1256 
1257 	error = xfs_qm_dqflush(dqp, &bp);
1258 	if (error)
1259 		goto out_unlock;
1260 
1261 	xfs_buf_delwri_queue(bp, buffer_list);
1262 	xfs_buf_relse(bp);
1263 out_unlock:
1264 	xfs_dqunlock(dqp);
1265 	return error;
1266 }
1267 
1268 /*
1269  * Walk through all the filesystem inodes and construct a consistent view
1270  * of the disk quota world. If the quotacheck fails, disable quotas.
1271  */
1272 STATIC int
1273 xfs_qm_quotacheck(
1274 	xfs_mount_t	*mp)
1275 {
1276 	int			error, error2;
1277 	uint			flags;
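	/* All dquot buffers are queued here and submitted in one batch. */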
1278 	LIST_HEAD		(buffer_list);
1279 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1280 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1281 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1282 
1283 	flags = 0;
1284 
1285 	ASSERT(uip || gip || pip);
1286 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1287 
1288 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1289 
1290 	/*
1291 	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1292 	 * their counters to zero. We need a clean slate.
1293 	 * We don't log our changes until later.
1294 	 */
1295 	if (uip) {
1296 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1297 					 &buffer_list);
1298 		if (error)
1299 			goto error_return;
1300 		flags |= XFS_UQUOTA_CHKD;
1301 	}
1302 
1303 	if (gip) {
1304 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1305 					 &buffer_list);
1306 		if (error)
1307 			goto error_return;
1308 		flags |= XFS_GQUOTA_CHKD;
1309 	}
1310 
1311 	if (pip) {
1312 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1313 					 &buffer_list);
1314 		if (error)
1315 			goto error_return;
1316 		flags |= XFS_PQUOTA_CHKD;
1317 	}
1318 
1319 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1320 			NULL);
1321 	if (error)
1322 		goto error_return;
1323 
1324 	/*
1325 	 * We've made all the changes that we need to make incore.  Flush them
1326 	 * down to disk buffers if everything was updated successfully.
1327 	 */
1328 	if (XFS_IS_UQUOTA_ON(mp)) {
1329 		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1330 					  &buffer_list);
1331 	}
1332 	if (XFS_IS_GQUOTA_ON(mp)) {
1333 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1334 					   &buffer_list);
1335 		if (!error)
1336 			error = error2;
1337 	}
1338 	if (XFS_IS_PQUOTA_ON(mp)) {
1339 		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1340 					   &buffer_list);
1341 		if (!error)
1342 			error = error2;
1343 	}
1344 
1345 	error2 = xfs_buf_delwri_submit(&buffer_list);
1346 	if (!error)
1347 		error = error2;
1348 
1349 	/*
1350 	 * We can get this error if we couldn't do a dquot allocation inside
1351 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1352 	 * dirty dquots that might be cached, we just want to get rid of them
1353 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1354 	 * at this point (because we intentionally didn't in dqget_noattach).
1355 	 */
1356 	if (error) {
1357 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1358 		goto error_return;
1359 	}
1360 
1361 	/*
1362 	 * If one type of quota is off, then it will lose its
1363 	 * quotachecked status, since we won't be doing accounting for
1364 	 * that type anymore.
1365 	 */
1366 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1367 	mp->m_qflags |= flags;
1368 
1369  error_return:
1370 	xfs_buf_delwri_cancel(&buffer_list);
1371 
1372 	if (error) {
1373 		xfs_warn(mp,
1374 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1375 			error);
1376 		/*
1377 		 * We must turn off quotas.
1378 		 */
1379 		ASSERT(mp->m_quotainfo != NULL);
1380 		xfs_qm_destroy_quotainfo(mp);
1381 		if (xfs_mount_reset_sbqflags(mp)) {
1382 			xfs_warn(mp,
1383 				"Quotacheck: Failed to reset quota flags.");
1384 		}
1385 	} else
1386 		xfs_notice(mp, "Quotacheck: Done.");
1387 	return error;
1388 }
1389 
1390 /*
1391  * This is called from xfs_mountfs to start quotas and initialize all
1392  * necessary data structures like quotainfo.  This is also responsible for
1393  * running a quotacheck as necessary.  We are guaranteed that the superblock
1394  * is consistently read in at this point.
1395  *
1396  * If we fail here, the mount will continue with quota turned off. We don't
1397  * need to indicate success or failure at all.
1398  */
1399 void
1400 xfs_qm_mount_quotas(
1401 	struct xfs_mount	*mp)
1402 {
1403 	int			error = 0;
1404 	uint			sbf;
1405 
1406 	/*
1407 	 * Quotas on realtime volumes are not supported, so we disable
1408 	 * quotas immediately.
1409 	 */
1410 	if (mp->m_sb.sb_rextents) {
1411 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1412 		mp->m_qflags = 0;
1413 		goto write_changes;
1414 	}
1415 
1416 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1417 
1418 	/*
1419 	 * Allocate the quotainfo structure inside the mount struct, and
1420 	 * create quotainode(s), and change/rev superblock if necessary.
1421 	 */
1422 	error = xfs_qm_init_quotainfo(mp);
1423 	if (error) {
1424 		/*
1425 		 * We must turn off quotas.
1426 		 */
1427 		ASSERT(mp->m_quotainfo == NULL);
1428 		mp->m_qflags = 0;
1429 		goto write_changes;
1430 	}
1431 	/*
1432 	 * If any of the quotas are not consistent, do a quotacheck.
1433 	 */
1434 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1435 		error = xfs_qm_quotacheck(mp);
1436 		if (error) {
1437 			/* Quotacheck failed and disabled quotas. */
1438 			return;
1439 		}
1440 	}
1441 	/*
1442 	 * If one type of quota is off, then it will lose its
1443 	 * quotachecked status, since we won't be doing accounting for
1444 	 * that type anymore.
1445 	 */
1446 	if (!XFS_IS_UQUOTA_ON(mp))
1447 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1448 	if (!XFS_IS_GQUOTA_ON(mp))
1449 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1450 	if (!XFS_IS_PQUOTA_ON(mp))
1451 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1452 
1453  write_changes:
1454 	/*
1455 	 * We actually don't have to acquire the m_sb_lock at all.
1456 	 * This can only be called from mount, and that's single threaded. XXX
1457 	 */
1458 	spin_lock(&mp->m_sb_lock);
1459 	sbf = mp->m_sb.sb_qflags;
1460 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1461 	spin_unlock(&mp->m_sb_lock);
1462 
1463 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1464 		if (xfs_sync_sb(mp, false)) {
1465 			/*
1466 			 * We could only have been turning quotas off.
1467 			 * We aren't in very good shape actually because
1468 			 * the incore structures are convinced that quotas are
1469 			 * off, but the on-disk superblock doesn't know that!
1470 			 */
1471 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1472 			xfs_alert(mp, "%s: Superblock update failed!",
1473 				__func__);
1474 		}
1475 	}
1476 
1477 	if (error) {
1478 		xfs_warn(mp, "Failed to initialize disk quotas.");
1479 		return;
1480 	}
1481 }
1482 
1483 /*
1484  * This is called after the superblock has been read in and we're ready to
1485  * iget the quota inodes.
1486  */
1487 STATIC int
1488 xfs_qm_init_quotainos(
1489 	xfs_mount_t	*mp)
1490 {
1491 	struct xfs_inode	*uip = NULL;
1492 	struct xfs_inode	*gip = NULL;
1493 	struct xfs_inode	*pip = NULL;
1494 	int			error;
1495 	uint			flags = 0;
1496 
1497 	ASSERT(mp->m_quotainfo);
1498 
1499 	/*
1500 	 * Get the uquota, gquota and pquota inodes
1501 	 */
1502 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1503 		if (XFS_IS_UQUOTA_ON(mp) &&
1504 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1505 			ASSERT(mp->m_sb.sb_uquotino > 0);
1506 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1507 					     0, 0, &uip);
1508 			if (error)
1509 				return error;
1510 		}
1511 		if (XFS_IS_GQUOTA_ON(mp) &&
1512 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1513 			ASSERT(mp->m_sb.sb_gquotino > 0);
1514 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1515 					     0, 0, &gip);
1516 			if (error)
1517 				goto error_rele;
1518 		}
1519 		if (XFS_IS_PQUOTA_ON(mp) &&
1520 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1521 			ASSERT(mp->m_sb.sb_pquotino > 0);
1522 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1523 					     0, 0, &pip);
1524 			if (error)
1525 				goto error_rele;
1526 		}
1527 	} else {
1528 		flags |= XFS_QMOPT_SBVERSION;
1529 	}
1530 
1531 	/*
1532 	 * Create the three inodes, if they don't exist already. The changes
1533 	 * made above will get added to a transaction and logged in one of
1534 	 * the qino_alloc calls below.  If the device is readonly,
1535 	 * temporarily switch to read-write to do this.
1536 	 */
1537 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1538 		error = xfs_qm_qino_alloc(mp, &uip,
1539 					      flags | XFS_QMOPT_UQUOTA);
1540 		if (error)
1541 			goto error_rele;
1542 
1543 		flags &= ~XFS_QMOPT_SBVERSION;
1544 	}
1545 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1546 		error = xfs_qm_qino_alloc(mp, &gip,
1547 					  flags | XFS_QMOPT_GQUOTA);
1548 		if (error)
1549 			goto error_rele;
1550 
1551 		flags &= ~XFS_QMOPT_SBVERSION;
1552 	}
1553 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1554 		error = xfs_qm_qino_alloc(mp, &pip,
1555 					  flags | XFS_QMOPT_PQUOTA);
1556 		if (error)
1557 			goto error_rele;
1558 	}
1559 
1560 	mp->m_quotainfo->qi_uquotaip = uip;
1561 	mp->m_quotainfo->qi_gquotaip = gip;
1562 	mp->m_quotainfo->qi_pquotaip = pip;
1563 
1564 	return 0;
1565 
1566 error_rele:
1567 	if (uip)
1568 		xfs_irele(uip);
1569 	if (gip)
1570 		xfs_irele(gip);
1571 	if (pip)
1572 		xfs_irele(pip);
1573 	return error;
1574 }
1575 
1576 STATIC void
1577 xfs_qm_destroy_quotainos(
1578 	struct xfs_quotainfo	*qi)
1579 {
1580 	if (qi->qi_uquotaip) {
1581 		xfs_irele(qi->qi_uquotaip);
1582 		qi->qi_uquotaip = NULL; /* paranoia */
1583 	}
1584 	if (qi->qi_gquotaip) {
1585 		xfs_irele(qi->qi_gquotaip);
1586 		qi->qi_gquotaip = NULL;
1587 	}
1588 	if (qi->qi_pquotaip) {
1589 		xfs_irele(qi->qi_pquotaip);
1590 		qi->qi_pquotaip = NULL;
1591 	}
1592 }
1593 
1594 STATIC void
1595 xfs_qm_dqfree_one(
1596 	struct xfs_dquot	*dqp)
1597 {
1598 	struct xfs_mount	*mp = dqp->q_mount;
1599 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1600 
1601 	mutex_lock(&qi->qi_tree_lock);
1602 	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1603 
1604 	qi->qi_dquots--;
1605 	mutex_unlock(&qi->qi_tree_lock);
1606 
1607 	xfs_qm_dqdestroy(dqp);
1608 }
1609 
1610 /* --------------- utility functions for vnodeops ---------------- */
1611 
1612 
1613 /*
1614  * Given an inode, a uid, gid and prid, make sure that we have
1615  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1616  * quotas by creating this file.
1617  * This also attaches dquot(s) to the given inode after locking it,
1618  * and returns the dquots corresponding to the uid and/or gid.
1619  *
1620  * in	: inode (unlocked)
1621  * out	: udquot, gdquot with references taken and unlocked
1622  */
1623 int
1624 xfs_qm_vop_dqalloc(
1625 	struct xfs_inode	*ip,
1626 	kuid_t			uid,
1627 	kgid_t			gid,
1628 	prid_t			prid,
1629 	uint			flags,
1630 	struct xfs_dquot	**O_udqpp,
1631 	struct xfs_dquot	**O_gdqpp,
1632 	struct xfs_dquot	**O_pdqpp)
1633 {
1634 	struct xfs_mount	*mp = ip->i_mount;
1635 	struct inode		*inode = VFS_I(ip);
1636 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1637 	struct xfs_dquot	*uq = NULL;
1638 	struct xfs_dquot	*gq = NULL;
1639 	struct xfs_dquot	*pq = NULL;
1640 	int			error;
1641 	uint			lockflags;
1642 
1643 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1644 		return 0;
1645 
1646 	lockflags = XFS_ILOCK_EXCL;
1647 	xfs_ilock(ip, lockflags);
1648 
1649 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1650 		gid = inode->i_gid;
1651 
1652 	/*
1653 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1654 	 * if necessary. The dquot(s) will not be locked.
1655 	 */
1656 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1657 		error = xfs_qm_dqattach_locked(ip, true);
1658 		if (error) {
1659 			xfs_iunlock(ip, lockflags);
1660 			return error;
1661 		}
1662 	}
1663 
1664 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1665 		ASSERT(O_udqpp);
1666 		if (!uid_eq(inode->i_uid, uid)) {
1667 			/*
1668 			 * What we need is the dquot that has this uid, and
1669 			 * if we send the inode to dqget, the uid of the inode
1670 			 * takes priority over what's sent in the uid argument.
1671 			 * We must unlock the inode here before calling dqget if
1672 			 * we're not sending the inode, because otherwise
1673 			 * we'll deadlock by doing trans_reserve while
1674 			 * holding ilock.
1675 			 */
1676 			xfs_iunlock(ip, lockflags);
1677 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1678 					XFS_DQTYPE_USER, true, &uq);
1679 			if (error) {
1680 				ASSERT(error != -ENOENT);
1681 				return error;
1682 			}
1683 			/*
1684 			 * Get the ilock in the right order.
1685 			 */
1686 			xfs_dqunlock(uq);
1687 			lockflags = XFS_ILOCK_SHARED;
1688 			xfs_ilock(ip, lockflags);
1689 		} else {
1690 			/*
1691 			 * Take an extra reference, because we'll return
1692 			 * this to the caller.
1693 			 */
1694 			ASSERT(ip->i_udquot);
1695 			uq = xfs_qm_dqhold(ip->i_udquot);
1696 		}
1697 	}
1698 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1699 		ASSERT(O_gdqpp);
1700 		if (!gid_eq(inode->i_gid, gid)) {
1701 			xfs_iunlock(ip, lockflags);
1702 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1703 					XFS_DQTYPE_GROUP, true, &gq);
1704 			if (error) {
1705 				ASSERT(error != -ENOENT);
1706 				goto error_rele;
1707 			}
1708 			xfs_dqunlock(gq);
1709 			lockflags = XFS_ILOCK_SHARED;
1710 			xfs_ilock(ip, lockflags);
1711 		} else {
1712 			ASSERT(ip->i_gdquot);
1713 			gq = xfs_qm_dqhold(ip->i_gdquot);
1714 		}
1715 	}
1716 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1717 		ASSERT(O_pdqpp);
1718 		if (ip->i_d.di_projid != prid) {
1719 			xfs_iunlock(ip, lockflags);
1720 			error = xfs_qm_dqget(mp, prid,
1721 					XFS_DQTYPE_PROJ, true, &pq);
1722 			if (error) {
1723 				ASSERT(error != -ENOENT);
1724 				goto error_rele;
1725 			}
1726 			xfs_dqunlock(pq);
1727 			lockflags = XFS_ILOCK_SHARED;
1728 			xfs_ilock(ip, lockflags);
1729 		} else {
1730 			ASSERT(ip->i_pdquot);
1731 			pq = xfs_qm_dqhold(ip->i_pdquot);
1732 		}
1733 	}
1734 	trace_xfs_dquot_dqalloc(ip);
1735 
1736 	xfs_iunlock(ip, lockflags);
1737 	if (O_udqpp)
1738 		*O_udqpp = uq;
1739 	else
1740 		xfs_qm_dqrele(uq);
1741 	if (O_gdqpp)
1742 		*O_gdqpp = gq;
1743 	else
1744 		xfs_qm_dqrele(gq);
1745 	if (O_pdqpp)
1746 		*O_pdqpp = pq;
1747 	else
1748 		xfs_qm_dqrele(pq);
1749 	return 0;
1750 
1751 error_rele:
1752 	xfs_qm_dqrele(gq);
1753 	xfs_qm_dqrele(uq);
1754 	return error;
1755 }
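
/*
 * Sketch of a typical caller (hypothetical, not a verbatim caller):
 *
 *	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(),
 *			prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *			&udqp, &gdqp, &pdqp);
 *	if (error)
 *		return error;
 *	...reserve quota, create the inode, then:
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */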
1756 
1757 /*
1758  * Actually transfer ownership, and do dquot modifications.
1759  * These were already reserved.
1760  */
1761 struct xfs_dquot *
1762 xfs_qm_vop_chown(
1763 	struct xfs_trans	*tp,
1764 	struct xfs_inode	*ip,
1765 	struct xfs_dquot	**IO_olddq,
1766 	struct xfs_dquot	*newdq)
1767 {
1768 	struct xfs_dquot	*prevdq;
1769 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1770 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1771 
1772 
1773 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1774 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1775 
1776 	/* old dquot */
1777 	prevdq = *IO_olddq;
1778 	ASSERT(prevdq);
1779 	ASSERT(prevdq != newdq);
1780 
1781 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1782 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1783 
1784 	/* the sparkling new dquot */
1785 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1786 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1787 
1788 	/*
1789 	 * Take an extra reference, because the inode is going to keep
1790 	 * this dquot pointer even after the trans_commit.
1791 	 */
1792 	*IO_olddq = xfs_qm_dqhold(newdq);
1793 
1794 	return prevdq;
1795 }
1796 
1797 /*
1798  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1799  */
1800 int
1801 xfs_qm_vop_chown_reserve(
1802 	struct xfs_trans	*tp,
1803 	struct xfs_inode	*ip,
1804 	struct xfs_dquot	*udqp,
1805 	struct xfs_dquot	*gdqp,
1806 	struct xfs_dquot	*pdqp,
1807 	uint			flags)
1808 {
1809 	struct xfs_mount	*mp = ip->i_mount;
1810 	uint64_t		delblks;
1811 	unsigned int		blkflags;
1812 	struct xfs_dquot	*udq_unres = NULL;
1813 	struct xfs_dquot	*gdq_unres = NULL;
1814 	struct xfs_dquot	*pdq_unres = NULL;
1815 	struct xfs_dquot	*udq_delblks = NULL;
1816 	struct xfs_dquot	*gdq_delblks = NULL;
1817 	struct xfs_dquot	*pdq_delblks = NULL;
1818 	int			error;
1819 
1820 
1821 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1822 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1823 
1824 	delblks = ip->i_delayed_blks;
1825 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1826 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1827 
1828 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1829 	    i_uid_read(VFS_I(ip)) != udqp->q_id) {
1830 		udq_delblks = udqp;
1831 		/*
1832 		 * If there are delayed allocation blocks, then we have to
1833 		 * unreserve those from the old dquot, and add them to the
1834 		 * new dquot.
1835 		 */
1836 		if (delblks) {
1837 			ASSERT(ip->i_udquot);
1838 			udq_unres = ip->i_udquot;
1839 		}
1840 	}
1841 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1842 	    i_gid_read(VFS_I(ip)) != gdqp->q_id) {
1843 		gdq_delblks = gdqp;
1844 		if (delblks) {
1845 			ASSERT(ip->i_gdquot);
1846 			gdq_unres = ip->i_gdquot;
1847 		}
1848 	}
1849 
1850 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1851 	    ip->i_d.di_projid != pdqp->q_id) {
1852 		pdq_delblks = pdqp;
1853 		if (delblks) {
1854 			ASSERT(ip->i_pdquot);
1855 			pdq_unres = ip->i_pdquot;
1856 		}
1857 	}
1858 
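	/*
	 * Reserve the inode's on-disk blocks and the inode itself against
	 * whichever dquots are changing owners.
	 */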
1859 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1860 				udq_delblks, gdq_delblks, pdq_delblks,
1861 				ip->i_d.di_nblocks, 1, flags | blkflags);
1862 	if (error)
1863 		return error;
1864 
1865 	/*
1866 	 * Do the delayed blks reservations/unreservations now. Since these
1867 	 * are done without the help of a transaction, if a reservation fails,
1868 	 * its previous reservations won't be automatically undone by trans
1869 	 * code. So we have to do it manually here.
1870 	 */
1871 	if (delblks) {
1872 		/*
1873 		 * Do the reservations first. Unreservation can't fail.
1874 		 */
1875 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1876 		ASSERT(udq_unres || gdq_unres || pdq_unres);
1877 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1878 			    udq_delblks, gdq_delblks, pdq_delblks,
1879 			    (xfs_qcnt_t)delblks, 0, flags | blkflags);
1880 		if (error)
1881 			return error;
1882 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1883 				udq_unres, gdq_unres, pdq_unres,
1884 				-((xfs_qcnt_t)delblks), 0, blkflags);
1885 	}
1886 
1887 	return 0;
1888 }
1889 
1890 int
1891 xfs_qm_vop_rename_dqattach(
1892 	struct xfs_inode	**i_tab)
1893 {
1894 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1895 	int			i;
1896 
1897 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1898 		return 0;
1899 
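	/*
	 * The table holds at most four inodes involved in the rename;
	 * duplicate entries (the same inode in two roles) are skipped below.
	 */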
1900 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1901 		struct xfs_inode	*ip = i_tab[i];
1902 		int			error;
1903 
1904 		/*
1905 		 * Watch out for duplicate entries in the table.
1906 		 */
1907 		if (i == 0 || ip != i_tab[i-1]) {
1908 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1909 				error = xfs_qm_dqattach(ip);
1910 				if (error)
1911 					return error;
1912 			}
1913 		}
1914 	}
1915 	return 0;
1916 }
1917 
1918 void
1919 xfs_qm_vop_create_dqattach(
1920 	struct xfs_trans	*tp,
1921 	struct xfs_inode	*ip,
1922 	struct xfs_dquot	*udqp,
1923 	struct xfs_dquot	*gdqp,
1924 	struct xfs_dquot	*pdqp)
1925 {
1926 	struct xfs_mount	*mp = tp->t_mountp;
1927 
1928 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1929 		return;
1930 
1931 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1932 
1933 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1934 		ASSERT(ip->i_udquot == NULL);
1935 		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1936 
1937 		ip->i_udquot = xfs_qm_dqhold(udqp);
1938 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1939 	}
1940 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1941 		ASSERT(ip->i_gdquot == NULL);
1942 		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1943 
1944 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1945 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1946 	}
1947 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1948 		ASSERT(ip->i_pdquot == NULL);
1949 		ASSERT(ip->i_d.di_projid == pdqp->q_id);
1950 
1951 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1952 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1953 	}
1954 }
1955 
1956