xref: /openbmc/linux/fs/xfs/xfs_qm.c (revision adb57164)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 
27 /*
28  * The global quota manager. There is only one of these for the entire
29  * system, _not_ one per file system. XQM keeps track of the overall
30  * quota functionality, including maintaining the freelist and hash
31  * tables of dquots.
32  */
33 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
34 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
35 
36 STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38 /*
39  * We use the batch lookup interface to iterate over the dquots as it
40  * currently is the only interface into the radix tree code that allows
41  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
42  * operations is fine as all callers only run during mount/umount or
43  * quotaoff.
44  */
45 #define XFS_DQ_LOOKUP_BATCH	32
46 
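/*
 * Walk every cached dquot of the given type and call @execute on each one.
 * A callback return of -EAGAIN marks the dquot as skipped and causes the
 * whole walk to restart after a short delay; -EFSCORRUPTED aborts the walk.
 */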
47 STATIC int
48 xfs_qm_dquot_walk(
49 	struct xfs_mount	*mp,
50 	int			type,
51 	int			(*execute)(struct xfs_dquot *dqp, void *data),
52 	void			*data)
53 {
54 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
55 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
56 	uint32_t		next_index;
57 	int			last_error = 0;
58 	int			skipped;
59 	int			nr_found;
60 
61 restart:
62 	skipped = 0;
63 	next_index = 0;
64 	nr_found = 0;
65 
66 	while (1) {
67 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 		int		error = 0;
69 		int		i;
70 
71 		mutex_lock(&qi->qi_tree_lock);
72 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 					next_index, XFS_DQ_LOOKUP_BATCH);
74 		if (!nr_found) {
75 			mutex_unlock(&qi->qi_tree_lock);
76 			break;
77 		}
78 
79 		for (i = 0; i < nr_found; i++) {
80 			struct xfs_dquot *dqp = batch[i];
81 
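			/* Resume the next batch lookup after this dquot's id. */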
82 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
83 
84 			error = execute(batch[i], data);
85 			if (error == -EAGAIN) {
86 				skipped++;
87 				continue;
88 			}
89 			if (error && last_error != -EFSCORRUPTED)
90 				last_error = error;
91 		}
92 
93 		mutex_unlock(&qi->qi_tree_lock);
94 
95 		/* bail out if the filesystem is corrupted.  */
96 		if (last_error == -EFSCORRUPTED) {
97 			skipped = 0;
98 			break;
99 		}
100 		/* we're done if id overflows back to zero */
101 		if (!next_index)
102 			break;
103 	}
104 
105 	if (skipped) {
106 		delay(1);
107 		goto restart;
108 	}
109 
110 	return last_error;
111 }
112 
113 
114 /*
115  * Purge a dquot from all tracking data structures and free it.
116  */
117 STATIC int
118 xfs_qm_dqpurge(
119 	struct xfs_dquot	*dqp,
120 	void			*data)
121 {
122 	struct xfs_mount	*mp = dqp->q_mount;
123 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
124 	int			error = -EAGAIN;
125 
126 	xfs_dqlock(dqp);
127 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0)
128 		goto out_unlock;
129 
130 	dqp->dq_flags |= XFS_DQ_FREEING;
131 
132 	xfs_dqflock(dqp);
133 
134 	/*
135 	 * If we are turning this type of quota off, we don't care
136 	 * about the dirty metadata sitting in this dquot. OTOH, if
137 	 * we're unmounting, we do care, so we flush it and wait.
138 	 */
139 	if (XFS_DQ_IS_DIRTY(dqp)) {
140 		struct xfs_buf	*bp = NULL;
141 
142 		/*
143 		 * We don't care about getting disk errors here. We need
144 		 * to purge this dquot anyway, so we go ahead regardless.
145 		 */
146 		error = xfs_qm_dqflush(dqp, &bp);
147 		if (!error) {
148 			error = xfs_bwrite(bp);
149 			xfs_buf_relse(bp);
150 		} else if (error == -EAGAIN) {
151 			goto out_unlock;
152 		}
153 		xfs_dqflock(dqp);
154 	}
155 
156 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
157 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
158 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
159 
160 	xfs_dqfunlock(dqp);
161 	xfs_dqunlock(dqp);
162 
163 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
164 			  be32_to_cpu(dqp->q_core.d_id));
165 	qi->qi_dquots--;
166 
167 	/*
168 	 * We move dquots to the freelist as soon as their reference count
169 	 * hits zero, so this one really should be on the freelist here.
170 	 */
171 	ASSERT(!list_empty(&dqp->q_lru));
172 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
173 	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
174 
175 	xfs_qm_dqdestroy(dqp);
176 	return 0;
177 
178 out_unlock:
179 	xfs_dqunlock(dqp);
180 	return error;
181 }
182 
183 /*
184  * Purge the dquot cache.
185  */
186 void
187 xfs_qm_dqpurge_all(
188 	struct xfs_mount	*mp,
189 	uint			flags)
190 {
191 	if (flags & XFS_QMOPT_UQUOTA)
192 		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
193 	if (flags & XFS_QMOPT_GQUOTA)
194 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
195 	if (flags & XFS_QMOPT_PQUOTA)
196 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
197 }
198 
199 /*
200  * Just destroy the quotainfo structure.
201  */
202 void
203 xfs_qm_unmount(
204 	struct xfs_mount	*mp)
205 {
206 	if (mp->m_quotainfo) {
207 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
208 		xfs_qm_destroy_quotainfo(mp);
209 	}
210 }
211 
212 /*
213  * Called from the vfsops layer.
214  */
215 void
216 xfs_qm_unmount_quotas(
217 	xfs_mount_t	*mp)
218 {
219 	/*
220 	 * Release the dquots that the root inode, et al, might be holding,
221 	 * before we flush quotas and blow away the quotainfo structure.
222 	 */
223 	ASSERT(mp->m_rootip);
224 	xfs_qm_dqdetach(mp->m_rootip);
225 	if (mp->m_rbmip)
226 		xfs_qm_dqdetach(mp->m_rbmip);
227 	if (mp->m_rsumip)
228 		xfs_qm_dqdetach(mp->m_rsumip);
229 
230 	/*
231 	 * Release the quota inodes.
232 	 */
233 	if (mp->m_quotainfo) {
234 		if (mp->m_quotainfo->qi_uquotaip) {
235 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
236 			mp->m_quotainfo->qi_uquotaip = NULL;
237 		}
238 		if (mp->m_quotainfo->qi_gquotaip) {
239 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
240 			mp->m_quotainfo->qi_gquotaip = NULL;
241 		}
242 		if (mp->m_quotainfo->qi_pquotaip) {
243 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
244 			mp->m_quotainfo->qi_pquotaip = NULL;
245 		}
246 	}
247 }
248 
249 STATIC int
250 xfs_qm_dqattach_one(
251 	struct xfs_inode	*ip,
252 	xfs_dqid_t		id,
253 	uint			type,
254 	bool			doalloc,
255 	struct xfs_dquot	**IO_idqpp)
256 {
257 	struct xfs_dquot	*dqp;
258 	int			error;
259 
260 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
261 	error = 0;
262 
263 	/*
264 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
265 	 * or &i_gdquot. This makes the code look a little weird, but it keeps
266 	 * the logic a lot simpler.
267 	 */
268 	dqp = *IO_idqpp;
269 	if (dqp) {
270 		trace_xfs_dqattach_found(dqp);
271 		return 0;
272 	}
273 
274 	/*
275 	 * Find the dquot from somewhere. This bumps the reference count of the
276 	 * dquot and returns it locked.  This can return ENOENT if the dquot
277 	 * didn't exist on disk and we didn't ask it to allocate; ESRCH if
278 	 * quotas got turned off suddenly.
279 	 */
280 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
281 	if (error)
282 		return error;
283 
284 	trace_xfs_dqattach_get(dqp);
285 
286 	/*
287 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
288 	 * that the dquot returned is the one that should go in the inode.
289 	 */
290 	*IO_idqpp = dqp;
291 	xfs_dqunlock(dqp);
292 	return 0;
293 }
294 
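/*
 * Decide whether an inode needs dquots attached: quotas must be running and
 * on, the inode must still be missing at least one applicable dquot, and the
 * quota inodes themselves never have dquots attached.
 */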
295 static bool
296 xfs_qm_need_dqattach(
297 	struct xfs_inode	*ip)
298 {
299 	struct xfs_mount	*mp = ip->i_mount;
300 
301 	if (!XFS_IS_QUOTA_RUNNING(mp))
302 		return false;
303 	if (!XFS_IS_QUOTA_ON(mp))
304 		return false;
305 	if (!XFS_NOT_DQATTACHED(mp, ip))
306 		return false;
307 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
308 		return false;
309 	return true;
310 }
311 
312 /*
313  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
314  * into account.
315  * If @doalloc is true, the dquot(s) will be allocated if needed.
316  * The inode may get unlocked and relocked in here, and the caller must
317  * deal with the consequences.
318  */
319 int
320 xfs_qm_dqattach_locked(
321 	xfs_inode_t	*ip,
322 	bool		doalloc)
323 {
324 	xfs_mount_t	*mp = ip->i_mount;
325 	int		error = 0;
326 
327 	if (!xfs_qm_need_dqattach(ip))
328 		return 0;
329 
330 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
331 
332 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
333 		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
334 				XFS_DQ_USER, doalloc, &ip->i_udquot);
335 		if (error)
336 			goto done;
337 		ASSERT(ip->i_udquot);
338 	}
339 
340 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
341 		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
342 				XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
343 		if (error)
344 			goto done;
345 		ASSERT(ip->i_gdquot);
346 	}
347 
348 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
349 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
350 				doalloc, &ip->i_pdquot);
351 		if (error)
352 			goto done;
353 		ASSERT(ip->i_pdquot);
354 	}
355 
356 done:
357 	/*
358 	 * Don't worry about the dquots that we may have attached before any
359 	 * error - they'll get detached later if that has not already been done.
360 	 */
361 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
362 	return error;
363 }
364 
365 int
366 xfs_qm_dqattach(
367 	struct xfs_inode	*ip)
368 {
369 	int			error;
370 
371 	if (!xfs_qm_need_dqattach(ip))
372 		return 0;
373 
374 	xfs_ilock(ip, XFS_ILOCK_EXCL);
375 	error = xfs_qm_dqattach_locked(ip, false);
376 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
377 
378 	return error;
379 }
380 
381 /*
382  * Release dquots (and their references) if any.
383  * The inode should be locked EXCL except when this is called by
384  * xfs_ireclaim.
385  */
386 void
387 xfs_qm_dqdetach(
388 	xfs_inode_t	*ip)
389 {
390 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
391 		return;
392 
393 	trace_xfs_dquot_dqdetach(ip);
394 
395 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
396 	if (ip->i_udquot) {
397 		xfs_qm_dqrele(ip->i_udquot);
398 		ip->i_udquot = NULL;
399 	}
400 	if (ip->i_gdquot) {
401 		xfs_qm_dqrele(ip->i_gdquot);
402 		ip->i_gdquot = NULL;
403 	}
404 	if (ip->i_pdquot) {
405 		xfs_qm_dqrele(ip->i_pdquot);
406 		ip->i_pdquot = NULL;
407 	}
408 }
409 
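/*
 * State carried through one dquot shrinker pass: dirty dquot buffers queued
 * for delwri submission and clean dquots isolated from the LRU for disposal.
 */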
410 struct xfs_qm_isolate {
411 	struct list_head	buffers;
412 	struct list_head	dispose;
413 };
414 
415 static enum lru_status
416 xfs_qm_dquot_isolate(
417 	struct list_head	*item,
418 	struct list_lru_one	*lru,
419 	spinlock_t		*lru_lock,
420 	void			*arg)
421 		__releases(lru_lock) __acquires(lru_lock)
422 {
423 	struct xfs_dquot	*dqp = container_of(item,
424 						struct xfs_dquot, q_lru);
425 	struct xfs_qm_isolate	*isol = arg;
426 
427 	if (!xfs_dqlock_nowait(dqp))
428 		goto out_miss_busy;
429 
430 	/*
431 	 * This dquot has acquired a reference in the meantime; remove it from
432 	 * the freelist and try again.
433 	 */
434 	if (dqp->q_nrefs) {
435 		xfs_dqunlock(dqp);
436 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
437 
438 		trace_xfs_dqreclaim_want(dqp);
439 		list_lru_isolate(lru, &dqp->q_lru);
440 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
441 		return LRU_REMOVED;
442 	}
443 
444 	/*
445 	 * If the dquot is dirty, flush it. If it's already being flushed, just
446 	 * skip it so there is time for the IO to complete before we try to
447 	 * reclaim it again on the next LRU pass.
448 	 */
449 	if (!xfs_dqflock_nowait(dqp)) {
450 		xfs_dqunlock(dqp);
451 		goto out_miss_busy;
452 	}
453 
454 	if (XFS_DQ_IS_DIRTY(dqp)) {
455 		struct xfs_buf	*bp = NULL;
456 		int		error;
457 
458 		trace_xfs_dqreclaim_dirty(dqp);
459 
460 		/* we have to drop the LRU lock to flush the dquot */
461 		spin_unlock(lru_lock);
462 
463 		error = xfs_qm_dqflush(dqp, &bp);
464 		if (error)
465 			goto out_unlock_dirty;
466 
467 		xfs_buf_delwri_queue(bp, &isol->buffers);
468 		xfs_buf_relse(bp);
469 		goto out_unlock_dirty;
470 	}
471 	xfs_dqfunlock(dqp);
472 
473 	/*
474 	 * Prevent lookups now that we are past the point of no return.
475 	 */
476 	dqp->dq_flags |= XFS_DQ_FREEING;
477 	xfs_dqunlock(dqp);
478 
479 	ASSERT(dqp->q_nrefs == 0);
480 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
481 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
482 	trace_xfs_dqreclaim_done(dqp);
483 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
484 	return LRU_REMOVED;
485 
486 out_miss_busy:
487 	trace_xfs_dqreclaim_busy(dqp);
488 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
489 	return LRU_SKIP;
490 
491 out_unlock_dirty:
492 	trace_xfs_dqreclaim_busy(dqp);
493 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
494 	xfs_dqunlock(dqp);
495 	spin_lock(lru_lock);
496 	return LRU_RETRY;
497 }
498 
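/*
 * Shrinker scan callback: walk the dquot LRU, submit any dirty buffers that
 * the isolate callback queued, then free the dquots on the dispose list.
 */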
499 static unsigned long
500 xfs_qm_shrink_scan(
501 	struct shrinker		*shrink,
502 	struct shrink_control	*sc)
503 {
504 	struct xfs_quotainfo	*qi = container_of(shrink,
505 					struct xfs_quotainfo, qi_shrinker);
506 	struct xfs_qm_isolate	isol;
507 	unsigned long		freed;
508 	int			error;
509 
510 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
511 		return 0;
512 
513 	INIT_LIST_HEAD(&isol.buffers);
514 	INIT_LIST_HEAD(&isol.dispose);
515 
516 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
517 				     xfs_qm_dquot_isolate, &isol);
518 
519 	error = xfs_buf_delwri_submit(&isol.buffers);
520 	if (error)
521 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
522 
523 	while (!list_empty(&isol.dispose)) {
524 		struct xfs_dquot	*dqp;
525 
526 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
527 		list_del_init(&dqp->q_lru);
528 		xfs_qm_dqfree_one(dqp);
529 	}
530 
531 	return freed;
532 }
533 
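/* Shrinker count callback: report how many dquots sit on the LRU. */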
534 static unsigned long
535 xfs_qm_shrink_count(
536 	struct shrinker		*shrink,
537 	struct shrink_control	*sc)
538 {
539 	struct xfs_quotainfo	*qi = container_of(shrink,
540 					struct xfs_quotainfo, qi_shrinker);
541 
542 	return list_lru_shrink_count(&qi->qi_lru, sc);
543 }
544 
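/*
 * Seed the in-core default limits for one quota type from the id-zero dquot,
 * which is where the administrator's defaults are stored on disk.
 */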
545 STATIC void
546 xfs_qm_set_defquota(
547 	struct xfs_mount	*mp,
548 	uint			type,
549 	struct xfs_quotainfo	*qinf)
550 {
551 	struct xfs_dquot	*dqp;
552 	struct xfs_def_quota	*defq;
553 	struct xfs_disk_dquot	*ddqp;
554 	int			error;
555 
556 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
557 	if (error)
558 		return;
559 
560 	ddqp = &dqp->q_core;
561 	defq = xfs_get_defquota(dqp, qinf);
562 
563 	/*
564 	 * Timers and warnings have already been set, so just set the
565 	 * default limits for this quota type.
566 	 */
567 	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
568 	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
569 	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
570 	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
571 	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
572 	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
573 	xfs_qm_dqdestroy(dqp);
574 }
575 
576 /* Initialize quota time limits from the root dquot. */
577 static void
578 xfs_qm_init_timelimits(
579 	struct xfs_mount	*mp,
580 	struct xfs_quotainfo	*qinf)
581 {
582 	struct xfs_disk_dquot	*ddqp;
583 	struct xfs_dquot	*dqp;
584 	uint			type;
585 	int			error;
586 
587 	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
588 	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
589 	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
590 	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
591 	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
592 	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
593 
594 	/*
595 	 * We try to get the limits from the superuser's limits fields.
596 	 * This is quite hacky, but it is standard quota practice.
597 	 *
598 	 * Since we may not have done a quotacheck by this point, just read
599 	 * the dquot without attaching it to any hashtables or lists.
600 	 *
601 	 * Timers and warnings are globally set by the first timer found in
602 	 * user/group/proj quota types, otherwise a default value is used.
603 	 * This should be split into different fields per quota type.
604 	 */
605 	if (XFS_IS_UQUOTA_RUNNING(mp))
606 		type = XFS_DQ_USER;
607 	else if (XFS_IS_GQUOTA_RUNNING(mp))
608 		type = XFS_DQ_GROUP;
609 	else
610 		type = XFS_DQ_PROJ;
611 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
612 	if (error)
613 		return;
614 
615 	ddqp = &dqp->q_core;
616 	/*
617 	 * The warnings and timers set the grace period given to a user or
618 	 * group before any further writes are refused. If a value is zero,
619 	 * a default is used.
620 	 */
621 	if (ddqp->d_btimer)
622 		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
623 	if (ddqp->d_itimer)
624 		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
625 	if (ddqp->d_rtbtimer)
626 		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
627 	if (ddqp->d_bwarns)
628 		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
629 	if (ddqp->d_iwarns)
630 		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
631 	if (ddqp->d_rtbwarns)
632 		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
633 
634 	xfs_qm_dqdestroy(dqp);
635 }
636 
637 /*
638  * This initializes all the quota information that's kept in the
639  * mount structure.
640  */
641 STATIC int
642 xfs_qm_init_quotainfo(
643 	struct xfs_mount	*mp)
644 {
645 	struct xfs_quotainfo	*qinf;
646 	int			error;
647 
648 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
649 
650 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
651 
652 	error = list_lru_init(&qinf->qi_lru);
653 	if (error)
654 		goto out_free_qinf;
655 
656 	/*
657 	 * See if quotainodes are setup, and if not, allocate them,
658 	 * and change the superblock accordingly.
659 	 */
660 	error = xfs_qm_init_quotainos(mp);
661 	if (error)
662 		goto out_free_lru;
663 
664 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
665 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
666 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
667 	mutex_init(&qinf->qi_tree_lock);
668 
669 	/* mutex used to serialize quotaoffs */
670 	mutex_init(&qinf->qi_quotaofflock);
671 
672 	/* Precalc some constants */
673 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
674 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
675 
676 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
677 
678 	xfs_qm_init_timelimits(mp, qinf);
679 
680 	if (XFS_IS_UQUOTA_RUNNING(mp))
681 		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
682 	if (XFS_IS_GQUOTA_RUNNING(mp))
683 		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
684 	if (XFS_IS_PQUOTA_RUNNING(mp))
685 		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
686 
687 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
688 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
689 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
690 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
691 
692 	error = register_shrinker(&qinf->qi_shrinker);
693 	if (error)
694 		goto out_free_inos;
695 
696 	return 0;
697 
698 out_free_inos:
699 	mutex_destroy(&qinf->qi_quotaofflock);
700 	mutex_destroy(&qinf->qi_tree_lock);
701 	xfs_qm_destroy_quotainos(qinf);
702 out_free_lru:
703 	list_lru_destroy(&qinf->qi_lru);
704 out_free_qinf:
705 	kmem_free(qinf);
706 	mp->m_quotainfo = NULL;
707 	return error;
708 }
709 
710 /*
711  * Gets called when unmounting a filesystem or when all quotas get
712  * turned off.
713  * This purges the quota inodes, destroys locks and frees itself.
714  */
715 void
716 xfs_qm_destroy_quotainfo(
717 	struct xfs_mount	*mp)
718 {
719 	struct xfs_quotainfo	*qi;
720 
721 	qi = mp->m_quotainfo;
722 	ASSERT(qi != NULL);
723 
724 	unregister_shrinker(&qi->qi_shrinker);
725 	list_lru_destroy(&qi->qi_lru);
726 	xfs_qm_destroy_quotainos(qi);
727 	mutex_destroy(&qi->qi_tree_lock);
728 	mutex_destroy(&qi->qi_quotaofflock);
729 	kmem_free(qi);
730 	mp->m_quotainfo = NULL;
731 }
732 
733 /*
734  * Create an inode and return with a reference already taken, but unlocked.
735  * This is how we create quota inodes.
736  */
737 STATIC int
738 xfs_qm_qino_alloc(
739 	xfs_mount_t	*mp,
740 	xfs_inode_t	**ip,
741 	uint		flags)
742 {
743 	xfs_trans_t	*tp;
744 	int		error;
745 	bool		need_alloc = true;
746 
747 	*ip = NULL;
748 	/*
749 	 * With a superblock that doesn't have a separate pquotino, we
750 	 * share an inode between gquota and pquota. If the on-disk
751 	 * superblock has GQUOTA and the filesystem is now mounted
752 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
753 	 * vice-versa.
754 	 */
755 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
756 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
757 		xfs_ino_t ino = NULLFSINO;
758 
759 		if ((flags & XFS_QMOPT_PQUOTA) &&
760 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
761 			ino = mp->m_sb.sb_gquotino;
762 			if (XFS_IS_CORRUPT(mp,
763 					   mp->m_sb.sb_pquotino != NULLFSINO))
764 				return -EFSCORRUPTED;
765 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
766 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
767 			ino = mp->m_sb.sb_pquotino;
768 			if (XFS_IS_CORRUPT(mp,
769 					   mp->m_sb.sb_gquotino != NULLFSINO))
770 				return -EFSCORRUPTED;
771 		}
772 		if (ino != NULLFSINO) {
773 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
774 			if (error)
775 				return error;
776 			mp->m_sb.sb_gquotino = NULLFSINO;
777 			mp->m_sb.sb_pquotino = NULLFSINO;
778 			need_alloc = false;
779 		}
780 	}
781 
782 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
783 			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
784 	if (error)
785 		return error;
786 
787 	if (need_alloc) {
788 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
789 		if (error) {
790 			xfs_trans_cancel(tp);
791 			return error;
792 		}
793 	}
794 
795 	/*
796 	 * Make the changes in the superblock, and log those too.
797 	 * sbfields arg may contain fields other than *QUOTINO;
798 	 * VERSIONNUM for example.
799 	 */
800 	spin_lock(&mp->m_sb_lock);
801 	if (flags & XFS_QMOPT_SBVERSION) {
802 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
803 
804 		xfs_sb_version_addquota(&mp->m_sb);
805 		mp->m_sb.sb_uquotino = NULLFSINO;
806 		mp->m_sb.sb_gquotino = NULLFSINO;
807 		mp->m_sb.sb_pquotino = NULLFSINO;
808 
809 		/* qflags will get updated fully _after_ quotacheck */
810 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
811 	}
812 	if (flags & XFS_QMOPT_UQUOTA)
813 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
814 	else if (flags & XFS_QMOPT_GQUOTA)
815 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
816 	else
817 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
818 	spin_unlock(&mp->m_sb_lock);
819 	xfs_log_sb(tp);
820 
821 	error = xfs_trans_commit(tp);
822 	if (error) {
823 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
824 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
825 	}
826 	if (need_alloc)
827 		xfs_finish_inode_setup(*ip);
828 	return error;
829 }
830 
831 
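/*
 * Prepare one buffer full of dquots for quotacheck: repair anything that
 * fails verification, zero the usage counters and recompute CRCs if needed.
 */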
832 STATIC void
833 xfs_qm_reset_dqcounts(
834 	xfs_mount_t	*mp,
835 	xfs_buf_t	*bp,
836 	xfs_dqid_t	id,
837 	uint		type)
838 {
839 	struct xfs_dqblk	*dqb;
840 	int			j;
841 	xfs_failaddr_t		fa;
842 
843 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
844 
845 	/*
846 	 * Reset all counters and timers. They'll be
847 	 * started afresh by xfs_qm_quotacheck.
848 	 */
849 #ifdef DEBUG
850 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
851 		sizeof(xfs_dqblk_t);
852 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
853 #endif
854 	dqb = bp->b_addr;
855 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
856 		struct xfs_disk_dquot	*ddq;
857 
858 		ddq = (struct xfs_disk_dquot *)&dqb[j];
859 
860 		/*
861 		 * Do a sanity check, and if needed, repair the dqblk. Don't
862 		 * output any warnings because it's perfectly possible to
863 		 * find uninitialised dquot blks. See comment in
864 		 * xfs_dquot_verify.
865 		 */
866 		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
867 		if (fa)
868 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
869 
870 		/*
871 		 * Reset type in case we are reusing group quota file for
872 		 * project quotas or vice versa
873 		 */
874 		ddq->d_flags = type;
875 		ddq->d_bcount = 0;
876 		ddq->d_icount = 0;
877 		ddq->d_rtbcount = 0;
878 
879 		/*
880 		 * dquot id 0 stores the default grace period and the maximum
881 		 * warning limit that were set by the administrator, so we
882 		 * should not reset them.
883 		 */
884 		if (ddq->d_id != 0) {
885 			ddq->d_btimer = 0;
886 			ddq->d_itimer = 0;
887 			ddq->d_rtbtimer = 0;
888 			ddq->d_bwarns = 0;
889 			ddq->d_iwarns = 0;
890 			ddq->d_rtbwarns = 0;
891 		}
892 
893 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
894 			xfs_update_cksum((char *)&dqb[j],
895 					 sizeof(struct xfs_dqblk),
896 					 XFS_DQUOT_CRC_OFF);
897 		}
898 	}
899 }
900 
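/*
 * Read each dquot buffer in a contiguous run of filesystem blocks, reset the
 * dquots it contains and queue the buffer for delayed write.
 */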
901 STATIC int
902 xfs_qm_reset_dqcounts_all(
903 	struct xfs_mount	*mp,
904 	xfs_dqid_t		firstid,
905 	xfs_fsblock_t		bno,
906 	xfs_filblks_t		blkcnt,
907 	uint			flags,
908 	struct list_head	*buffer_list)
909 {
910 	struct xfs_buf		*bp;
911 	int			error;
912 	int			type;
913 
914 	ASSERT(blkcnt > 0);
915 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
916 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
917 	error = 0;
918 
919 	/*
920 	 * Blkcnt arg can be a very big number, and might even be
921 	 * larger than the log itself. So, we have to break it up into
922 	 * manageable-sized transactions.
923 	 * Note that we don't start a permanent transaction here; we might
924 	 * not be able to get a log reservation for the whole thing up front,
925 	 * and we don't really care to either, because we just discard
926 	 * everything if we were to crash in the middle of this loop.
927 	 */
928 	while (blkcnt--) {
929 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
930 			      XFS_FSB_TO_DADDR(mp, bno),
931 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
932 			      &xfs_dquot_buf_ops);
933 
934 		/*
935 		 * CRC and validation errors will return an EFSCORRUPTED here. If
936 		 * this occurs, re-read without CRC validation so that we can
937 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
938 		 * will leave a trace in the log indicating corruption has
939 		 * been detected.
940 		 */
941 		if (error == -EFSCORRUPTED) {
942 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
943 				      XFS_FSB_TO_DADDR(mp, bno),
944 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
945 				      NULL);
946 		}
947 
948 		if (error)
949 			break;
950 
951 		/*
952 		 * A corrupt buffer might not have a verifier attached, so
953 		 * make sure we have the correct one attached before writeback
954 		 * occurs.
955 		 */
956 		bp->b_ops = &xfs_dquot_buf_ops;
957 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
958 		xfs_buf_delwri_queue(bp, buffer_list);
959 		xfs_buf_relse(bp);
960 
961 		/* go on to the next block. */
962 		bno++;
963 		firstid += mp->m_quotainfo->qi_dqperchunk;
964 	}
965 
966 	return error;
967 }
968 
969 /*
970  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
971  * counters for every chunk of dquots that we find.
972  */
973 STATIC int
974 xfs_qm_reset_dqcounts_buf(
975 	struct xfs_mount	*mp,
976 	struct xfs_inode	*qip,
977 	uint			flags,
978 	struct list_head	*buffer_list)
979 {
980 	struct xfs_bmbt_irec	*map;
981 	int			i, nmaps;	/* number of map entries */
982 	int			error;		/* return value */
983 	xfs_fileoff_t		lblkno;
984 	xfs_filblks_t		maxlblkcnt;
985 	xfs_dqid_t		firstid;
986 	xfs_fsblock_t		rablkno;
987 	xfs_filblks_t		rablkcnt;
988 
989 	error = 0;
990 	/*
991 	 * This looks racy, but we can't keep an inode lock across a
992 	 * trans_reserve. But, this gets called during quotacheck, and that
993 	 * happens only at mount time which is single threaded.
994 	 */
995 	if (qip->i_d.di_nblocks == 0)
996 		return 0;
997 
998 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
999 
1000 	lblkno = 0;
1001 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1002 	do {
1003 		uint		lock_mode;
1004 
1005 		nmaps = XFS_DQITER_MAP_SIZE;
1006 		/*
1007 		 * We aren't changing the inode itself. Just changing
1008 		 * some of its data. No new blocks are added here, and
1009 		 * the inode is never added to the transaction.
1010 		 */
1011 		lock_mode = xfs_ilock_data_map_shared(qip);
1012 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1013 				       map, &nmaps, 0);
1014 		xfs_iunlock(qip, lock_mode);
1015 		if (error)
1016 			break;
1017 
1018 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1019 		for (i = 0; i < nmaps; i++) {
1020 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1021 			ASSERT(map[i].br_blockcount);
1022 
1023 
1024 			lblkno += map[i].br_blockcount;
1025 
1026 			if (map[i].br_startblock == HOLESTARTBLOCK)
1027 				continue;
1028 
1029 			firstid = (xfs_dqid_t) map[i].br_startoff *
1030 				mp->m_quotainfo->qi_dqperchunk;
1031 			/*
1032 			 * Do a read-ahead on the next extent.
1033 			 */
1034 			if ((i+1 < nmaps) &&
1035 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1036 				rablkcnt =  map[i+1].br_blockcount;
1037 				rablkno = map[i+1].br_startblock;
1038 				while (rablkcnt--) {
1039 					xfs_buf_readahead(mp->m_ddev_targp,
1040 					       XFS_FSB_TO_DADDR(mp, rablkno),
1041 					       mp->m_quotainfo->qi_dqchunklen,
1042 					       &xfs_dquot_buf_ops);
1043 					rablkno++;
1044 				}
1045 			}
1046 			/*
1047 			 * Iterate through all the blocks in the extent and
1048 			 * reset the counters of all the dquots inside them.
1049 			 */
1050 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1051 						   map[i].br_startblock,
1052 						   map[i].br_blockcount,
1053 						   flags, buffer_list);
1054 			if (error)
1055 				goto out;
1056 		}
1057 	} while (nmaps > 0);
1058 
1059 out:
1060 	kmem_free(map);
1061 	return error;
1062 }
1063 
1064 /*
1065  * Called by dqusage_adjust in doing a quotacheck.
1066  *
1067  * Given the inode, and a dquot id this updates both the incore dqout as well
1068  * as the buffer copy. This is so that once the quotacheck is done, we can
1069  * just log all the buffers, as opposed to logging numerous updates to
1070  * individual dquots.
1071  */
1072 STATIC int
1073 xfs_qm_quotacheck_dqadjust(
1074 	struct xfs_inode	*ip,
1075 	uint			type,
1076 	xfs_qcnt_t		nblks,
1077 	xfs_qcnt_t		rtblks)
1078 {
1079 	struct xfs_mount	*mp = ip->i_mount;
1080 	struct xfs_dquot	*dqp;
1081 	xfs_dqid_t		id;
1082 	int			error;
1083 
1084 	id = xfs_qm_id_for_quotatype(ip, type);
1085 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1086 	if (error) {
1087 		/*
1088 		 * Shouldn't be able to turn off quotas here.
1089 		 */
1090 		ASSERT(error != -ESRCH);
1091 		ASSERT(error != -ENOENT);
1092 		return error;
1093 	}
1094 
1095 	trace_xfs_dqadjust(dqp);
1096 
1097 	/*
1098 	 * Adjust the inode count and the block count to reflect this inode's
1099 	 * resource usage.
1100 	 */
1101 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1102 	dqp->q_res_icount++;
1103 	if (nblks) {
1104 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1105 		dqp->q_res_bcount += nblks;
1106 	}
1107 	if (rtblks) {
1108 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1109 		dqp->q_res_rtbcount += rtblks;
1110 	}
1111 
1112 	/*
1113 	 * Set default limits, adjust timers (since we changed usages)
1114 	 *
1115 	 * There are no timers for the default values set in the root dquot.
1116 	 */
1117 	if (dqp->q_core.d_id) {
1118 		xfs_qm_adjust_dqlimits(mp, dqp);
1119 		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1120 	}
1121 
1122 	dqp->dq_flags |= XFS_DQ_DIRTY;
1123 	xfs_qm_dqput(dqp);
1124 	return 0;
1125 }
1126 
1127 /*
1128  * Callback routine supplied to the inode walk. Given an inumber, find its
1129  * dquots and update them to account for resources taken by that inode.
1130  */
1131 /* ARGSUSED */
1132 STATIC int
1133 xfs_qm_dqusage_adjust(
1134 	struct xfs_mount	*mp,
1135 	struct xfs_trans	*tp,
1136 	xfs_ino_t		ino,
1137 	void			*data)
1138 {
1139 	struct xfs_inode	*ip;
1140 	xfs_qcnt_t		nblks;
1141 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1142 	int			error;
1143 
1144 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1145 
1146 	/*
1147 	 * rootino must have its resources accounted for, not so with the quota
1148 	 * inodes.
1149 	 */
1150 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1151 		return 0;
1152 
1153 	/*
1154 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1155 	 * at mount time and therefore nobody will be racing chown/chproj.
1156 	 */
1157 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1158 	if (error == -EINVAL || error == -ENOENT)
1159 		return 0;
1160 	if (error)
1161 		return error;
1162 
1163 	ASSERT(ip->i_delayed_blks == 0);
1164 
1165 	if (XFS_IS_REALTIME_INODE(ip)) {
1166 		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1167 
1168 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1169 			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1170 			if (error)
1171 				goto error0;
1172 		}
1173 
1174 		xfs_bmap_count_leaves(ifp, &rtblks);
1175 	}
1176 
1177 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1178 
1179 	/*
1180 	 * Add the (disk blocks and inode) resources occupied by this
1181 	 * inode to its dquots. We do this adjustment in the incore dquot,
1182 	 * and also copy the changes to its buffer.
1183 	 * We don't care about putting these changes in a transaction
1184 	 * envelope because if we crash in the middle of a 'quotacheck'
1185 	 * we have to start from the beginning anyway.
1186 	 * Once we're done, we'll log all the dquot bufs.
1187 	 *
1188 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1189 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1190 	 */
1191 	if (XFS_IS_UQUOTA_ON(mp)) {
1192 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
1193 				rtblks);
1194 		if (error)
1195 			goto error0;
1196 	}
1197 
1198 	if (XFS_IS_GQUOTA_ON(mp)) {
1199 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
1200 				rtblks);
1201 		if (error)
1202 			goto error0;
1203 	}
1204 
1205 	if (XFS_IS_PQUOTA_ON(mp)) {
1206 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
1207 				rtblks);
1208 		if (error)
1209 			goto error0;
1210 	}
1211 
1212 error0:
1213 	xfs_irele(ip);
1214 	return error;
1215 }
1216 
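/*
 * Flush a single dirty dquot to its backing buffer and queue that buffer on
 * the caller's delwri list; used by quotacheck once all adjustments are done.
 */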
1217 STATIC int
1218 xfs_qm_flush_one(
1219 	struct xfs_dquot	*dqp,
1220 	void			*data)
1221 {
1222 	struct xfs_mount	*mp = dqp->q_mount;
1223 	struct list_head	*buffer_list = data;
1224 	struct xfs_buf		*bp = NULL;
1225 	int			error = 0;
1226 
1227 	xfs_dqlock(dqp);
1228 	if (dqp->dq_flags & XFS_DQ_FREEING)
1229 		goto out_unlock;
1230 	if (!XFS_DQ_IS_DIRTY(dqp))
1231 		goto out_unlock;
1232 
1233 	/*
1234 	 * The only way the dquot is already flush locked by the time quotacheck
1235 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1236 	 * it for the final time. Quotacheck collects all dquot bufs in the
1237 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1238 	 * possibly queued it for I/O. The only way out is to push the buffer to
1239 	 * cycle the flush lock.
1240 	 */
1241 	if (!xfs_dqflock_nowait(dqp)) {
1242 		/* buf is pinned in-core by delwri list */
1243 		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1244 				mp->m_quotainfo->qi_dqchunklen, 0);
1245 		if (!bp) {
1246 			error = -EINVAL;
1247 			goto out_unlock;
1248 		}
1249 		xfs_buf_unlock(bp);
1250 
1251 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1252 		xfs_buf_rele(bp);
1253 
1254 		error = -EAGAIN;
1255 		goto out_unlock;
1256 	}
1257 
1258 	error = xfs_qm_dqflush(dqp, &bp);
1259 	if (error)
1260 		goto out_unlock;
1261 
1262 	xfs_buf_delwri_queue(bp, buffer_list);
1263 	xfs_buf_relse(bp);
1264 out_unlock:
1265 	xfs_dqunlock(dqp);
1266 	return error;
1267 }
1268 
1269 /*
1270  * Walk through all the filesystem inodes and construct a consistent view
1271  * of the disk quota world. If the quotacheck fails, disable quotas.
1272  */
1273 STATIC int
1274 xfs_qm_quotacheck(
1275 	xfs_mount_t	*mp)
1276 {
1277 	int			error, error2;
1278 	uint			flags;
1279 	LIST_HEAD		(buffer_list);
1280 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1281 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1282 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1283 
1284 	flags = 0;
1285 
1286 	ASSERT(uip || gip || pip);
1287 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1288 
1289 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1290 
1291 	/*
1292 	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1293 	 * their counters to zero. We need a clean slate.
1294 	 * We don't log our changes till later.
1295 	 */
1296 	if (uip) {
1297 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
1298 					 &buffer_list);
1299 		if (error)
1300 			goto error_return;
1301 		flags |= XFS_UQUOTA_CHKD;
1302 	}
1303 
1304 	if (gip) {
1305 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
1306 					 &buffer_list);
1307 		if (error)
1308 			goto error_return;
1309 		flags |= XFS_GQUOTA_CHKD;
1310 	}
1311 
1312 	if (pip) {
1313 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
1314 					 &buffer_list);
1315 		if (error)
1316 			goto error_return;
1317 		flags |= XFS_PQUOTA_CHKD;
1318 	}
1319 
1320 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1321 			NULL);
1322 	if (error)
1323 		goto error_return;
1324 
1325 	/*
1326 	 * We've made all the changes that we need to make incore.  Flush them
1327 	 * down to disk buffers if everything was updated successfully.
1328 	 */
1329 	if (XFS_IS_UQUOTA_ON(mp)) {
1330 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1331 					  &buffer_list);
1332 	}
1333 	if (XFS_IS_GQUOTA_ON(mp)) {
1334 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1335 					   &buffer_list);
1336 		if (!error)
1337 			error = error2;
1338 	}
1339 	if (XFS_IS_PQUOTA_ON(mp)) {
1340 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1341 					   &buffer_list);
1342 		if (!error)
1343 			error = error2;
1344 	}
1345 
1346 	error2 = xfs_buf_delwri_submit(&buffer_list);
1347 	if (!error)
1348 		error = error2;
1349 
1350 	/*
1351 	 * We can get this error if we couldn't do a dquot allocation inside
1352 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1353 	 * dirty dquots that might be cached, we just want to get rid of them
1354 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1355 	 * at this point (because we intentionally didn't in dqget_noattach).
1356 	 */
1357 	if (error) {
1358 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1359 		goto error_return;
1360 	}
1361 
1362 	/*
1363 	 * If one type of quota is off, then it will lose its
1364 	 * quotachecked status, since we won't be doing accounting for
1365 	 * that type anymore.
1366 	 */
1367 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1368 	mp->m_qflags |= flags;
1369 
1370  error_return:
1371 	xfs_buf_delwri_cancel(&buffer_list);
1372 
1373 	if (error) {
1374 		xfs_warn(mp,
1375 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1376 			error);
1377 		/*
1378 		 * We must turn off quotas.
1379 		 */
1380 		ASSERT(mp->m_quotainfo != NULL);
1381 		xfs_qm_destroy_quotainfo(mp);
1382 		if (xfs_mount_reset_sbqflags(mp)) {
1383 			xfs_warn(mp,
1384 				"Quotacheck: Failed to reset quota flags.");
1385 		}
1386 	} else
1387 		xfs_notice(mp, "Quotacheck: Done.");
1388 	return error;
1389 }
1390 
1391 /*
1392  * This is called from xfs_mountfs to start quotas and initialize all
1393  * necessary data structures like quotainfo.  This is also responsible for
1394  * running a quotacheck as necessary.  We are guaranteed that the superblock
1395  * is consistently read in at this point.
1396  *
1397  * If we fail here, the mount will continue with quota turned off. We don't
1398  * need to indicate success or failure at all.
1399  */
1400 void
1401 xfs_qm_mount_quotas(
1402 	struct xfs_mount	*mp)
1403 {
1404 	int			error = 0;
1405 	uint			sbf;
1406 
1407 	/*
1408 	 * If quotas on realtime volumes are not supported, we disable
1409 	 * quotas immediately.
1410 	 */
1411 	if (mp->m_sb.sb_rextents) {
1412 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1413 		mp->m_qflags = 0;
1414 		goto write_changes;
1415 	}
1416 
1417 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1418 
1419 	/*
1420 	 * Allocate the quotainfo structure inside the mount struct, and
1421 	 * create quotainode(s), and change/rev superblock if necessary.
1422 	 */
1423 	error = xfs_qm_init_quotainfo(mp);
1424 	if (error) {
1425 		/*
1426 		 * We must turn off quotas.
1427 		 */
1428 		ASSERT(mp->m_quotainfo == NULL);
1429 		mp->m_qflags = 0;
1430 		goto write_changes;
1431 	}
1432 	/*
1433 	 * If any of the quotas are not consistent, do a quotacheck.
1434 	 */
1435 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1436 		error = xfs_qm_quotacheck(mp);
1437 		if (error) {
1438 			/* Quotacheck failed and disabled quotas. */
1439 			return;
1440 		}
1441 	}
1442 	/*
1443 	 * If one type of quota is off, then it will lose its
1444 	 * quotachecked status, since we won't be doing accounting for
1445 	 * that type anymore.
1446 	 */
1447 	if (!XFS_IS_UQUOTA_ON(mp))
1448 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1449 	if (!XFS_IS_GQUOTA_ON(mp))
1450 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1451 	if (!XFS_IS_PQUOTA_ON(mp))
1452 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1453 
1454  write_changes:
1455 	/*
1456 	 * We actually don't have to acquire the m_sb_lock at all.
1457 	 * This can only be called from mount, and that's single threaded. XXX
1458 	 */
1459 	spin_lock(&mp->m_sb_lock);
1460 	sbf = mp->m_sb.sb_qflags;
1461 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1462 	spin_unlock(&mp->m_sb_lock);
1463 
1464 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1465 		if (xfs_sync_sb(mp, false)) {
1466 			/*
1467 			 * We could only have been turning quotas off.
1468 			 * We aren't in very good shape actually because
1469 			 * the incore structures are convinced that quotas are
1470 			 * off, but the on-disk superblock doesn't know that!
1471 			 */
1472 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1473 			xfs_alert(mp, "%s: Superblock update failed!",
1474 				__func__);
1475 		}
1476 	}
1477 
1478 	if (error) {
1479 		xfs_warn(mp, "Failed to initialize disk quotas.");
1480 		return;
1481 	}
1482 }
1483 
1484 /*
1485  * This is called after the superblock has been read in and we're ready to
1486  * iget the quota inodes.
1487  */
1488 STATIC int
1489 xfs_qm_init_quotainos(
1490 	xfs_mount_t	*mp)
1491 {
1492 	struct xfs_inode	*uip = NULL;
1493 	struct xfs_inode	*gip = NULL;
1494 	struct xfs_inode	*pip = NULL;
1495 	int			error;
1496 	uint			flags = 0;
1497 
1498 	ASSERT(mp->m_quotainfo);
1499 
1500 	/*
1501 	 * Get the uquota and gquota inodes
1502 	 */
1503 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1504 		if (XFS_IS_UQUOTA_ON(mp) &&
1505 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1506 			ASSERT(mp->m_sb.sb_uquotino > 0);
1507 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1508 					     0, 0, &uip);
1509 			if (error)
1510 				return error;
1511 		}
1512 		if (XFS_IS_GQUOTA_ON(mp) &&
1513 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1514 			ASSERT(mp->m_sb.sb_gquotino > 0);
1515 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1516 					     0, 0, &gip);
1517 			if (error)
1518 				goto error_rele;
1519 		}
1520 		if (XFS_IS_PQUOTA_ON(mp) &&
1521 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1522 			ASSERT(mp->m_sb.sb_pquotino > 0);
1523 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1524 					     0, 0, &pip);
1525 			if (error)
1526 				goto error_rele;
1527 		}
1528 	} else {
1529 		flags |= XFS_QMOPT_SBVERSION;
1530 	}
1531 
1532 	/*
1533 	 * Create the three inodes, if they don't exist already. The changes
1534 	 * made above will get added to a transaction and logged in one of
1535 	 * the qino_alloc calls below.  If the device is readonly,
1536 	 * temporarily switch to read-write to do this.
1537 	 */
1538 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1539 		error = xfs_qm_qino_alloc(mp, &uip,
1540 					      flags | XFS_QMOPT_UQUOTA);
1541 		if (error)
1542 			goto error_rele;
1543 
1544 		flags &= ~XFS_QMOPT_SBVERSION;
1545 	}
1546 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1547 		error = xfs_qm_qino_alloc(mp, &gip,
1548 					  flags | XFS_QMOPT_GQUOTA);
1549 		if (error)
1550 			goto error_rele;
1551 
1552 		flags &= ~XFS_QMOPT_SBVERSION;
1553 	}
1554 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1555 		error = xfs_qm_qino_alloc(mp, &pip,
1556 					  flags | XFS_QMOPT_PQUOTA);
1557 		if (error)
1558 			goto error_rele;
1559 	}
1560 
1561 	mp->m_quotainfo->qi_uquotaip = uip;
1562 	mp->m_quotainfo->qi_gquotaip = gip;
1563 	mp->m_quotainfo->qi_pquotaip = pip;
1564 
1565 	return 0;
1566 
1567 error_rele:
1568 	if (uip)
1569 		xfs_irele(uip);
1570 	if (gip)
1571 		xfs_irele(gip);
1572 	if (pip)
1573 		xfs_irele(pip);
1574 	return error;
1575 }
1576 
1577 STATIC void
1578 xfs_qm_destroy_quotainos(
1579 	struct xfs_quotainfo	*qi)
1580 {
1581 	if (qi->qi_uquotaip) {
1582 		xfs_irele(qi->qi_uquotaip);
1583 		qi->qi_uquotaip = NULL; /* paranoia */
1584 	}
1585 	if (qi->qi_gquotaip) {
1586 		xfs_irele(qi->qi_gquotaip);
1587 		qi->qi_gquotaip = NULL;
1588 	}
1589 	if (qi->qi_pquotaip) {
1590 		xfs_irele(qi->qi_pquotaip);
1591 		qi->qi_pquotaip = NULL;
1592 	}
1593 }
1594 
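/*
 * Final teardown of a dquot isolated by the shrinker: remove it from the
 * per-type radix tree, drop the dquot count and destroy it.
 */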
1595 STATIC void
1596 xfs_qm_dqfree_one(
1597 	struct xfs_dquot	*dqp)
1598 {
1599 	struct xfs_mount	*mp = dqp->q_mount;
1600 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1601 
1602 	mutex_lock(&qi->qi_tree_lock);
1603 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1604 			  be32_to_cpu(dqp->q_core.d_id));
1605 
1606 	qi->qi_dquots--;
1607 	mutex_unlock(&qi->qi_tree_lock);
1608 
1609 	xfs_qm_dqdestroy(dqp);
1610 }
1611 
1612 /* --------------- utility functions for vnodeops ---------------- */
1613 
1614 
1615 /*
1616  * Given an inode, a uid, gid and prid, make sure that we have
1617  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1618  * quotas by creating this file.
1619  * This also attaches dquot(s) to the given inode after locking it,
1620  * and returns the dquots corresponding to the uid and/or gid.
1621  *
1622  * in	: inode (unlocked)
1623  * out	: udquot, gdquot with references taken and unlocked
1624  */
1625 int
1626 xfs_qm_vop_dqalloc(
1627 	struct xfs_inode	*ip,
1628 	kuid_t			uid,
1629 	kgid_t			gid,
1630 	prid_t			prid,
1631 	uint			flags,
1632 	struct xfs_dquot	**O_udqpp,
1633 	struct xfs_dquot	**O_gdqpp,
1634 	struct xfs_dquot	**O_pdqpp)
1635 {
1636 	struct xfs_mount	*mp = ip->i_mount;
1637 	struct inode		*inode = VFS_I(ip);
1638 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1639 	struct xfs_dquot	*uq = NULL;
1640 	struct xfs_dquot	*gq = NULL;
1641 	struct xfs_dquot	*pq = NULL;
1642 	int			error;
1643 	uint			lockflags;
1644 
1645 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1646 		return 0;
1647 
1648 	lockflags = XFS_ILOCK_EXCL;
1649 	xfs_ilock(ip, lockflags);
1650 
1651 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1652 		gid = inode->i_gid;
1653 
1654 	/*
1655 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1656 	 * if necessary. The dquot(s) will not be locked.
1657 	 */
1658 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1659 		error = xfs_qm_dqattach_locked(ip, true);
1660 		if (error) {
1661 			xfs_iunlock(ip, lockflags);
1662 			return error;
1663 		}
1664 	}
1665 
1666 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1667 		if (!uid_eq(inode->i_uid, uid)) {
1668 			/*
1669 			 * What we need is the dquot that has this uid, and
1670 			 * if we send the inode to dqget, the uid of the inode
1671 			 * takes priority over what's sent in the uid argument.
1672 			 * We must unlock the inode here before calling dqget if
1673 			 * we're not sending the inode, because otherwise
1674 			 * we'll deadlock by doing trans_reserve while
1675 			 * holding ilock.
1676 			 */
1677 			xfs_iunlock(ip, lockflags);
1678 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1679 					XFS_DQ_USER, true, &uq);
1680 			if (error) {
1681 				ASSERT(error != -ENOENT);
1682 				return error;
1683 			}
1684 			/*
1685 			 * Get the ilock in the right order.
1686 			 */
1687 			xfs_dqunlock(uq);
1688 			lockflags = XFS_ILOCK_SHARED;
1689 			xfs_ilock(ip, lockflags);
1690 		} else {
1691 			/*
1692 			 * Take an extra reference, because we'll return
1693 			 * this to the caller.
1694 			 */
1695 			ASSERT(ip->i_udquot);
1696 			uq = xfs_qm_dqhold(ip->i_udquot);
1697 		}
1698 	}
1699 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1700 		if (!gid_eq(inode->i_gid, gid)) {
1701 			xfs_iunlock(ip, lockflags);
1702 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1703 					XFS_DQ_GROUP, true, &gq);
1704 			if (error) {
1705 				ASSERT(error != -ENOENT);
1706 				goto error_rele;
1707 			}
1708 			xfs_dqunlock(gq);
1709 			lockflags = XFS_ILOCK_SHARED;
1710 			xfs_ilock(ip, lockflags);
1711 		} else {
1712 			ASSERT(ip->i_gdquot);
1713 			gq = xfs_qm_dqhold(ip->i_gdquot);
1714 		}
1715 	}
1716 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1717 		if (ip->i_d.di_projid != prid) {
1718 			xfs_iunlock(ip, lockflags);
1719 			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
1720 					true, &pq);
1721 			if (error) {
1722 				ASSERT(error != -ENOENT);
1723 				goto error_rele;
1724 			}
1725 			xfs_dqunlock(pq);
1726 			lockflags = XFS_ILOCK_SHARED;
1727 			xfs_ilock(ip, lockflags);
1728 		} else {
1729 			ASSERT(ip->i_pdquot);
1730 			pq = xfs_qm_dqhold(ip->i_pdquot);
1731 		}
1732 	}
1733 	if (uq)
1734 		trace_xfs_dquot_dqalloc(ip);
1735 
1736 	xfs_iunlock(ip, lockflags);
1737 	if (O_udqpp)
1738 		*O_udqpp = uq;
1739 	else
1740 		xfs_qm_dqrele(uq);
1741 	if (O_gdqpp)
1742 		*O_gdqpp = gq;
1743 	else
1744 		xfs_qm_dqrele(gq);
1745 	if (O_pdqpp)
1746 		*O_pdqpp = pq;
1747 	else
1748 		xfs_qm_dqrele(pq);
1749 	return 0;
1750 
1751 error_rele:
1752 	xfs_qm_dqrele(gq);
1753 	xfs_qm_dqrele(uq);
1754 	return error;
1755 }
1756 
1757 /*
1758  * Actually transfer ownership, and do dquot modifications.
1759  * These were already reserved.
1760  */
1761 struct xfs_dquot *
1762 xfs_qm_vop_chown(
1763 	struct xfs_trans	*tp,
1764 	struct xfs_inode	*ip,
1765 	struct xfs_dquot	**IO_olddq,
1766 	struct xfs_dquot	*newdq)
1767 {
1768 	struct xfs_dquot	*prevdq;
1769 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1770 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1771 
1772 
1773 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1774 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1775 
1776 	/* old dquot */
1777 	prevdq = *IO_olddq;
1778 	ASSERT(prevdq);
1779 	ASSERT(prevdq != newdq);
1780 
1781 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1782 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1783 
1784 	/* the sparkling new dquot */
1785 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1786 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1787 
1788 	/*
1789 	 * Take an extra reference, because the inode is going to keep
1790 	 * this dquot pointer even after the trans_commit.
1791 	 */
1792 	*IO_olddq = xfs_qm_dqhold(newdq);
1793 
1794 	return prevdq;
1795 }
1796 
1797 /*
1798  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1799  */
1800 int
1801 xfs_qm_vop_chown_reserve(
1802 	struct xfs_trans	*tp,
1803 	struct xfs_inode	*ip,
1804 	struct xfs_dquot	*udqp,
1805 	struct xfs_dquot	*gdqp,
1806 	struct xfs_dquot	*pdqp,
1807 	uint			flags)
1808 {
1809 	struct xfs_mount	*mp = ip->i_mount;
1810 	uint64_t		delblks;
1811 	unsigned int		blkflags, prjflags = 0;
1812 	struct xfs_dquot	*udq_unres = NULL;
1813 	struct xfs_dquot	*gdq_unres = NULL;
1814 	struct xfs_dquot	*pdq_unres = NULL;
1815 	struct xfs_dquot	*udq_delblks = NULL;
1816 	struct xfs_dquot	*gdq_delblks = NULL;
1817 	struct xfs_dquot	*pdq_delblks = NULL;
1818 	int			error;
1819 
1820 
1821 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1822 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1823 
1824 	delblks = ip->i_delayed_blks;
1825 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1826 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1827 
1828 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1829 	    i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
1830 		udq_delblks = udqp;
1831 		/*
1832 		 * If there are delayed allocation blocks, then we have to
1833 		 * unreserve those from the old dquot, and add them to the
1834 		 * new dquot.
1835 		 */
1836 		if (delblks) {
1837 			ASSERT(ip->i_udquot);
1838 			udq_unres = ip->i_udquot;
1839 		}
1840 	}
1841 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1842 	    i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
1843 		gdq_delblks = gdqp;
1844 		if (delblks) {
1845 			ASSERT(ip->i_gdquot);
1846 			gdq_unres = ip->i_gdquot;
1847 		}
1848 	}
1849 
1850 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1851 	    ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
1852 		prjflags = XFS_QMOPT_ENOSPC;
1853 		pdq_delblks = pdqp;
1854 		if (delblks) {
1855 			ASSERT(ip->i_pdquot);
1856 			pdq_unres = ip->i_pdquot;
1857 		}
1858 	}
1859 
1860 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1861 				udq_delblks, gdq_delblks, pdq_delblks,
1862 				ip->i_d.di_nblocks, 1,
1863 				flags | blkflags | prjflags);
1864 	if (error)
1865 		return error;
1866 
1867 	/*
1868 	 * Do the delayed block reservations/unreservations now. Since these
1869 	 * are done without the help of a transaction, if a reservation fails
1870 	 * its previous reservations won't be automatically undone by trans
1871 	 * code. So, we have to do it manually here.
1872 	 */
1873 	if (delblks) {
1874 		/*
1875 		 * Do the reservations first. Unreservation can't fail.
1876 		 */
1877 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1878 		ASSERT(udq_unres || gdq_unres || pdq_unres);
1879 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1880 			    udq_delblks, gdq_delblks, pdq_delblks,
1881 			    (xfs_qcnt_t)delblks, 0,
1882 			    flags | blkflags | prjflags);
1883 		if (error)
1884 			return error;
1885 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1886 				udq_unres, gdq_unres, pdq_unres,
1887 				-((xfs_qcnt_t)delblks), 0, blkflags);
1888 	}
1889 
1890 	return 0;
1891 }
1892 
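/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached before the rename proceeds.
 */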
1893 int
1894 xfs_qm_vop_rename_dqattach(
1895 	struct xfs_inode	**i_tab)
1896 {
1897 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1898 	int			i;
1899 
1900 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1901 		return 0;
1902 
1903 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1904 		struct xfs_inode	*ip = i_tab[i];
1905 		int			error;
1906 
1907 		/*
1908 		 * Watch out for duplicate entries in the table.
1909 		 */
1910 		if (i == 0 || ip != i_tab[i-1]) {
1911 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1912 				error = xfs_qm_dqattach(ip);
1913 				if (error)
1914 					return error;
1915 			}
1916 		}
1917 	}
1918 	return 0;
1919 }
1920 
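/*
 * Attach the dquots that were reserved for a newly created inode and charge
 * the new inode against each of them.
 */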
1921 void
1922 xfs_qm_vop_create_dqattach(
1923 	struct xfs_trans	*tp,
1924 	struct xfs_inode	*ip,
1925 	struct xfs_dquot	*udqp,
1926 	struct xfs_dquot	*gdqp,
1927 	struct xfs_dquot	*pdqp)
1928 {
1929 	struct xfs_mount	*mp = tp->t_mountp;
1930 
1931 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1932 		return;
1933 
1934 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1935 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1936 
1937 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1938 		ASSERT(ip->i_udquot == NULL);
1939 		ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
1940 
1941 		ip->i_udquot = xfs_qm_dqhold(udqp);
1942 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1943 	}
1944 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1945 		ASSERT(ip->i_gdquot == NULL);
1946 		ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
1947 
1948 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1949 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1950 	}
1951 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1952 		ASSERT(ip->i_pdquot == NULL);
1953 		ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
1954 
1955 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1956 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1957 	}
1958 }
1959 
1960