xref: /openbmc/linux/fs/xfs/xfs_qm.c (revision 3fc41476)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_ialloc.h"
17 #include "xfs_itable.h"
18 #include "xfs_quota.h"
19 #include "xfs_error.h"
20 #include "xfs_bmap.h"
21 #include "xfs_bmap_btree.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_trans.h"
24 #include "xfs_trans_space.h"
25 #include "xfs_qm.h"
26 #include "xfs_trace.h"
27 #include "xfs_icache.h"
28 #include "xfs_cksum.h"
29 
30 /*
31  * The global quota manager. There is only one of these for the entire
32  * system, _not_ one per file system. XQM keeps track of the overall
33  * quota functionality, including maintaining the freelist and hash
34  * tables of dquots.
35  */
36 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
37 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
38 
39 STATIC void	xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
40 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
41 /*
42  * We use the batch lookup interface to iterate over the dquots as it
43  * currently is the only interface into the radix tree code that allows
44  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
45  * operations is fine as all callers run only during mount/umount or
46  * quotaoff.
47  */
48 #define XFS_DQ_LOOKUP_BATCH	32
49 
50 STATIC int
51 xfs_qm_dquot_walk(
52 	struct xfs_mount	*mp,
53 	int			type,
54 	int			(*execute)(struct xfs_dquot *dqp, void *data),
55 	void			*data)
56 {
57 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
58 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
59 	uint32_t		next_index;
60 	int			last_error = 0;
61 	int			skipped;
62 	int			nr_found;
63 
64 restart:
65 	skipped = 0;
66 	next_index = 0;
67 	nr_found = 0;
68 
69 	while (1) {
70 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
71 		int		error = 0;
72 		int		i;
73 
74 		mutex_lock(&qi->qi_tree_lock);
75 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
76 					next_index, XFS_DQ_LOOKUP_BATCH);
77 		if (!nr_found) {
78 			mutex_unlock(&qi->qi_tree_lock);
79 			break;
80 		}
81 
82 		for (i = 0; i < nr_found; i++) {
83 			struct xfs_dquot *dqp = batch[i];
84 
85 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
86 
87 			error = execute(batch[i], data);
88 			if (error == -EAGAIN) {
89 				skipped++;
90 				continue;
91 			}
92 			if (error && last_error != -EFSCORRUPTED)
93 				last_error = error;
94 		}
95 
96 		mutex_unlock(&qi->qi_tree_lock);
97 
98 		/* Bail out if the filesystem is corrupted. */
99 		if (last_error == -EFSCORRUPTED) {
100 			skipped = 0;
101 			break;
102 		}
103 		/* we're done if id overflows back to zero */
104 		if (!next_index)
105 			break;
106 	}
107 
108 	if (skipped) {
109 		delay(1);
110 		goto restart;
111 	}
112 
113 	return last_error;
114 }
115 
116 
117 /*
118  * Purge a dquot from all tracking data structures and free it.
119  */
120 STATIC int
121 xfs_qm_dqpurge(
122 	struct xfs_dquot	*dqp,
123 	void			*data)
124 {
125 	struct xfs_mount	*mp = dqp->q_mount;
126 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
127 
128 	xfs_dqlock(dqp);
129 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
130 		xfs_dqunlock(dqp);
131 		return -EAGAIN;
132 	}
133 
134 	dqp->dq_flags |= XFS_DQ_FREEING;
135 
136 	xfs_dqflock(dqp);
137 
138 	/*
139 	 * If we are turning this quota type off, we don't care
140 	 * about the dirty metadata sitting in this dquot. OTOH, if
141 	 * we're unmounting, we do care, so we flush it and wait.
142 	 */
143 	if (XFS_DQ_IS_DIRTY(dqp)) {
144 		struct xfs_buf	*bp = NULL;
145 		int		error;
146 
147 		/*
148 		 * We don't care about getting disk errors here. We need
149 		 * to purge this dquot anyway, so we go ahead regardless.
150 		 */
151 		error = xfs_qm_dqflush(dqp, &bp);
152 		if (!error) {
153 			error = xfs_bwrite(bp);
154 			xfs_buf_relse(bp);
155 		}
156 		xfs_dqflock(dqp);
157 	}
158 
159 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
160 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
161 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
162 
163 	xfs_dqfunlock(dqp);
164 	xfs_dqunlock(dqp);
165 
166 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
167 			  be32_to_cpu(dqp->q_core.d_id));
168 	qi->qi_dquots--;
169 
170 	/*
171 	 * We move dquots to the freelist as soon as their reference count
172 	 * hits zero, so this one really should be on the freelist here.
173 	 */
174 	ASSERT(!list_empty(&dqp->q_lru));
175 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
176 	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
177 
178 	xfs_qm_dqdestroy(dqp);
179 	return 0;
180 }
181 
182 /*
183  * Purge the dquot cache.
184  */
185 void
186 xfs_qm_dqpurge_all(
187 	struct xfs_mount	*mp,
188 	uint			flags)
189 {
190 	if (flags & XFS_QMOPT_UQUOTA)
191 		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
192 	if (flags & XFS_QMOPT_GQUOTA)
193 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
194 	if (flags & XFS_QMOPT_PQUOTA)
195 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
196 }
197 
198 /*
199  * Purge all cached dquots and tear down the quotainfo structure.
200  */
201 void
202 xfs_qm_unmount(
203 	struct xfs_mount	*mp)
204 {
205 	if (mp->m_quotainfo) {
206 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
207 		xfs_qm_destroy_quotainfo(mp);
208 	}
209 }
210 
211 /*
212  * Called from the vfsops layer at unmount time to release dquot and quota inode references.
213  */
214 void
215 xfs_qm_unmount_quotas(
216 	xfs_mount_t	*mp)
217 {
218 	/*
219 	 * Release the dquots that the root inode, et al might be holding,
220 	 * before we flush quotas and blow away the quotainfo structure.
221 	 */
222 	ASSERT(mp->m_rootip);
223 	xfs_qm_dqdetach(mp->m_rootip);
224 	if (mp->m_rbmip)
225 		xfs_qm_dqdetach(mp->m_rbmip);
226 	if (mp->m_rsumip)
227 		xfs_qm_dqdetach(mp->m_rsumip);
228 
229 	/*
230 	 * Release the quota inodes.
231 	 */
232 	if (mp->m_quotainfo) {
233 		if (mp->m_quotainfo->qi_uquotaip) {
234 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 			mp->m_quotainfo->qi_uquotaip = NULL;
236 		}
237 		if (mp->m_quotainfo->qi_gquotaip) {
238 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 			mp->m_quotainfo->qi_gquotaip = NULL;
240 		}
241 		if (mp->m_quotainfo->qi_pquotaip) {
242 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 			mp->m_quotainfo->qi_pquotaip = NULL;
244 		}
245 	}
246 }
247 
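/*
 * Attach one dquot of the given type and id to the inode, via the dquot
 * pointer slot passed in @IO_idqpp. If a dquot is already attached there we
 * are done; otherwise look it up (allocating on disk if @doalloc), store it
 * in the slot and return with it unlocked.
 */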
248 STATIC int
249 xfs_qm_dqattach_one(
250 	xfs_inode_t	*ip,
251 	xfs_dqid_t	id,
252 	uint		type,
253 	bool		doalloc,
254 	xfs_dquot_t	**IO_idqpp)
255 {
256 	xfs_dquot_t	*dqp;
257 	int		error;
258 
259 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
260 	error = 0;
261 
262 	/*
263 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot,
264 	 * &i_gdquot or &i_pdquot. This made the code look weird, but made the
265 	 * logic a lot simpler.
266 	 */
267 	dqp = *IO_idqpp;
268 	if (dqp) {
269 		trace_xfs_dqattach_found(dqp);
270 		return 0;
271 	}
272 
273 	/*
274 	 * Find the dquot from somewhere. This bumps the reference count of the
275 	 * dquot and returns it locked.  This can return ENOENT if the dquot
276 	 * didn't exist on disk and we didn't ask it to allocate; ESRCH if
277 	 * quotas got turned off suddenly.
278 	 */
279 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
280 	if (error)
281 		return error;
282 
283 	trace_xfs_dqattach_get(dqp);
284 
285 	/*
286 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
287 	 * that the dquot returned is the one that should go in the inode.
288 	 */
289 	*IO_idqpp = dqp;
290 	xfs_dqunlock(dqp);
291 	return 0;
292 }
293 
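/*
 * Decide whether this inode needs dquots attached: quotas must be running
 * and on, the inode must not already have all applicable dquots attached,
 * and it must not itself be one of the quota inodes.
 */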
294 static bool
295 xfs_qm_need_dqattach(
296 	struct xfs_inode	*ip)
297 {
298 	struct xfs_mount	*mp = ip->i_mount;
299 
300 	if (!XFS_IS_QUOTA_RUNNING(mp))
301 		return false;
302 	if (!XFS_IS_QUOTA_ON(mp))
303 		return false;
304 	if (!XFS_NOT_DQATTACHED(mp, ip))
305 		return false;
306 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
307 		return false;
308 	return true;
309 }
310 
311 /*
312  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
313  * into account.
314  * If @doalloc is true, the dquot(s) will be allocated if needed.
315  * Inode may get unlocked and relocked in here, and the caller must deal with
316  * the consequences.
317  */
318 int
319 xfs_qm_dqattach_locked(
320 	xfs_inode_t	*ip,
321 	bool		doalloc)
322 {
323 	xfs_mount_t	*mp = ip->i_mount;
324 	int		error = 0;
325 
326 	if (!xfs_qm_need_dqattach(ip))
327 		return 0;
328 
329 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
330 
331 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
332 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
333 				doalloc, &ip->i_udquot);
334 		if (error)
335 			goto done;
336 		ASSERT(ip->i_udquot);
337 	}
338 
339 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
340 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
341 				doalloc, &ip->i_gdquot);
342 		if (error)
343 			goto done;
344 		ASSERT(ip->i_gdquot);
345 	}
346 
347 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
348 		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
349 				doalloc, &ip->i_pdquot);
350 		if (error)
351 			goto done;
352 		ASSERT(ip->i_pdquot);
353 	}
354 
355 done:
356 	/*
357 	 * Don't worry about the dquots that we may have attached before any
358 	 * error - they'll get detached later if it has not already been done.
359 	 */
360 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
361 	return error;
362 }
363 
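/*
 * Lock the inode and attach its dquots, if any are needed.
 */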
364 int
365 xfs_qm_dqattach(
366 	struct xfs_inode	*ip)
367 {
368 	int			error;
369 
370 	if (!xfs_qm_need_dqattach(ip))
371 		return 0;
372 
373 	xfs_ilock(ip, XFS_ILOCK_EXCL);
374 	error = xfs_qm_dqattach_locked(ip, false);
375 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
376 
377 	return error;
378 }
379 
380 /*
381  * Release dquots (and their references) if any.
382  * The inode should be locked EXCL except when this is called by
383  * xfs_ireclaim.
384  */
385 void
386 xfs_qm_dqdetach(
387 	xfs_inode_t	*ip)
388 {
389 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
390 		return;
391 
392 	trace_xfs_dquot_dqdetach(ip);
393 
394 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
395 	if (ip->i_udquot) {
396 		xfs_qm_dqrele(ip->i_udquot);
397 		ip->i_udquot = NULL;
398 	}
399 	if (ip->i_gdquot) {
400 		xfs_qm_dqrele(ip->i_gdquot);
401 		ip->i_gdquot = NULL;
402 	}
403 	if (ip->i_pdquot) {
404 		xfs_qm_dqrele(ip->i_pdquot);
405 		ip->i_pdquot = NULL;
406 	}
407 }
408 
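/*
 * State shared between the dquot LRU isolate callback and the shrinker:
 * dirty dquot buffers queued for delwri writeback, and clean dquots ready
 * to be disposed of.
 */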
409 struct xfs_qm_isolate {
410 	struct list_head	buffers;
411 	struct list_head	dispose;
412 };
413 
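/*
 * LRU walk callback for dquot reclaim. Dquots that have regained a
 * reference are simply removed from the LRU; dirty dquots are flushed and
 * their buffers queued for writeback, leaving the dquot for a later pass;
 * clean, unreferenced dquots are marked XFS_DQ_FREEING and moved to the
 * dispose list for freeing by the caller.
 */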
414 static enum lru_status
415 xfs_qm_dquot_isolate(
416 	struct list_head	*item,
417 	struct list_lru_one	*lru,
418 	spinlock_t		*lru_lock,
419 	void			*arg)
420 		__releases(lru_lock) __acquires(lru_lock)
421 {
422 	struct xfs_dquot	*dqp = container_of(item,
423 						struct xfs_dquot, q_lru);
424 	struct xfs_qm_isolate	*isol = arg;
425 
426 	if (!xfs_dqlock_nowait(dqp))
427 		goto out_miss_busy;
428 
429 	/*
430 	 * This dquot has acquired a reference in the meantime; remove it from
431 	 * the freelist and try again.
432 	 */
433 	if (dqp->q_nrefs) {
434 		xfs_dqunlock(dqp);
435 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
436 
437 		trace_xfs_dqreclaim_want(dqp);
438 		list_lru_isolate(lru, &dqp->q_lru);
439 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
440 		return LRU_REMOVED;
441 	}
442 
443 	/*
444 	 * If the dquot is dirty, flush it. If it's already being flushed, just
445 	 * skip it so there is time for the IO to complete before we try to
446 	 * reclaim it again on the next LRU pass.
447 	 */
448 	if (!xfs_dqflock_nowait(dqp)) {
449 		xfs_dqunlock(dqp);
450 		goto out_miss_busy;
451 	}
452 
453 	if (XFS_DQ_IS_DIRTY(dqp)) {
454 		struct xfs_buf	*bp = NULL;
455 		int		error;
456 
457 		trace_xfs_dqreclaim_dirty(dqp);
458 
459 		/* we have to drop the LRU lock to flush the dquot */
460 		spin_unlock(lru_lock);
461 
462 		error = xfs_qm_dqflush(dqp, &bp);
463 		if (error)
464 			goto out_unlock_dirty;
465 
466 		xfs_buf_delwri_queue(bp, &isol->buffers);
467 		xfs_buf_relse(bp);
468 		goto out_unlock_dirty;
469 	}
470 	xfs_dqfunlock(dqp);
471 
472 	/*
473 	 * Prevent lookups now that we are past the point of no return.
474 	 */
475 	dqp->dq_flags |= XFS_DQ_FREEING;
476 	xfs_dqunlock(dqp);
477 
478 	ASSERT(dqp->q_nrefs == 0);
479 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
480 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
481 	trace_xfs_dqreclaim_done(dqp);
482 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
483 	return LRU_REMOVED;
484 
485 out_miss_busy:
486 	trace_xfs_dqreclaim_busy(dqp);
487 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
488 	return LRU_SKIP;
489 
490 out_unlock_dirty:
491 	trace_xfs_dqreclaim_busy(dqp);
492 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 	xfs_dqunlock(dqp);
494 	spin_lock(lru_lock);
495 	return LRU_RETRY;
496 }
497 
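/*
 * Shrinker scan callback: walk the dquot LRU, submit any dirty buffers the
 * walk queued, and free the dquots that were isolated onto the dispose
 * list. Only runs when both __GFP_FS and __GFP_DIRECT_RECLAIM are set.
 */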
498 static unsigned long
499 xfs_qm_shrink_scan(
500 	struct shrinker		*shrink,
501 	struct shrink_control	*sc)
502 {
503 	struct xfs_quotainfo	*qi = container_of(shrink,
504 					struct xfs_quotainfo, qi_shrinker);
505 	struct xfs_qm_isolate	isol;
506 	unsigned long		freed;
507 	int			error;
508 
509 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
510 		return 0;
511 
512 	INIT_LIST_HEAD(&isol.buffers);
513 	INIT_LIST_HEAD(&isol.dispose);
514 
515 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
516 				     xfs_qm_dquot_isolate, &isol);
517 
518 	error = xfs_buf_delwri_submit(&isol.buffers);
519 	if (error)
520 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
521 
522 	while (!list_empty(&isol.dispose)) {
523 		struct xfs_dquot	*dqp;
524 
525 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
526 		list_del_init(&dqp->q_lru);
527 		xfs_qm_dqfree_one(dqp);
528 	}
529 
530 	return freed;
531 }
532 
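/*
 * Shrinker count callback: report the number of reclaimable dquots on the
 * LRU.
 */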
533 static unsigned long
534 xfs_qm_shrink_count(
535 	struct shrinker		*shrink,
536 	struct shrink_control	*sc)
537 {
538 	struct xfs_quotainfo	*qi = container_of(shrink,
539 					struct xfs_quotainfo, qi_shrinker);
540 
541 	return list_lru_shrink_count(&qi->qi_lru, sc);
542 }
543 
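/*
 * Read the id-zero dquot for this quota type and seed the in-core default
 * limits from it.
 */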
544 STATIC void
545 xfs_qm_set_defquota(
546 	xfs_mount_t	*mp,
547 	uint		type,
548 	xfs_quotainfo_t	*qinf)
549 {
550 	xfs_dquot_t		*dqp;
551 	struct xfs_def_quota    *defq;
552 	struct xfs_disk_dquot	*ddqp;
553 	int			error;
554 
555 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
556 	if (error)
557 		return;
558 
559 	ddqp = &dqp->q_core;
560 	defq = xfs_get_defquota(dqp, qinf);
561 
562 	/*
563 	 * Timers and warnings have already been set; just set the
564 	 * default limits for this quota type.
565 	 */
566 	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
567 	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
568 	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
569 	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
570 	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
571 	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
572 	xfs_qm_dqdestroy(dqp);
573 }
574 
575 /* Initialize quota time limits from the root dquot. */
576 static void
577 xfs_qm_init_timelimits(
578 	struct xfs_mount	*mp,
579 	struct xfs_quotainfo	*qinf)
580 {
581 	struct xfs_disk_dquot	*ddqp;
582 	struct xfs_dquot	*dqp;
583 	uint			type;
584 	int			error;
585 
586 	qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
587 	qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
588 	qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
589 	qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
590 	qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
591 	qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
592 
593 	/*
594 	 * We try to get the limits from the superuser's limits fields.
595 	 * This is quite hacky, but it is standard quota practice.
596 	 *
597 	 * Since we may not have done a quotacheck by this point, just read
598 	 * the dquot without attaching it to any hashtables or lists.
599 	 *
600 	 * Timers and warnings are globally set by the first timer found in
601 	 * user/group/proj quota types, otherwise a default value is used.
602 	 * This should be split into different fields per quota type.
603 	 */
604 	if (XFS_IS_UQUOTA_RUNNING(mp))
605 		type = XFS_DQ_USER;
606 	else if (XFS_IS_GQUOTA_RUNNING(mp))
607 		type = XFS_DQ_GROUP;
608 	else
609 		type = XFS_DQ_PROJ;
610 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
611 	if (error)
612 		return;
613 
614 	ddqp = &dqp->q_core;
615 	/*
616 	 * The warnings and timers set the grace period given to
617 	 * a user or group before no more writes are allowed. If it
618 	 * is zero, a default is used.
619 	 */
620 	if (ddqp->d_btimer)
621 		qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
622 	if (ddqp->d_itimer)
623 		qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
624 	if (ddqp->d_rtbtimer)
625 		qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
626 	if (ddqp->d_bwarns)
627 		qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
628 	if (ddqp->d_iwarns)
629 		qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
630 	if (ddqp->d_rtbwarns)
631 		qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
632 
633 	xfs_qm_dqdestroy(dqp);
634 }
635 
636 /*
637  * This initializes all the quota information that's kept in the
638  * mount structure
639  */
640 STATIC int
641 xfs_qm_init_quotainfo(
642 	struct xfs_mount	*mp)
643 {
644 	struct xfs_quotainfo	*qinf;
645 	int			error;
646 
647 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
648 
649 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
650 
651 	error = list_lru_init(&qinf->qi_lru);
652 	if (error)
653 		goto out_free_qinf;
654 
655 	/*
656 	 * See if quotainodes are setup, and if not, allocate them,
657 	 * and change the superblock accordingly.
658 	 */
659 	error = xfs_qm_init_quotainos(mp);
660 	if (error)
661 		goto out_free_lru;
662 
663 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
664 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
665 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
666 	mutex_init(&qinf->qi_tree_lock);
667 
668 	/* mutex used to serialize quotaoffs */
669 	mutex_init(&qinf->qi_quotaofflock);
670 
671 	/* Precalc some constants */
672 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
673 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
674 
675 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
676 
677 	xfs_qm_init_timelimits(mp, qinf);
678 
679 	if (XFS_IS_UQUOTA_RUNNING(mp))
680 		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
681 	if (XFS_IS_GQUOTA_RUNNING(mp))
682 		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
683 	if (XFS_IS_PQUOTA_RUNNING(mp))
684 		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
685 
686 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
687 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
688 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
689 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
690 
691 	error = register_shrinker(&qinf->qi_shrinker);
692 	if (error)
693 		goto out_free_inos;
694 
695 	return 0;
696 
697 out_free_inos:
698 	mutex_destroy(&qinf->qi_quotaofflock);
699 	mutex_destroy(&qinf->qi_tree_lock);
700 	xfs_qm_destroy_quotainos(qinf);
701 out_free_lru:
702 	list_lru_destroy(&qinf->qi_lru);
703 out_free_qinf:
704 	kmem_free(qinf);
705 	mp->m_quotainfo = NULL;
706 	return error;
707 }
708 
709 /*
710  * Gets called when unmounting a filesystem or when all quotas get
711  * turned off.
712  * This purges the quota inodes, destroys locks and frees itself.
713  */
714 void
715 xfs_qm_destroy_quotainfo(
716 	xfs_mount_t	*mp)
717 {
718 	xfs_quotainfo_t *qi;
719 
720 	qi = mp->m_quotainfo;
721 	ASSERT(qi != NULL);
722 
723 	unregister_shrinker(&qi->qi_shrinker);
724 	list_lru_destroy(&qi->qi_lru);
725 	xfs_qm_destroy_quotainos(qi);
726 	mutex_destroy(&qi->qi_tree_lock);
727 	mutex_destroy(&qi->qi_quotaofflock);
728 	kmem_free(qi);
729 	mp->m_quotainfo = NULL;
730 }
731 
732 /*
733  * Create an inode and return with a reference already taken, but unlocked.
734  * This is how we create quota inodes.
735  */
736 STATIC int
737 xfs_qm_qino_alloc(
738 	xfs_mount_t	*mp,
739 	xfs_inode_t	**ip,
740 	uint		flags)
741 {
742 	xfs_trans_t	*tp;
743 	int		error;
744 	bool		need_alloc = true;
745 
746 	*ip = NULL;
747 	/*
748 	 * With a superblock that doesn't have a separate pquotino, we
749 	 * share an inode between gquota and pquota. If the on-disk
750 	 * superblock has GQUOTA and the filesystem is now mounted
751 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
752 	 * vice-versa.
753 	 */
754 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
755 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
756 		xfs_ino_t ino = NULLFSINO;
757 
758 		if ((flags & XFS_QMOPT_PQUOTA) &&
759 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
760 			ino = mp->m_sb.sb_gquotino;
761 			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
762 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
763 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
764 			ino = mp->m_sb.sb_pquotino;
765 			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
766 		}
767 		if (ino != NULLFSINO) {
768 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
769 			if (error)
770 				return error;
771 			mp->m_sb.sb_gquotino = NULLFSINO;
772 			mp->m_sb.sb_pquotino = NULLFSINO;
773 			need_alloc = false;
774 		}
775 	}
776 
777 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
778 			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
779 	if (error)
780 		return error;
781 
782 	if (need_alloc) {
783 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
784 		if (error) {
785 			xfs_trans_cancel(tp);
786 			return error;
787 		}
788 	}
789 
790 	/*
791 	 * Make the changes in the superblock, and log those too.
792 	 * sbfields arg may contain fields other than *QUOTINO;
793 	 * VERSIONNUM for example.
794 	 */
795 	spin_lock(&mp->m_sb_lock);
796 	if (flags & XFS_QMOPT_SBVERSION) {
797 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
798 
799 		xfs_sb_version_addquota(&mp->m_sb);
800 		mp->m_sb.sb_uquotino = NULLFSINO;
801 		mp->m_sb.sb_gquotino = NULLFSINO;
802 		mp->m_sb.sb_pquotino = NULLFSINO;
803 
804 		/* qflags will get updated fully _after_ quotacheck */
805 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
806 	}
807 	if (flags & XFS_QMOPT_UQUOTA)
808 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
809 	else if (flags & XFS_QMOPT_GQUOTA)
810 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
811 	else
812 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
813 	spin_unlock(&mp->m_sb_lock);
814 	xfs_log_sb(tp);
815 
816 	error = xfs_trans_commit(tp);
817 	if (error) {
818 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
819 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
820 	}
821 	if (need_alloc)
822 		xfs_finish_inode_setup(*ip);
823 	return error;
824 }
825 
826 
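/*
 * Reset the counters, timers and warnings of every dquot in this buffer,
 * repairing any dqblks that fail verification, so that quotacheck can
 * rebuild usage from scratch.
 */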
827 STATIC void
828 xfs_qm_reset_dqcounts(
829 	xfs_mount_t	*mp,
830 	xfs_buf_t	*bp,
831 	xfs_dqid_t	id,
832 	uint		type)
833 {
834 	struct xfs_dqblk	*dqb;
835 	int			j;
836 	xfs_failaddr_t		fa;
837 
838 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
839 
840 	/*
841 	 * Reset all counters and timers. They'll be
842 	 * started afresh by xfs_qm_quotacheck.
843 	 */
844 #ifdef DEBUG
845 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
846 		sizeof(xfs_dqblk_t);
847 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
848 #endif
849 	dqb = bp->b_addr;
850 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
851 		struct xfs_disk_dquot	*ddq;
852 
853 		ddq = (struct xfs_disk_dquot *)&dqb[j];
854 
855 		/*
856 		 * Do a sanity check, and if needed, repair the dqblk. Don't
857 		 * output any warnings because it's perfectly possible to
858 		 * find uninitialised dquot blks. See comment in
859 		 * xfs_dquot_verify.
860 		 */
861 		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
862 		if (fa)
863 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
864 
865 		/*
866 		 * Reset type in case we are reusing group quota file for
867 		 * project quotas or vice versa
868 		 */
869 		ddq->d_flags = type;
870 		ddq->d_bcount = 0;
871 		ddq->d_icount = 0;
872 		ddq->d_rtbcount = 0;
873 		ddq->d_btimer = 0;
874 		ddq->d_itimer = 0;
875 		ddq->d_rtbtimer = 0;
876 		ddq->d_bwarns = 0;
877 		ddq->d_iwarns = 0;
878 		ddq->d_rtbwarns = 0;
879 
880 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
881 			xfs_update_cksum((char *)&dqb[j],
882 					 sizeof(struct xfs_dqblk),
883 					 XFS_DQUOT_CRC_OFF);
884 		}
885 	}
886 }
887 
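/*
 * Read each dquot buffer in the given block range outside of a transaction,
 * reset the dquots it contains and queue the buffer for delwri writeback.
 */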
888 STATIC int
889 xfs_qm_reset_dqcounts_all(
890 	struct xfs_mount	*mp,
891 	xfs_dqid_t		firstid,
892 	xfs_fsblock_t		bno,
893 	xfs_filblks_t		blkcnt,
894 	uint			flags,
895 	struct list_head	*buffer_list)
896 {
897 	struct xfs_buf		*bp;
898 	int			error;
899 	int			type;
900 
901 	ASSERT(blkcnt > 0);
902 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
903 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
904 	error = 0;
905 
906 	/*
907 	 * Blkcnt arg can be a very big number, and might even be
908 	 * larger than the log itself. So, we have to break it up into
909 	 * manageable-sized transactions.
910 	 * Note that we don't start a permanent transaction here; we might
911 	 * not be able to get a log reservation for the whole thing up front,
912 	 * and we don't really care to either, because we just discard
913 	 * everything if we were to crash in the middle of this loop.
914 	 */
915 	while (blkcnt--) {
916 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
917 			      XFS_FSB_TO_DADDR(mp, bno),
918 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
919 			      &xfs_dquot_buf_ops);
920 
921 		/*
922 		 * CRC and validation errors will return an EFSCORRUPTED here. If
923 		 * this occurs, re-read without CRC validation so that we can
924 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
925 		 * will leave a trace in the log indicating corruption has
926 		 * been detected.
927 		 */
928 		if (error == -EFSCORRUPTED) {
929 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
930 				      XFS_FSB_TO_DADDR(mp, bno),
931 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
932 				      NULL);
933 		}
934 
935 		if (error)
936 			break;
937 
938 		/*
939 		 * A corrupt buffer might not have a verifier attached, so
940 		 * make sure we have the correct one attached before writeback
941 		 * occurs.
942 		 */
943 		bp->b_ops = &xfs_dquot_buf_ops;
944 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
945 		xfs_buf_delwri_queue(bp, buffer_list);
946 		xfs_buf_relse(bp);
947 
948 		/* go to the next block. */
949 		bno++;
950 		firstid += mp->m_quotainfo->qi_dqperchunk;
951 	}
952 
953 	return error;
954 }
955 
956 /*
957  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
958  * counters for every chunk of dquots that we find.
959  */
960 STATIC int
961 xfs_qm_reset_dqcounts_buf(
962 	struct xfs_mount	*mp,
963 	struct xfs_inode	*qip,
964 	uint			flags,
965 	struct list_head	*buffer_list)
966 {
967 	struct xfs_bmbt_irec	*map;
968 	int			i, nmaps;	/* number of map entries */
969 	int			error;		/* return value */
970 	xfs_fileoff_t		lblkno;
971 	xfs_filblks_t		maxlblkcnt;
972 	xfs_dqid_t		firstid;
973 	xfs_fsblock_t		rablkno;
974 	xfs_filblks_t		rablkcnt;
975 
976 	error = 0;
977 	/*
978 	 * This looks racy, but we can't keep an inode lock across a
979 	 * trans_reserve. But this gets called during quotacheck, and that
980 	 * happens only at mount time, which is single threaded.
981 	 */
982 	if (qip->i_d.di_nblocks == 0)
983 		return 0;
984 
985 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
986 
987 	lblkno = 0;
988 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
989 	do {
990 		uint		lock_mode;
991 
992 		nmaps = XFS_DQITER_MAP_SIZE;
993 		/*
994 		 * We aren't changing the inode itself. Just changing
995 		 * some of its data. No new blocks are added here, and
996 		 * the inode is never added to the transaction.
997 		 */
998 		lock_mode = xfs_ilock_data_map_shared(qip);
999 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1000 				       map, &nmaps, 0);
1001 		xfs_iunlock(qip, lock_mode);
1002 		if (error)
1003 			break;
1004 
1005 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1006 		for (i = 0; i < nmaps; i++) {
1007 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1008 			ASSERT(map[i].br_blockcount);
1009 
1010 
1011 			lblkno += map[i].br_blockcount;
1012 
1013 			if (map[i].br_startblock == HOLESTARTBLOCK)
1014 				continue;
1015 
1016 			firstid = (xfs_dqid_t) map[i].br_startoff *
1017 				mp->m_quotainfo->qi_dqperchunk;
1018 			/*
1019 			 * Do a read-ahead on the next extent.
1020 			 */
1021 			if ((i+1 < nmaps) &&
1022 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1023 				rablkcnt =  map[i+1].br_blockcount;
1024 				rablkno = map[i+1].br_startblock;
1025 				while (rablkcnt--) {
1026 					xfs_buf_readahead(mp->m_ddev_targp,
1027 					       XFS_FSB_TO_DADDR(mp, rablkno),
1028 					       mp->m_quotainfo->qi_dqchunklen,
1029 					       &xfs_dquot_buf_ops);
1030 					rablkno++;
1031 				}
1032 			}
1033 			/*
1034 			 * Iterate thru all the blks in the extent and
1035 			 * reset the counters of all the dquots inside them.
1036 			 */
1037 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1038 						   map[i].br_startblock,
1039 						   map[i].br_blockcount,
1040 						   flags, buffer_list);
1041 			if (error)
1042 				goto out;
1043 		}
1044 	} while (nmaps > 0);
1045 
1046 out:
1047 	kmem_free(map);
1048 	return error;
1049 }
1050 
1051 /*
1052  * Called by dqusage_adjust in doing a quotacheck.
1053  *
1054  * Given the inode and a dquot id, this updates both the incore dquot as well
1055  * as the buffer copy. This is so that once the quotacheck is done, we can
1056  * just log all the buffers, as opposed to logging numerous updates to
1057  * individual dquots.
1058  */
1059 STATIC int
1060 xfs_qm_quotacheck_dqadjust(
1061 	struct xfs_inode	*ip,
1062 	uint			type,
1063 	xfs_qcnt_t		nblks,
1064 	xfs_qcnt_t		rtblks)
1065 {
1066 	struct xfs_mount	*mp = ip->i_mount;
1067 	struct xfs_dquot	*dqp;
1068 	xfs_dqid_t		id;
1069 	int			error;
1070 
1071 	id = xfs_qm_id_for_quotatype(ip, type);
1072 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1073 	if (error) {
1074 		/*
1075 		 * Shouldn't be able to turn off quotas here.
1076 		 */
1077 		ASSERT(error != -ESRCH);
1078 		ASSERT(error != -ENOENT);
1079 		return error;
1080 	}
1081 
1082 	trace_xfs_dqadjust(dqp);
1083 
1084 	/*
1085 	 * Adjust the inode count and the block count to reflect this inode's
1086 	 * resource usage.
1087 	 */
1088 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1089 	dqp->q_res_icount++;
1090 	if (nblks) {
1091 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1092 		dqp->q_res_bcount += nblks;
1093 	}
1094 	if (rtblks) {
1095 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1096 		dqp->q_res_rtbcount += rtblks;
1097 	}
1098 
1099 	/*
1100 	 * Set default limits, adjust timers (since we changed usages).
1101 	 *
1102 	 * There are no timers for the default values set in the root dquot.
1103 	 */
1104 	if (dqp->q_core.d_id) {
1105 		xfs_qm_adjust_dqlimits(mp, dqp);
1106 		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1107 	}
1108 
1109 	dqp->dq_flags |= XFS_DQ_DIRTY;
1110 	xfs_qm_dqput(dqp);
1111 	return 0;
1112 }
1113 
1114 /*
1115  * Callback routine supplied to bulkstat(). Given an inumber, find its
1116  * dquots and update them to account for resources taken by that inode.
1117  */
1118 /* ARGSUSED */
1119 STATIC int
1120 xfs_qm_dqusage_adjust(
1121 	xfs_mount_t	*mp,		/* mount point for filesystem */
1122 	xfs_ino_t	ino,		/* inode number to get data for */
1123 	void		__user *buffer,	/* not used */
1124 	int		ubsize,		/* not used */
1125 	int		*ubused,	/* not used */
1126 	int		*res)		/* result code value */
1127 {
1128 	xfs_inode_t	*ip;
1129 	xfs_qcnt_t	nblks;
1130 	xfs_filblks_t	rtblks = 0;	/* total rt blks */
1131 	int		error;
1132 
1133 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1134 
1135 	/*
1136 	 * rootino must have its resources accounted for, not so with the quota
1137 	 * inodes.
1138 	 */
1139 	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1140 		*res = BULKSTAT_RV_NOTHING;
1141 		return -EINVAL;
1142 	}
1143 
1144 	/*
1145 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1146 	 * at mount time and therefore nobody will be racing chown/chproj.
1147 	 */
1148 	error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, 0, &ip);
1149 	if (error) {
1150 		*res = BULKSTAT_RV_NOTHING;
1151 		return error;
1152 	}
1153 
1154 	ASSERT(ip->i_delayed_blks == 0);
1155 
1156 	if (XFS_IS_REALTIME_INODE(ip)) {
1157 		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1158 
1159 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1160 			error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
1161 			if (error)
1162 				goto error0;
1163 		}
1164 
1165 		xfs_bmap_count_leaves(ifp, &rtblks);
1166 	}
1167 
1168 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1169 
1170 	/*
1171 	 * Add the (disk blocks and inode) resources occupied by this
1172 	 * inode to its dquots. We do this adjustment in the incore dquot,
1173 	 * and also copy the changes to its buffer.
1174 	 * We don't care about putting these changes in a transaction
1175 	 * envelope because if we crash in the middle of a 'quotacheck'
1176 	 * we have to start from the beginning anyway.
1177 	 * Once we're done, we'll log all the dquot bufs.
1178 	 *
1179 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1180 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1181 	 */
1182 	if (XFS_IS_UQUOTA_ON(mp)) {
1183 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
1184 				rtblks);
1185 		if (error)
1186 			goto error0;
1187 	}
1188 
1189 	if (XFS_IS_GQUOTA_ON(mp)) {
1190 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
1191 				rtblks);
1192 		if (error)
1193 			goto error0;
1194 	}
1195 
1196 	if (XFS_IS_PQUOTA_ON(mp)) {
1197 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
1198 				rtblks);
1199 		if (error)
1200 			goto error0;
1201 	}
1202 
1203 	xfs_irele(ip);
1204 	*res = BULKSTAT_RV_DIDONE;
1205 	return 0;
1206 
1207 error0:
1208 	xfs_irele(ip);
1209 	*res = BULKSTAT_RV_GIVEUP;
1210 	return error;
1211 }
1212 
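/*
 * Flush one dirty dquot to its backing buffer and queue that buffer on the
 * caller's delwri list. Dquots that are clean or being freed are skipped;
 * if the flush lock cannot be taken, push the underlying buffer out and
 * return -EAGAIN so the walk retries this dquot.
 */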
1213 STATIC int
1214 xfs_qm_flush_one(
1215 	struct xfs_dquot	*dqp,
1216 	void			*data)
1217 {
1218 	struct xfs_mount	*mp = dqp->q_mount;
1219 	struct list_head	*buffer_list = data;
1220 	struct xfs_buf		*bp = NULL;
1221 	int			error = 0;
1222 
1223 	xfs_dqlock(dqp);
1224 	if (dqp->dq_flags & XFS_DQ_FREEING)
1225 		goto out_unlock;
1226 	if (!XFS_DQ_IS_DIRTY(dqp))
1227 		goto out_unlock;
1228 
1229 	/*
1230 	 * The only way the dquot is already flush locked by the time quotacheck
1231 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1232 	 * it for the final time. Quotacheck collects all dquot bufs in the
1233 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1234 	 * possibly queued it for I/O. The only way out is to push the buffer to
1235 	 * cycle the flush lock.
1236 	 */
1237 	if (!xfs_dqflock_nowait(dqp)) {
1238 		/* buf is pinned in-core by delwri list */
1239 		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1240 				mp->m_quotainfo->qi_dqchunklen, 0);
1241 		if (!bp) {
1242 			error = -EINVAL;
1243 			goto out_unlock;
1244 		}
1245 		xfs_buf_unlock(bp);
1246 
1247 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1248 		xfs_buf_rele(bp);
1249 
1250 		error = -EAGAIN;
1251 		goto out_unlock;
1252 	}
1253 
1254 	error = xfs_qm_dqflush(dqp, &bp);
1255 	if (error)
1256 		goto out_unlock;
1257 
1258 	xfs_buf_delwri_queue(bp, buffer_list);
1259 	xfs_buf_relse(bp);
1260 out_unlock:
1261 	xfs_dqunlock(dqp);
1262 	return error;
1263 }
1264 
1265 /*
1266  * Walk thru all the filesystem inodes and construct a consistent view
1267  * of the disk quota world. If the quotacheck fails, disable quotas.
1268  */
1269 STATIC int
1270 xfs_qm_quotacheck(
1271 	xfs_mount_t	*mp)
1272 {
1273 	int			done, count, error, error2;
1274 	xfs_ino_t		lastino;
1275 	size_t			structsz;
1276 	uint			flags;
1277 	LIST_HEAD		(buffer_list);
1278 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1279 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1280 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1281 
1282 	count = INT_MAX;
1283 	structsz = 1;
1284 	lastino = 0;
1285 	flags = 0;
1286 
1287 	ASSERT(uip || gip || pip);
1288 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1289 
1290 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1291 
1292 	/*
1293 	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1294 	 * their counters to zero. We need a clean slate.
1295 	 * We don't log our changes till later.
1296 	 */
1297 	if (uip) {
1298 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
1299 					 &buffer_list);
1300 		if (error)
1301 			goto error_return;
1302 		flags |= XFS_UQUOTA_CHKD;
1303 	}
1304 
1305 	if (gip) {
1306 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
1307 					 &buffer_list);
1308 		if (error)
1309 			goto error_return;
1310 		flags |= XFS_GQUOTA_CHKD;
1311 	}
1312 
1313 	if (pip) {
1314 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
1315 					 &buffer_list);
1316 		if (error)
1317 			goto error_return;
1318 		flags |= XFS_PQUOTA_CHKD;
1319 	}
1320 
1321 	do {
1322 		/*
1323 		 * Iterate thru all the inodes in the file system,
1324 		 * adjusting the corresponding dquot counters in core.
1325 		 */
1326 		error = xfs_bulkstat(mp, &lastino, &count,
1327 				     xfs_qm_dqusage_adjust,
1328 				     structsz, NULL, &done);
1329 		if (error)
1330 			break;
1331 
1332 	} while (!done);
1333 
1334 	/*
1335 	 * We've made all the changes that we need to make incore.  Flush them
1336 	 * down to disk buffers if everything was updated successfully.
1337 	 */
1338 	if (XFS_IS_UQUOTA_ON(mp)) {
1339 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1340 					  &buffer_list);
1341 	}
1342 	if (XFS_IS_GQUOTA_ON(mp)) {
1343 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1344 					   &buffer_list);
1345 		if (!error)
1346 			error = error2;
1347 	}
1348 	if (XFS_IS_PQUOTA_ON(mp)) {
1349 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1350 					   &buffer_list);
1351 		if (!error)
1352 			error = error2;
1353 	}
1354 
1355 	error2 = xfs_buf_delwri_submit(&buffer_list);
1356 	if (!error)
1357 		error = error2;
1358 
1359 	/*
1360 	 * We can get this error if we couldn't do a dquot allocation inside
1361 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1362 	 * dirty dquots that might be cached, we just want to get rid of them
1363 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1364 	 * at this point (because we intentionally didn't in dqget_noattach).
1365 	 */
1366 	if (error) {
1367 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1368 		goto error_return;
1369 	}
1370 
1371 	/*
1372 	 * If one type of quotas is off, then it will lose its
1373 	 * quotachecked status, since we won't be doing accounting for
1374 	 * that type anymore.
1375 	 */
1376 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1377 	mp->m_qflags |= flags;
1378 
1379  error_return:
1380 	xfs_buf_delwri_cancel(&buffer_list);
1381 
1382 	if (error) {
1383 		xfs_warn(mp,
1384 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1385 			error);
1386 		/*
1387 		 * We must turn off quotas.
1388 		 */
1389 		ASSERT(mp->m_quotainfo != NULL);
1390 		xfs_qm_destroy_quotainfo(mp);
1391 		if (xfs_mount_reset_sbqflags(mp)) {
1392 			xfs_warn(mp,
1393 				"Quotacheck: Failed to reset quota flags.");
1394 		}
1395 	} else
1396 		xfs_notice(mp, "Quotacheck: Done.");
1397 	return error;
1398 }
1399 
1400 /*
1401  * This is called from xfs_mountfs to start quotas and initialize all
1402  * necessary data structures like quotainfo.  This is also responsible for
1403  * running a quotacheck as necessary.  We are guaranteed that the superblock
1404  * is consistently read in at this point.
1405  *
1406  * If we fail here, the mount will continue with quota turned off. We don't
1407  * need to indicate success or failure at all.
1408  */
1409 void
1410 xfs_qm_mount_quotas(
1411 	struct xfs_mount	*mp)
1412 {
1413 	int			error = 0;
1414 	uint			sbf;
1415 
1416 	/*
1417 	 * Quotas are not supported on filesystems with a realtime volume,
1418 	 * so disable quotas immediately in that case.
1419 	 */
1420 	if (mp->m_sb.sb_rextents) {
1421 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1422 		mp->m_qflags = 0;
1423 		goto write_changes;
1424 	}
1425 
1426 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1427 
1428 	/*
1429 	 * Allocate the quotainfo structure inside the mount struct,
1430 	 * create quotainode(s), and change/rev the superblock if necessary.
1431 	 */
1432 	error = xfs_qm_init_quotainfo(mp);
1433 	if (error) {
1434 		/*
1435 		 * We must turn off quotas.
1436 		 */
1437 		ASSERT(mp->m_quotainfo == NULL);
1438 		mp->m_qflags = 0;
1439 		goto write_changes;
1440 	}
1441 	/*
1442 	 * If any of the quotas are not consistent, do a quotacheck.
1443 	 */
1444 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1445 		error = xfs_qm_quotacheck(mp);
1446 		if (error) {
1447 			/* Quotacheck failed and disabled quotas. */
1448 			return;
1449 		}
1450 	}
1451 	/*
1452 	 * If one type of quotas is off, then it will lose its
1453 	 * quotachecked status, since we won't be doing accounting for
1454 	 * that type anymore.
1455 	 */
1456 	if (!XFS_IS_UQUOTA_ON(mp))
1457 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1458 	if (!XFS_IS_GQUOTA_ON(mp))
1459 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1460 	if (!XFS_IS_PQUOTA_ON(mp))
1461 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1462 
1463  write_changes:
1464 	/*
1465 	 * We actually don't have to acquire the m_sb_lock at all.
1466 	 * This can only be called from mount, and that's single threaded. XXX
1467 	 */
1468 	spin_lock(&mp->m_sb_lock);
1469 	sbf = mp->m_sb.sb_qflags;
1470 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1471 	spin_unlock(&mp->m_sb_lock);
1472 
1473 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1474 		if (xfs_sync_sb(mp, false)) {
1475 			/*
1476 			 * We could only have been turning quotas off.
1477 			 * We aren't in very good shape actually because
1478 			 * the incore structures are convinced that quotas are
1479 			 * off, but the on disk superblock doesn't know that !
1480 			 */
1481 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1482 			xfs_alert(mp, "%s: Superblock update failed!",
1483 				__func__);
1484 		}
1485 	}
1486 
1487 	if (error) {
1488 		xfs_warn(mp, "Failed to initialize disk quotas.");
1489 		return;
1490 	}
1491 }
1492 
1493 /*
1494  * This is called after the superblock has been read in and we're ready to
1495  * iget the quota inodes.
1496  */
1497 STATIC int
1498 xfs_qm_init_quotainos(
1499 	xfs_mount_t	*mp)
1500 {
1501 	struct xfs_inode	*uip = NULL;
1502 	struct xfs_inode	*gip = NULL;
1503 	struct xfs_inode	*pip = NULL;
1504 	int			error;
1505 	uint			flags = 0;
1506 
1507 	ASSERT(mp->m_quotainfo);
1508 
1509 	/*
1510 	 * Get the uquota, gquota and pquota inodes
1511 	 */
1512 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1513 		if (XFS_IS_UQUOTA_ON(mp) &&
1514 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1515 			ASSERT(mp->m_sb.sb_uquotino > 0);
1516 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1517 					     0, 0, &uip);
1518 			if (error)
1519 				return error;
1520 		}
1521 		if (XFS_IS_GQUOTA_ON(mp) &&
1522 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1523 			ASSERT(mp->m_sb.sb_gquotino > 0);
1524 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1525 					     0, 0, &gip);
1526 			if (error)
1527 				goto error_rele;
1528 		}
1529 		if (XFS_IS_PQUOTA_ON(mp) &&
1530 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1531 			ASSERT(mp->m_sb.sb_pquotino > 0);
1532 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1533 					     0, 0, &pip);
1534 			if (error)
1535 				goto error_rele;
1536 		}
1537 	} else {
1538 		flags |= XFS_QMOPT_SBVERSION;
1539 	}
1540 
1541 	/*
1542 	 * Create the three inodes, if they don't exist already. The changes
1543 	 * made above will get added to a transaction and logged in one of
1544 	 * the qino_alloc calls below.  If the device is readonly,
1545 	 * temporarily switch to read-write to do this.
1546 	 */
1547 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1548 		error = xfs_qm_qino_alloc(mp, &uip,
1549 					      flags | XFS_QMOPT_UQUOTA);
1550 		if (error)
1551 			goto error_rele;
1552 
1553 		flags &= ~XFS_QMOPT_SBVERSION;
1554 	}
1555 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1556 		error = xfs_qm_qino_alloc(mp, &gip,
1557 					  flags | XFS_QMOPT_GQUOTA);
1558 		if (error)
1559 			goto error_rele;
1560 
1561 		flags &= ~XFS_QMOPT_SBVERSION;
1562 	}
1563 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1564 		error = xfs_qm_qino_alloc(mp, &pip,
1565 					  flags | XFS_QMOPT_PQUOTA);
1566 		if (error)
1567 			goto error_rele;
1568 	}
1569 
1570 	mp->m_quotainfo->qi_uquotaip = uip;
1571 	mp->m_quotainfo->qi_gquotaip = gip;
1572 	mp->m_quotainfo->qi_pquotaip = pip;
1573 
1574 	return 0;
1575 
1576 error_rele:
1577 	if (uip)
1578 		xfs_irele(uip);
1579 	if (gip)
1580 		xfs_irele(gip);
1581 	if (pip)
1582 		xfs_irele(pip);
1583 	return error;
1584 }
1585 
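/*
 * Drop the quota inode references held in the quotainfo structure.
 */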
1586 STATIC void
1587 xfs_qm_destroy_quotainos(
1588 	xfs_quotainfo_t	*qi)
1589 {
1590 	if (qi->qi_uquotaip) {
1591 		xfs_irele(qi->qi_uquotaip);
1592 		qi->qi_uquotaip = NULL; /* paranoia */
1593 	}
1594 	if (qi->qi_gquotaip) {
1595 		xfs_irele(qi->qi_gquotaip);
1596 		qi->qi_gquotaip = NULL;
1597 	}
1598 	if (qi->qi_pquotaip) {
1599 		xfs_irele(qi->qi_pquotaip);
1600 		qi->qi_pquotaip = NULL;
1601 	}
1602 }
1603 
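/*
 * Remove a dquot from its radix tree and destroy it.
 */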
1604 STATIC void
1605 xfs_qm_dqfree_one(
1606 	struct xfs_dquot	*dqp)
1607 {
1608 	struct xfs_mount	*mp = dqp->q_mount;
1609 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1610 
1611 	mutex_lock(&qi->qi_tree_lock);
1612 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1613 			  be32_to_cpu(dqp->q_core.d_id));
1614 
1615 	qi->qi_dquots--;
1616 	mutex_unlock(&qi->qi_tree_lock);
1617 
1618 	xfs_qm_dqdestroy(dqp);
1619 }
1620 
1621 /* --------------- utility functions for vnodeops ---------------- */
1622 
1623 
1624 /*
1625  * Given an inode, a uid, gid and prid, make sure that we have
1626  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1627  * quotas by creating this file.
1628  * This also attaches dquot(s) to the given inode after locking it,
1629  * and returns the dquots corresponding to the uid, gid and/or prid.
1630  *
1631  * in	: inode (unlocked)
1632  * out	: udquot, gdquot and pdquot with references taken and unlocked
1633  */
1634 int
1635 xfs_qm_vop_dqalloc(
1636 	struct xfs_inode	*ip,
1637 	xfs_dqid_t		uid,
1638 	xfs_dqid_t		gid,
1639 	prid_t			prid,
1640 	uint			flags,
1641 	struct xfs_dquot	**O_udqpp,
1642 	struct xfs_dquot	**O_gdqpp,
1643 	struct xfs_dquot	**O_pdqpp)
1644 {
1645 	struct xfs_mount	*mp = ip->i_mount;
1646 	struct xfs_dquot	*uq = NULL;
1647 	struct xfs_dquot	*gq = NULL;
1648 	struct xfs_dquot	*pq = NULL;
1649 	int			error;
1650 	uint			lockflags;
1651 
1652 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1653 		return 0;
1654 
1655 	lockflags = XFS_ILOCK_EXCL;
1656 	xfs_ilock(ip, lockflags);
1657 
1658 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1659 		gid = ip->i_d.di_gid;
1660 
1661 	/*
1662 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1663 	 * if necessary. The dquot(s) will not be locked.
1664 	 */
1665 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1666 		error = xfs_qm_dqattach_locked(ip, true);
1667 		if (error) {
1668 			xfs_iunlock(ip, lockflags);
1669 			return error;
1670 		}
1671 	}
1672 
1673 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1674 		if (ip->i_d.di_uid != uid) {
1675 			/*
1676 			 * What we need is the dquot that has this uid, and
1677 			 * if we send the inode to dqget, the uid of the inode
1678 			 * takes priority over what's sent in the uid argument.
1679 			 * We must unlock the inode here before calling dqget if
1680 			 * we're not sending the inode, because otherwise
1681 			 * we'll deadlock by doing trans_reserve while
1682 			 * holding ilock.
1683 			 */
1684 			xfs_iunlock(ip, lockflags);
1685 			error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
1686 			if (error) {
1687 				ASSERT(error != -ENOENT);
1688 				return error;
1689 			}
1690 			/*
1691 			 * Get the ilock in the right order.
1692 			 */
1693 			xfs_dqunlock(uq);
1694 			lockflags = XFS_ILOCK_SHARED;
1695 			xfs_ilock(ip, lockflags);
1696 		} else {
1697 			/*
1698 			 * Take an extra reference, because we'll return
1699 			 * this to caller
1700 			 */
1701 			ASSERT(ip->i_udquot);
1702 			uq = xfs_qm_dqhold(ip->i_udquot);
1703 		}
1704 	}
1705 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1706 		if (ip->i_d.di_gid != gid) {
1707 			xfs_iunlock(ip, lockflags);
1708 			error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
1709 			if (error) {
1710 				ASSERT(error != -ENOENT);
1711 				goto error_rele;
1712 			}
1713 			xfs_dqunlock(gq);
1714 			lockflags = XFS_ILOCK_SHARED;
1715 			xfs_ilock(ip, lockflags);
1716 		} else {
1717 			ASSERT(ip->i_gdquot);
1718 			gq = xfs_qm_dqhold(ip->i_gdquot);
1719 		}
1720 	}
1721 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722 		if (xfs_get_projid(ip) != prid) {
1723 			xfs_iunlock(ip, lockflags);
1724 			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
1725 					true, &pq);
1726 			if (error) {
1727 				ASSERT(error != -ENOENT);
1728 				goto error_rele;
1729 			}
1730 			xfs_dqunlock(pq);
1731 			lockflags = XFS_ILOCK_SHARED;
1732 			xfs_ilock(ip, lockflags);
1733 		} else {
1734 			ASSERT(ip->i_pdquot);
1735 			pq = xfs_qm_dqhold(ip->i_pdquot);
1736 		}
1737 	}
1738 	if (uq)
1739 		trace_xfs_dquot_dqalloc(ip);
1740 
1741 	xfs_iunlock(ip, lockflags);
1742 	if (O_udqpp)
1743 		*O_udqpp = uq;
1744 	else
1745 		xfs_qm_dqrele(uq);
1746 	if (O_gdqpp)
1747 		*O_gdqpp = gq;
1748 	else
1749 		xfs_qm_dqrele(gq);
1750 	if (O_pdqpp)
1751 		*O_pdqpp = pq;
1752 	else
1753 		xfs_qm_dqrele(pq);
1754 	return 0;
1755 
1756 error_rele:
1757 	xfs_qm_dqrele(gq);
1758 	xfs_qm_dqrele(uq);
1759 	return error;
1760 }
1761 
1762 /*
1763  * Actually transfer ownership, and do dquot modifications.
1764  * These were already reserved.
1765  */
1766 xfs_dquot_t *
1767 xfs_qm_vop_chown(
1768 	xfs_trans_t	*tp,
1769 	xfs_inode_t	*ip,
1770 	xfs_dquot_t	**IO_olddq,
1771 	xfs_dquot_t	*newdq)
1772 {
1773 	xfs_dquot_t	*prevdq;
1774 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1775 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776 
1777 
1778 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780 
1781 	/* old dquot */
1782 	prevdq = *IO_olddq;
1783 	ASSERT(prevdq);
1784 	ASSERT(prevdq != newdq);
1785 
1786 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1787 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788 
1789 	/* the sparkling new dquot */
1790 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1791 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792 
1793 	/*
1794 	 * Take an extra reference, because the inode is going to keep
1795 	 * this dquot pointer even after the trans_commit.
1796 	 */
1797 	*IO_olddq = xfs_qm_dqhold(newdq);
1798 
1799 	return prevdq;
1800 }
1801 
1802 /*
1803  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1804  */
1805 int
1806 xfs_qm_vop_chown_reserve(
1807 	struct xfs_trans	*tp,
1808 	struct xfs_inode	*ip,
1809 	struct xfs_dquot	*udqp,
1810 	struct xfs_dquot	*gdqp,
1811 	struct xfs_dquot	*pdqp,
1812 	uint			flags)
1813 {
1814 	struct xfs_mount	*mp = ip->i_mount;
1815 	uint64_t		delblks;
1816 	unsigned int		blkflags, prjflags = 0;
1817 	struct xfs_dquot	*udq_unres = NULL;
1818 	struct xfs_dquot	*gdq_unres = NULL;
1819 	struct xfs_dquot	*pdq_unres = NULL;
1820 	struct xfs_dquot	*udq_delblks = NULL;
1821 	struct xfs_dquot	*gdq_delblks = NULL;
1822 	struct xfs_dquot	*pdq_delblks = NULL;
1823 	int			error;
1824 
1825 
1826 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1827 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1828 
1829 	delblks = ip->i_delayed_blks;
1830 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1831 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1832 
1833 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1834 	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
1835 		udq_delblks = udqp;
1836 		/*
1837 		 * If there are delayed allocation blocks, then we have to
1838 		 * unreserve those from the old dquot, and add them to the
1839 		 * new dquot.
1840 		 */
1841 		if (delblks) {
1842 			ASSERT(ip->i_udquot);
1843 			udq_unres = ip->i_udquot;
1844 		}
1845 	}
1846 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1847 	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
1848 		gdq_delblks = gdqp;
1849 		if (delblks) {
1850 			ASSERT(ip->i_gdquot);
1851 			gdq_unres = ip->i_gdquot;
1852 		}
1853 	}
1854 
1855 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1856 	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
1857 		prjflags = XFS_QMOPT_ENOSPC;
1858 		pdq_delblks = pdqp;
1859 		if (delblks) {
1860 			ASSERT(ip->i_pdquot);
1861 			pdq_unres = ip->i_pdquot;
1862 		}
1863 	}
1864 
1865 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1866 				udq_delblks, gdq_delblks, pdq_delblks,
1867 				ip->i_d.di_nblocks, 1,
1868 				flags | blkflags | prjflags);
1869 	if (error)
1870 		return error;
1871 
1872 	/*
1873 	 * Do the delayed blks reservations/unreservations now. Since these
1874 	 * are done without the help of a transaction, if a reservation fails,
1875 	 * its previous reservations won't be automatically undone by the trans
1876 	 * code. So we have to do it manually here.
1877 	 */
1878 	if (delblks) {
1879 		/*
1880 		 * Do the reservations first. Unreservation can't fail.
1881 		 */
1882 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1883 		ASSERT(udq_unres || gdq_unres || pdq_unres);
1884 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1885 			    udq_delblks, gdq_delblks, pdq_delblks,
1886 			    (xfs_qcnt_t)delblks, 0,
1887 			    flags | blkflags | prjflags);
1888 		if (error)
1889 			return error;
1890 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1891 				udq_unres, gdq_unres, pdq_unres,
1892 				-((xfs_qcnt_t)delblks), 0, blkflags);
1893 	}
1894 
1895 	return 0;
1896 }
1897 
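/*
 * Make sure every distinct inode involved in a rename has its dquots
 * attached before the rename transaction starts.
 */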
1898 int
1899 xfs_qm_vop_rename_dqattach(
1900 	struct xfs_inode	**i_tab)
1901 {
1902 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1903 	int			i;
1904 
1905 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1906 		return 0;
1907 
1908 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1909 		struct xfs_inode	*ip = i_tab[i];
1910 		int			error;
1911 
1912 		/*
1913 		 * Watch out for duplicate entries in the table.
1914 		 */
1915 		if (i == 0 || ip != i_tab[i-1]) {
1916 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1917 				error = xfs_qm_dqattach(ip);
1918 				if (error)
1919 					return error;
1920 			}
1921 		}
1922 	}
1923 	return 0;
1924 }
1925 
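/*
 * Attach pre-acquired dquots to a newly created inode and charge the new
 * inode to them in the given transaction.
 */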
1926 void
1927 xfs_qm_vop_create_dqattach(
1928 	struct xfs_trans	*tp,
1929 	struct xfs_inode	*ip,
1930 	struct xfs_dquot	*udqp,
1931 	struct xfs_dquot	*gdqp,
1932 	struct xfs_dquot	*pdqp)
1933 {
1934 	struct xfs_mount	*mp = tp->t_mountp;
1935 
1936 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1937 		return;
1938 
1939 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1940 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1941 
1942 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1943 		ASSERT(ip->i_udquot == NULL);
1944 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1945 
1946 		ip->i_udquot = xfs_qm_dqhold(udqp);
1947 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1948 	}
1949 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1950 		ASSERT(ip->i_gdquot == NULL);
1951 		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
1952 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1953 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1954 	}
1955 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1956 		ASSERT(ip->i_pdquot == NULL);
1957 		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
1958 
1959 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1960 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1961 	}
1962 }
1963 
1964