xref: /openbmc/linux/fs/xfs/xfs_qm.c (revision ef9303fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26 
27 /*
28  * The global quota manager. There is only one of these for the entire
29  * system, _not_ one per file system. XQM keeps track of the overall
30  * quota functionality, including maintaining the freelist and hash
31  * tables of dquots.
32  */
33 STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
34 STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);
35 
36 STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38 /*
39  * We use the batch lookup interface to iterate over the dquots as it
40  * currently is the only interface into the radix tree code that allows
41  * fuzzy lookups instead of exact matches.  Holding the lock over multiple
42  * operations is fine as all callers are used either during mount/umount
43  * or quotaoff.
44  */
45 #define XFS_DQ_LOOKUP_BATCH	32
46 
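/*
 * Walk all dquots of the given quota type in the radix tree and call
 * @execute on each one.  An -EAGAIN return from @execute marks the dquot
 * as skipped; once a pass completes we wait briefly and restart until
 * nothing is skipped.  -EFSCORRUPTED aborts the walk immediately.
 */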
47 STATIC int
48 xfs_qm_dquot_walk(
49 	struct xfs_mount	*mp,
50 	int			type,
51 	int			(*execute)(struct xfs_dquot *dqp, void *data),
52 	void			*data)
53 {
54 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
55 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
56 	uint32_t		next_index;
57 	int			last_error = 0;
58 	int			skipped;
59 	int			nr_found;
60 
61 restart:
62 	skipped = 0;
63 	next_index = 0;
64 	nr_found = 0;
65 
66 	while (1) {
67 		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 		int		error = 0;
69 		int		i;
70 
71 		mutex_lock(&qi->qi_tree_lock);
72 		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 					next_index, XFS_DQ_LOOKUP_BATCH);
74 		if (!nr_found) {
75 			mutex_unlock(&qi->qi_tree_lock);
76 			break;
77 		}
78 
79 		for (i = 0; i < nr_found; i++) {
80 			struct xfs_dquot *dqp = batch[i];
81 
82 			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
83 
84 			error = execute(batch[i], data);
85 			if (error == -EAGAIN) {
86 				skipped++;
87 				continue;
88 			}
89 			if (error && last_error != -EFSCORRUPTED)
90 				last_error = error;
91 		}
92 
93 		mutex_unlock(&qi->qi_tree_lock);
94 
95 		/* bail out if the filesystem is corrupted.  */
96 		if (last_error == -EFSCORRUPTED) {
97 			skipped = 0;
98 			break;
99 		}
100 		/* we're done if id overflows back to zero */
101 		if (!next_index)
102 			break;
103 	}
104 
105 	if (skipped) {
106 		delay(1);
107 		goto restart;
108 	}
109 
110 	return last_error;
111 }
112 
113 
114 /*
115  * Purge a dquot from all tracking data structures and free it.
116  */
117 STATIC int
118 xfs_qm_dqpurge(
119 	struct xfs_dquot	*dqp,
120 	void			*data)
121 {
122 	struct xfs_mount	*mp = dqp->q_mount;
123 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
124 	int			error = -EAGAIN;
125 
126 	xfs_dqlock(dqp);
127 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0)
128 		goto out_unlock;
129 
130 	dqp->dq_flags |= XFS_DQ_FREEING;
131 
132 	xfs_dqflock(dqp);
133 
134 	/*
135 	 * If we are turning this quota type off, we don't care
136 	 * about the dirty metadata sitting in this dquot. OTOH, if
137 	 * we're unmounting, we do care, so we flush it and wait.
138 	 */
139 	if (XFS_DQ_IS_DIRTY(dqp)) {
140 		struct xfs_buf	*bp = NULL;
141 
142 		/*
143 		 * We don't care about getting disk errors here. We need
144 		 * to purge this dquot anyway, so we go ahead regardless.
145 		 */
146 		error = xfs_qm_dqflush(dqp, &bp);
147 		if (!error) {
148 			error = xfs_bwrite(bp);
149 			xfs_buf_relse(bp);
150 		} else if (error == -EAGAIN) {
151 			goto out_unlock;
152 		}
153 		xfs_dqflock(dqp);
154 	}
155 
156 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
157 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
158 		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
159 
160 	xfs_dqfunlock(dqp);
161 	xfs_dqunlock(dqp);
162 
163 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
164 			  be32_to_cpu(dqp->q_core.d_id));
165 	qi->qi_dquots--;
166 
167 	/*
168 	 * We move dquots to the freelist as soon as their reference count
169 	 * hits zero, so this one really should be on the freelist here.
170 	 */
171 	ASSERT(!list_empty(&dqp->q_lru));
172 	list_lru_del(&qi->qi_lru, &dqp->q_lru);
173 	XFS_STATS_DEC(mp, xs_qm_dquot_unused);
174 
175 	xfs_qm_dqdestroy(dqp);
176 	return 0;
177 
178 out_unlock:
179 	xfs_dqunlock(dqp);
180 	return error;
181 }
182 
183 /*
184  * Purge the dquot cache.
185  */
186 void
187 xfs_qm_dqpurge_all(
188 	struct xfs_mount	*mp,
189 	uint			flags)
190 {
191 	if (flags & XFS_QMOPT_UQUOTA)
192 		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
193 	if (flags & XFS_QMOPT_GQUOTA)
194 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
195 	if (flags & XFS_QMOPT_PQUOTA)
196 		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
197 }
198 
199 /*
200  * Purge the dquot cache and destroy the quotainfo structure.
201  */
202 void
203 xfs_qm_unmount(
204 	struct xfs_mount	*mp)
205 {
206 	if (mp->m_quotainfo) {
207 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
208 		xfs_qm_destroy_quotainfo(mp);
209 	}
210 }
211 
212 /*
213  * Called from the vfsops layer.
214  */
215 void
216 xfs_qm_unmount_quotas(
217 	xfs_mount_t	*mp)
218 {
219 	/*
220 	 * Release the dquots that the root inode, et al might be holding,
221 	 * before we flush quotas and blow away the quotainfo structure.
222 	 */
223 	ASSERT(mp->m_rootip);
224 	xfs_qm_dqdetach(mp->m_rootip);
225 	if (mp->m_rbmip)
226 		xfs_qm_dqdetach(mp->m_rbmip);
227 	if (mp->m_rsumip)
228 		xfs_qm_dqdetach(mp->m_rsumip);
229 
230 	/*
231 	 * Release the quota inodes.
232 	 */
233 	if (mp->m_quotainfo) {
234 		if (mp->m_quotainfo->qi_uquotaip) {
235 			xfs_irele(mp->m_quotainfo->qi_uquotaip);
236 			mp->m_quotainfo->qi_uquotaip = NULL;
237 		}
238 		if (mp->m_quotainfo->qi_gquotaip) {
239 			xfs_irele(mp->m_quotainfo->qi_gquotaip);
240 			mp->m_quotainfo->qi_gquotaip = NULL;
241 		}
242 		if (mp->m_quotainfo->qi_pquotaip) {
243 			xfs_irele(mp->m_quotainfo->qi_pquotaip);
244 			mp->m_quotainfo->qi_pquotaip = NULL;
245 		}
246 	}
247 }
248 
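/*
 * Attach a dquot of the given type to the inode.  If it is not already
 * attached, look it up (allocating it on disk if @doalloc is set) and
 * store the reference in *IO_idqpp.
 */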
249 STATIC int
250 xfs_qm_dqattach_one(
251 	struct xfs_inode	*ip,
252 	xfs_dqid_t		id,
253 	uint			type,
254 	bool			doalloc,
255 	struct xfs_dquot	**IO_idqpp)
256 {
257 	struct xfs_dquot	*dqp;
258 	int			error;
259 
260 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
261 	error = 0;
262 
263 	/*
264 	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
265 	 * or &i_gdquot. This makes the code look a little weird, but it keeps
266 	 * the logic a lot simpler.
267 	 */
268 	dqp = *IO_idqpp;
269 	if (dqp) {
270 		trace_xfs_dqattach_found(dqp);
271 		return 0;
272 	}
273 
274 	/*
275 	 * Find the dquot from somewhere. This bumps the reference count of the
276 	 * dquot and returns it locked.  This can return ENOENT if the dquot didn't
277 	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
278 	 * turned off suddenly.
279 	 */
280 	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
281 	if (error)
282 		return error;
283 
284 	trace_xfs_dqattach_get(dqp);
285 
286 	/*
287 	 * dqget may have dropped and re-acquired the ilock, but it guarantees
288 	 * that the dquot returned is the one that should go in the inode.
289 	 */
290 	*IO_idqpp = dqp;
291 	xfs_dqunlock(dqp);
292 	return 0;
293 }
294 
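/*
 * Check whether this inode needs dquots attached: quotas must be running and
 * on, the inode must still be missing at least one of the dquots it needs,
 * and the inode must not itself be a quota inode.
 */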
295 static bool
296 xfs_qm_need_dqattach(
297 	struct xfs_inode	*ip)
298 {
299 	struct xfs_mount	*mp = ip->i_mount;
300 
301 	if (!XFS_IS_QUOTA_RUNNING(mp))
302 		return false;
303 	if (!XFS_IS_QUOTA_ON(mp))
304 		return false;
305 	if (!XFS_NOT_DQATTACHED(mp, ip))
306 		return false;
307 	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
308 		return false;
309 	return true;
310 }
311 
312 /*
313  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
314  * into account.
315  * If @doalloc is true, the dquot(s) will be allocated if needed.
316  * Inode may get unlocked and relocked in here, and the caller must deal with
317  * the consequences.
318  */
319 int
320 xfs_qm_dqattach_locked(
321 	xfs_inode_t	*ip,
322 	bool		doalloc)
323 {
324 	xfs_mount_t	*mp = ip->i_mount;
325 	int		error = 0;
326 
327 	if (!xfs_qm_need_dqattach(ip))
328 		return 0;
329 
330 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
331 
332 	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
333 		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
334 				XFS_DQ_USER, doalloc, &ip->i_udquot);
335 		if (error)
336 			goto done;
337 		ASSERT(ip->i_udquot);
338 	}
339 
340 	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
341 		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
342 				XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
343 		if (error)
344 			goto done;
345 		ASSERT(ip->i_gdquot);
346 	}
347 
348 	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
349 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
350 				doalloc, &ip->i_pdquot);
351 		if (error)
352 			goto done;
353 		ASSERT(ip->i_pdquot);
354 	}
355 
356 done:
357 	/*
358 	 * Don't worry about the dquots that we may have attached before any
359 	 * error - they'll get detached later if it has not already been done.
360 	 */
361 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
362 	return error;
363 }
364 
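/*
 * Lock the inode and attach its dquots.  Missing dquots are not allocated
 * on disk here.
 */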
365 int
366 xfs_qm_dqattach(
367 	struct xfs_inode	*ip)
368 {
369 	int			error;
370 
371 	if (!xfs_qm_need_dqattach(ip))
372 		return 0;
373 
374 	xfs_ilock(ip, XFS_ILOCK_EXCL);
375 	error = xfs_qm_dqattach_locked(ip, false);
376 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
377 
378 	return error;
379 }
380 
381 /*
382  * Release dquots (and their references) if any.
383  * The inode should be locked EXCL except when this is called by
384  * xfs_ireclaim.
385  */
386 void
387 xfs_qm_dqdetach(
388 	xfs_inode_t	*ip)
389 {
390 	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
391 		return;
392 
393 	trace_xfs_dquot_dqdetach(ip);
394 
395 	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
396 	if (ip->i_udquot) {
397 		xfs_qm_dqrele(ip->i_udquot);
398 		ip->i_udquot = NULL;
399 	}
400 	if (ip->i_gdquot) {
401 		xfs_qm_dqrele(ip->i_gdquot);
402 		ip->i_gdquot = NULL;
403 	}
404 	if (ip->i_pdquot) {
405 		xfs_qm_dqrele(ip->i_pdquot);
406 		ip->i_pdquot = NULL;
407 	}
408 }
409 
410 struct xfs_qm_isolate {
411 	struct list_head	buffers;
412 	struct list_head	dispose;
413 };
414 
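/*
 * list_lru walk callback for the dquot shrinker.  Dquots that have picked up
 * a reference are dropped from the LRU; dirty dquots are flushed with their
 * buffers queued for delayed write and retried on a later pass; clean,
 * unreferenced dquots are marked XFS_DQ_FREEING and moved to the dispose
 * list for freeing.
 */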
415 static enum lru_status
416 xfs_qm_dquot_isolate(
417 	struct list_head	*item,
418 	struct list_lru_one	*lru,
419 	spinlock_t		*lru_lock,
420 	void			*arg)
421 		__releases(lru_lock) __acquires(lru_lock)
422 {
423 	struct xfs_dquot	*dqp = container_of(item,
424 						struct xfs_dquot, q_lru);
425 	struct xfs_qm_isolate	*isol = arg;
426 
427 	if (!xfs_dqlock_nowait(dqp))
428 		goto out_miss_busy;
429 
430 	/*
431 	 * This dquot has acquired a reference in the meantime; remove it from
432 	 * the freelist and try again.
433 	 */
434 	if (dqp->q_nrefs) {
435 		xfs_dqunlock(dqp);
436 		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
437 
438 		trace_xfs_dqreclaim_want(dqp);
439 		list_lru_isolate(lru, &dqp->q_lru);
440 		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
441 		return LRU_REMOVED;
442 	}
443 
444 	/*
445 	 * If the dquot is dirty, flush it. If it's already being flushed, just
446 	 * skip it so there is time for the IO to complete before we try to
447 	 * reclaim it again on the next LRU pass.
448 	 */
449 	if (!xfs_dqflock_nowait(dqp)) {
450 		xfs_dqunlock(dqp);
451 		goto out_miss_busy;
452 	}
453 
454 	if (XFS_DQ_IS_DIRTY(dqp)) {
455 		struct xfs_buf	*bp = NULL;
456 		int		error;
457 
458 		trace_xfs_dqreclaim_dirty(dqp);
459 
460 		/* we have to drop the LRU lock to flush the dquot */
461 		spin_unlock(lru_lock);
462 
463 		error = xfs_qm_dqflush(dqp, &bp);
464 		if (error)
465 			goto out_unlock_dirty;
466 
467 		xfs_buf_delwri_queue(bp, &isol->buffers);
468 		xfs_buf_relse(bp);
469 		goto out_unlock_dirty;
470 	}
471 	xfs_dqfunlock(dqp);
472 
473 	/*
474 	 * Prevent lookups now that we are past the point of no return.
475 	 */
476 	dqp->dq_flags |= XFS_DQ_FREEING;
477 	xfs_dqunlock(dqp);
478 
479 	ASSERT(dqp->q_nrefs == 0);
480 	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
481 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
482 	trace_xfs_dqreclaim_done(dqp);
483 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
484 	return LRU_REMOVED;
485 
486 out_miss_busy:
487 	trace_xfs_dqreclaim_busy(dqp);
488 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
489 	return LRU_SKIP;
490 
491 out_unlock_dirty:
492 	trace_xfs_dqreclaim_busy(dqp);
493 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
494 	xfs_dqunlock(dqp);
495 	spin_lock(lru_lock);
496 	return LRU_RETRY;
497 }
498 
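/*
 * Shrinker scan callback.  Walk the dquot LRU to isolate reclaimable dquots,
 * submit any dquot buffers that were queued for writeback during the walk,
 * then free everything that landed on the dispose list.
 */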
499 static unsigned long
500 xfs_qm_shrink_scan(
501 	struct shrinker		*shrink,
502 	struct shrink_control	*sc)
503 {
504 	struct xfs_quotainfo	*qi = container_of(shrink,
505 					struct xfs_quotainfo, qi_shrinker);
506 	struct xfs_qm_isolate	isol;
507 	unsigned long		freed;
508 	int			error;
509 
510 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
511 		return 0;
512 
513 	INIT_LIST_HEAD(&isol.buffers);
514 	INIT_LIST_HEAD(&isol.dispose);
515 
516 	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
517 				     xfs_qm_dquot_isolate, &isol);
518 
519 	error = xfs_buf_delwri_submit(&isol.buffers);
520 	if (error)
521 		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
522 
523 	while (!list_empty(&isol.dispose)) {
524 		struct xfs_dquot	*dqp;
525 
526 		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
527 		list_del_init(&dqp->q_lru);
528 		xfs_qm_dqfree_one(dqp);
529 	}
530 
531 	return freed;
532 }
533 
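/*
 * Shrinker count callback: report how many dquots currently sit on the LRU
 * and are therefore candidates for reclaim.
 */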
534 static unsigned long
535 xfs_qm_shrink_count(
536 	struct shrinker		*shrink,
537 	struct shrink_control	*sc)
538 {
539 	struct xfs_quotainfo	*qi = container_of(shrink,
540 					struct xfs_quotainfo, qi_shrinker);
541 
542 	return list_lru_shrink_count(&qi->qi_lru, sc);
543 }
544 
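/*
 * Initialize the default quota limits for @type from the id-zero dquot on
 * disk, if one exists.
 */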
545 STATIC void
546 xfs_qm_set_defquota(
547 	struct xfs_mount	*mp,
548 	uint			type,
549 	struct xfs_quotainfo	*qinf)
550 {
551 	struct xfs_dquot	*dqp;
552 	struct xfs_def_quota	*defq;
553 	struct xfs_disk_dquot	*ddqp;
554 	int			error;
555 
556 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
557 	if (error)
558 		return;
559 
560 	ddqp = &dqp->q_core;
561 	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
562 
563 	/*
564 	 * Timers and warnings have already been set, so just set the
565 	 * default limits for this quota type.
566 	 */
567 	defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
568 	defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
569 	defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
570 	defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
571 	defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
572 	defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
573 	xfs_qm_dqdestroy(dqp);
574 }
575 
576 /* Initialize quota time limits from the root dquot. */
577 static void
578 xfs_qm_init_timelimits(
579 	struct xfs_mount	*mp,
580 	uint			type)
581 {
582 	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
583 	struct xfs_def_quota	*defq;
584 	struct xfs_disk_dquot	*ddqp;
585 	struct xfs_dquot	*dqp;
586 	int			error;
587 
588 	defq = xfs_get_defquota(qinf, type);
589 
590 	defq->btimelimit = XFS_QM_BTIMELIMIT;
591 	defq->itimelimit = XFS_QM_ITIMELIMIT;
592 	defq->rtbtimelimit = XFS_QM_RTBTIMELIMIT;
593 	defq->bwarnlimit = XFS_QM_BWARNLIMIT;
594 	defq->iwarnlimit = XFS_QM_IWARNLIMIT;
595 	defq->rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
596 
597 	/*
598 	 * We try to get the limits from the superuser's limits fields.
599 	 * This is quite hacky, but it is standard quota practice.
600 	 *
601 	 * Since we may not have done a quotacheck by this point, just read
602 	 * the dquot without attaching it to any hashtables or lists.
603 	 */
604 	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
605 	if (error)
606 		return;
607 
608 	ddqp = &dqp->q_core;
609 
610 	/*
611 	 * The warnings and timers set the grace period given to
612 	 * a user or group before further writes are disallowed. If the
613 	 * on-disk value is zero, a default is used.
614 	 */
615 	if (ddqp->d_btimer)
616 		defq->btimelimit = be32_to_cpu(ddqp->d_btimer);
617 	if (ddqp->d_itimer)
618 		defq->itimelimit = be32_to_cpu(ddqp->d_itimer);
619 	if (ddqp->d_rtbtimer)
620 		defq->rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
621 	if (ddqp->d_bwarns)
622 		defq->bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
623 	if (ddqp->d_iwarns)
624 		defq->iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
625 	if (ddqp->d_rtbwarns)
626 		defq->rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
627 
628 	xfs_qm_dqdestroy(dqp);
629 }
630 
631 /*
632  * This initializes all the quota information that's kept in the
633  * mount structure
634  */
635 STATIC int
636 xfs_qm_init_quotainfo(
637 	struct xfs_mount	*mp)
638 {
639 	struct xfs_quotainfo	*qinf;
640 	int			error;
641 
642 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
643 
644 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
645 
646 	error = list_lru_init(&qinf->qi_lru);
647 	if (error)
648 		goto out_free_qinf;
649 
650 	/*
651 	 * See if the quota inodes are set up, and if not, allocate them,
652 	 * and change the superblock accordingly.
653 	 */
654 	error = xfs_qm_init_quotainos(mp);
655 	if (error)
656 		goto out_free_lru;
657 
658 	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
659 	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
660 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
661 	mutex_init(&qinf->qi_tree_lock);
662 
663 	/* mutex used to serialize quotaoffs */
664 	mutex_init(&qinf->qi_quotaofflock);
665 
666 	/* Precalc some constants */
667 	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
668 	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
669 
670 	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
671 
672 	xfs_qm_init_timelimits(mp, XFS_DQ_USER);
673 	xfs_qm_init_timelimits(mp, XFS_DQ_GROUP);
674 	xfs_qm_init_timelimits(mp, XFS_DQ_PROJ);
675 
676 	if (XFS_IS_UQUOTA_RUNNING(mp))
677 		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
678 	if (XFS_IS_GQUOTA_RUNNING(mp))
679 		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
680 	if (XFS_IS_PQUOTA_RUNNING(mp))
681 		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
682 
683 	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
684 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
685 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
686 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
687 
688 	error = register_shrinker(&qinf->qi_shrinker);
689 	if (error)
690 		goto out_free_inos;
691 
692 	return 0;
693 
694 out_free_inos:
695 	mutex_destroy(&qinf->qi_quotaofflock);
696 	mutex_destroy(&qinf->qi_tree_lock);
697 	xfs_qm_destroy_quotainos(qinf);
698 out_free_lru:
699 	list_lru_destroy(&qinf->qi_lru);
700 out_free_qinf:
701 	kmem_free(qinf);
702 	mp->m_quotainfo = NULL;
703 	return error;
704 }
705 
706 /*
707  * Gets called when unmounting a filesystem or when all quotas get
708  * turned off.
709  * This releases the quota inodes, destroys the locks and frees the quotainfo.
710  */
711 void
712 xfs_qm_destroy_quotainfo(
713 	struct xfs_mount	*mp)
714 {
715 	struct xfs_quotainfo	*qi;
716 
717 	qi = mp->m_quotainfo;
718 	ASSERT(qi != NULL);
719 
720 	unregister_shrinker(&qi->qi_shrinker);
721 	list_lru_destroy(&qi->qi_lru);
722 	xfs_qm_destroy_quotainos(qi);
723 	mutex_destroy(&qi->qi_tree_lock);
724 	mutex_destroy(&qi->qi_quotaofflock);
725 	kmem_free(qi);
726 	mp->m_quotainfo = NULL;
727 }
728 
729 /*
730  * Create an inode and return with a reference already taken, but unlocked.
731  * This is how we create quota inodes.
732  */
733 STATIC int
734 xfs_qm_qino_alloc(
735 	xfs_mount_t	*mp,
736 	xfs_inode_t	**ip,
737 	uint		flags)
738 {
739 	xfs_trans_t	*tp;
740 	int		error;
741 	bool		need_alloc = true;
742 
743 	*ip = NULL;
744 	/*
745 	 * With a superblock that doesn't have a separate pquotino, we
746 	 * share an inode between gquota and pquota. If the on-disk
747 	 * superblock has GQUOTA and the filesystem is now mounted
748 	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
749 	 * vice-versa.
750 	 */
751 	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
752 			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
753 		xfs_ino_t ino = NULLFSINO;
754 
755 		if ((flags & XFS_QMOPT_PQUOTA) &&
756 			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
757 			ino = mp->m_sb.sb_gquotino;
758 			if (XFS_IS_CORRUPT(mp,
759 					   mp->m_sb.sb_pquotino != NULLFSINO))
760 				return -EFSCORRUPTED;
761 		} else if ((flags & XFS_QMOPT_GQUOTA) &&
762 			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
763 			ino = mp->m_sb.sb_pquotino;
764 			if (XFS_IS_CORRUPT(mp,
765 					   mp->m_sb.sb_gquotino != NULLFSINO))
766 				return -EFSCORRUPTED;
767 		}
768 		if (ino != NULLFSINO) {
769 			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
770 			if (error)
771 				return error;
772 			mp->m_sb.sb_gquotino = NULLFSINO;
773 			mp->m_sb.sb_pquotino = NULLFSINO;
774 			need_alloc = false;
775 		}
776 	}
777 
778 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
779 			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
780 			0, 0, &tp);
781 	if (error)
782 		return error;
783 
784 	if (need_alloc) {
785 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
786 		if (error) {
787 			xfs_trans_cancel(tp);
788 			return error;
789 		}
790 	}
791 
792 	/*
793 	 * Make the changes in the superblock, and log those too.
794 	 * sbfields arg may contain fields other than *QUOTINO;
795 	 * VERSIONNUM for example.
796 	 */
797 	spin_lock(&mp->m_sb_lock);
798 	if (flags & XFS_QMOPT_SBVERSION) {
799 		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
800 
801 		xfs_sb_version_addquota(&mp->m_sb);
802 		mp->m_sb.sb_uquotino = NULLFSINO;
803 		mp->m_sb.sb_gquotino = NULLFSINO;
804 		mp->m_sb.sb_pquotino = NULLFSINO;
805 
806 		/* qflags will get updated fully _after_ quotacheck */
807 		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
808 	}
809 	if (flags & XFS_QMOPT_UQUOTA)
810 		mp->m_sb.sb_uquotino = (*ip)->i_ino;
811 	else if (flags & XFS_QMOPT_GQUOTA)
812 		mp->m_sb.sb_gquotino = (*ip)->i_ino;
813 	else
814 		mp->m_sb.sb_pquotino = (*ip)->i_ino;
815 	spin_unlock(&mp->m_sb_lock);
816 	xfs_log_sb(tp);
817 
818 	error = xfs_trans_commit(tp);
819 	if (error) {
820 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
821 		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
822 	}
823 	if (need_alloc)
824 		xfs_finish_inode_setup(*ip);
825 	return error;
826 }
827 
828 
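/*
 * Reset the counters (and, except for the id-zero dquot, the timers and
 * warning counts) of every dquot in this buffer so that quotacheck can
 * rebuild them, repairing any dquot blocks that fail verification along
 * the way.
 */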
829 STATIC void
830 xfs_qm_reset_dqcounts(
831 	xfs_mount_t	*mp,
832 	xfs_buf_t	*bp,
833 	xfs_dqid_t	id,
834 	uint		type)
835 {
836 	struct xfs_dqblk	*dqb;
837 	int			j;
838 	xfs_failaddr_t		fa;
839 
840 	trace_xfs_reset_dqcounts(bp, _RET_IP_);
841 
842 	/*
843 	 * Reset all counters and timers. They'll be
844 	 * started afresh by xfs_qm_quotacheck.
845 	 */
846 #ifdef DEBUG
847 	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
848 		sizeof(xfs_dqblk_t);
849 	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
850 #endif
851 	dqb = bp->b_addr;
852 	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
853 		struct xfs_disk_dquot	*ddq;
854 
855 		ddq = (struct xfs_disk_dquot *)&dqb[j];
856 
857 		/*
858 		 * Do a sanity check, and if needed, repair the dqblk. Don't
859 		 * output any warnings because it's perfectly possible to
860 		 * find uninitialised dquot blks. See comment in
861 		 * xfs_dquot_verify.
862 		 */
863 		fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
864 		if (fa)
865 			xfs_dqblk_repair(mp, &dqb[j], id + j, type);
866 
867 		/*
868 		 * Reset the type in case we are reusing the group quota file for
869 		 * project quotas or vice versa.
870 		 */
871 		ddq->d_flags = type;
872 		ddq->d_bcount = 0;
873 		ddq->d_icount = 0;
874 		ddq->d_rtbcount = 0;
875 
876 		/*
877 		 * dquot id 0 stores the default grace period and the maximum
878 		 * warning limit that were set by the administrator, so we
879 		 * should not reset them.
880 		 */
881 		if (ddq->d_id != 0) {
882 			ddq->d_btimer = 0;
883 			ddq->d_itimer = 0;
884 			ddq->d_rtbtimer = 0;
885 			ddq->d_bwarns = 0;
886 			ddq->d_iwarns = 0;
887 			ddq->d_rtbwarns = 0;
888 		}
889 
890 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
891 			xfs_update_cksum((char *)&dqb[j],
892 					 sizeof(struct xfs_dqblk),
893 					 XFS_DQUOT_CRC_OFF);
894 		}
895 	}
896 }
897 
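/*
 * Read each dquot buffer in the given block range, reset the dquots it
 * contains and queue the buffer for delayed write.
 */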
898 STATIC int
899 xfs_qm_reset_dqcounts_all(
900 	struct xfs_mount	*mp,
901 	xfs_dqid_t		firstid,
902 	xfs_fsblock_t		bno,
903 	xfs_filblks_t		blkcnt,
904 	uint			flags,
905 	struct list_head	*buffer_list)
906 {
907 	struct xfs_buf		*bp;
908 	int			error;
909 	int			type;
910 
911 	ASSERT(blkcnt > 0);
912 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
913 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
914 	error = 0;
915 
916 	/*
917 	 * Blkcnt arg can be a very big number, and might even be
918 	 * larger than the log itself. So, we have to break it up into
919 	 * manageable-sized transactions.
920 	 * Note that we don't start a permanent transaction here; we might
921 	 * not be able to get a log reservation for the whole thing up front,
922 	 * and we don't really care to either, because we just discard
923 	 * everything if we were to crash in the middle of this loop.
924 	 */
925 	while (blkcnt--) {
926 		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
927 			      XFS_FSB_TO_DADDR(mp, bno),
928 			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
929 			      &xfs_dquot_buf_ops);
930 
931 		/*
932 		 * CRC and validation errors will return an EFSCORRUPTED here. If
933 		 * this occurs, re-read without CRC validation so that we can
934 		 * repair the damage via xfs_qm_reset_dqcounts(). This process
935 		 * will leave a trace in the log indicating corruption has
936 		 * been detected.
937 		 */
938 		if (error == -EFSCORRUPTED) {
939 			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
940 				      XFS_FSB_TO_DADDR(mp, bno),
941 				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
942 				      NULL);
943 		}
944 
945 		if (error)
946 			break;
947 
948 		/*
949 		 * A corrupt buffer might not have a verifier attached, so
950 		 * make sure we have the correct one attached before writeback
951 		 * occurs.
952 		 */
953 		bp->b_ops = &xfs_dquot_buf_ops;
954 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
955 		xfs_buf_delwri_queue(bp, buffer_list);
956 		xfs_buf_relse(bp);
957 
958 		/* goto the next block. */
959 		bno++;
960 		firstid += mp->m_quotainfo->qi_dqperchunk;
961 	}
962 
963 	return error;
964 }
965 
966 /*
967  * Iterate over all allocated dquot blocks in this quota inode, zeroing all
968  * counters for every chunk of dquots that we find.
969  */
970 STATIC int
971 xfs_qm_reset_dqcounts_buf(
972 	struct xfs_mount	*mp,
973 	struct xfs_inode	*qip,
974 	uint			flags,
975 	struct list_head	*buffer_list)
976 {
977 	struct xfs_bmbt_irec	*map;
978 	int			i, nmaps;	/* number of map entries */
979 	int			error;		/* return value */
980 	xfs_fileoff_t		lblkno;
981 	xfs_filblks_t		maxlblkcnt;
982 	xfs_dqid_t		firstid;
983 	xfs_fsblock_t		rablkno;
984 	xfs_filblks_t		rablkcnt;
985 
986 	error = 0;
987 	/*
988 	 * This looks racy, but we can't keep an inode lock across a
989 	 * trans_reserve. However, this gets called during quotacheck, and that
990 	 * happens only at mount time, which is single threaded.
991 	 */
992 	if (qip->i_d.di_nblocks == 0)
993 		return 0;
994 
995 	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
996 
997 	lblkno = 0;
998 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
999 	do {
1000 		uint		lock_mode;
1001 
1002 		nmaps = XFS_DQITER_MAP_SIZE;
1003 		/*
1004 		 * We aren't changing the inode itself. Just changing
1005 		 * some of its data. No new blocks are added here, and
1006 		 * the inode is never added to the transaction.
1007 		 */
1008 		lock_mode = xfs_ilock_data_map_shared(qip);
1009 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1010 				       map, &nmaps, 0);
1011 		xfs_iunlock(qip, lock_mode);
1012 		if (error)
1013 			break;
1014 
1015 		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1016 		for (i = 0; i < nmaps; i++) {
1017 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1018 			ASSERT(map[i].br_blockcount);
1019 
1020 
1021 			lblkno += map[i].br_blockcount;
1022 
1023 			if (map[i].br_startblock == HOLESTARTBLOCK)
1024 				continue;
1025 
1026 			firstid = (xfs_dqid_t) map[i].br_startoff *
1027 				mp->m_quotainfo->qi_dqperchunk;
1028 			/*
1029 			 * Do a read-ahead on the next extent.
1030 			 */
1031 			if ((i+1 < nmaps) &&
1032 			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1033 				rablkcnt =  map[i+1].br_blockcount;
1034 				rablkno = map[i+1].br_startblock;
1035 				while (rablkcnt--) {
1036 					xfs_buf_readahead(mp->m_ddev_targp,
1037 					       XFS_FSB_TO_DADDR(mp, rablkno),
1038 					       mp->m_quotainfo->qi_dqchunklen,
1039 					       &xfs_dquot_buf_ops);
1040 					rablkno++;
1041 				}
1042 			}
1043 			/*
1044 			 * Iterate through all the blocks in the extent and
1045 			 * reset the counters of all the dquots inside them.
1046 			 */
1047 			error = xfs_qm_reset_dqcounts_all(mp, firstid,
1048 						   map[i].br_startblock,
1049 						   map[i].br_blockcount,
1050 						   flags, buffer_list);
1051 			if (error)
1052 				goto out;
1053 		}
1054 	} while (nmaps > 0);
1055 
1056 out:
1057 	kmem_free(map);
1058 	return error;
1059 }
1060 
1061 /*
1062  * Called by dqusage_adjust in doing a quotacheck.
1063  *
1064  * Given the inode and a dquot id, this updates both the incore dquot as well
1065  * as the buffer copy. This is so that once the quotacheck is done, we can
1066  * just log all the buffers, as opposed to logging numerous updates to
1067  * individual dquots.
1068  */
1069 STATIC int
1070 xfs_qm_quotacheck_dqadjust(
1071 	struct xfs_inode	*ip,
1072 	uint			type,
1073 	xfs_qcnt_t		nblks,
1074 	xfs_qcnt_t		rtblks)
1075 {
1076 	struct xfs_mount	*mp = ip->i_mount;
1077 	struct xfs_dquot	*dqp;
1078 	xfs_dqid_t		id;
1079 	int			error;
1080 
1081 	id = xfs_qm_id_for_quotatype(ip, type);
1082 	error = xfs_qm_dqget(mp, id, type, true, &dqp);
1083 	if (error) {
1084 		/*
1085 		 * Shouldn't be able to turn off quotas here.
1086 		 */
1087 		ASSERT(error != -ESRCH);
1088 		ASSERT(error != -ENOENT);
1089 		return error;
1090 	}
1091 
1092 	trace_xfs_dqadjust(dqp);
1093 
1094 	/*
1095 	 * Adjust the inode count and the block count to reflect this inode's
1096 	 * resource usage.
1097 	 */
1098 	be64_add_cpu(&dqp->q_core.d_icount, 1);
1099 	dqp->q_res_icount++;
1100 	if (nblks) {
1101 		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1102 		dqp->q_res_bcount += nblks;
1103 	}
1104 	if (rtblks) {
1105 		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1106 		dqp->q_res_rtbcount += rtblks;
1107 	}
1108 
1109 	/*
1110 	 * Set default limits, adjust timers (since we changed usages)
1111 	 *
1112 	 * There are no timers for the default values set in the root dquot.
1113 	 */
1114 	if (dqp->q_core.d_id) {
1115 		xfs_qm_adjust_dqlimits(mp, dqp);
1116 		xfs_qm_adjust_dqtimers(mp, dqp);
1117 	}
1118 
1119 	dqp->dq_flags |= XFS_DQ_DIRTY;
1120 	xfs_qm_dqput(dqp);
1121 	return 0;
1122 }
1123 
1124 /*
1125  * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1126  * dquots and update them to account for resources taken by that inode.
1127  */
1128 /* ARGSUSED */
1129 STATIC int
1130 xfs_qm_dqusage_adjust(
1131 	struct xfs_mount	*mp,
1132 	struct xfs_trans	*tp,
1133 	xfs_ino_t		ino,
1134 	void			*data)
1135 {
1136 	struct xfs_inode	*ip;
1137 	xfs_qcnt_t		nblks;
1138 	xfs_filblks_t		rtblks = 0;	/* total rt blks */
1139 	int			error;
1140 
1141 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1142 
1143 	/*
1144 	 * rootino must have its resources accounted for, not so with the quota
1145 	 * inodes.
1146 	 */
1147 	if (xfs_is_quota_inode(&mp->m_sb, ino))
1148 		return 0;
1149 
1150 	/*
1151 	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1152 	 * at mount time and therefore nobody will be racing chown/chproj.
1153 	 */
1154 	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1155 	if (error == -EINVAL || error == -ENOENT)
1156 		return 0;
1157 	if (error)
1158 		return error;
1159 
1160 	ASSERT(ip->i_delayed_blks == 0);
1161 
1162 	if (XFS_IS_REALTIME_INODE(ip)) {
1163 		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1164 
1165 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1166 			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1167 			if (error)
1168 				goto error0;
1169 		}
1170 
1171 		xfs_bmap_count_leaves(ifp, &rtblks);
1172 	}
1173 
1174 	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1175 
1176 	/*
1177 	 * Add the (disk blocks and inode) resources occupied by this
1178 	 * inode to its dquots. We do this adjustment in the incore dquot,
1179 	 * and also copy the changes to its buffer.
1180 	 * We don't care about putting these changes in a transaction
1181 	 * envelope because if we crash in the middle of a 'quotacheck'
1182 	 * we have to start from the beginning anyway.
1183 	 * Once we're done, we'll log all the dquot bufs.
1184 	 *
1185 	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1186 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1187 	 */
1188 	if (XFS_IS_UQUOTA_ON(mp)) {
1189 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
1190 				rtblks);
1191 		if (error)
1192 			goto error0;
1193 	}
1194 
1195 	if (XFS_IS_GQUOTA_ON(mp)) {
1196 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
1197 				rtblks);
1198 		if (error)
1199 			goto error0;
1200 	}
1201 
1202 	if (XFS_IS_PQUOTA_ON(mp)) {
1203 		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
1204 				rtblks);
1205 		if (error)
1206 			goto error0;
1207 	}
1208 
1209 error0:
1210 	xfs_irele(ip);
1211 	return error;
1212 }
1213 
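/*
 * Flush a single dirty dquot to its backing buffer and add that buffer to
 * the quotacheck delwri queue.  If the flush lock is held because the buffer
 * is already on the queue, push the buffer out of the queue and return
 * -EAGAIN so the walk retries this dquot.
 */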
1214 STATIC int
1215 xfs_qm_flush_one(
1216 	struct xfs_dquot	*dqp,
1217 	void			*data)
1218 {
1219 	struct xfs_mount	*mp = dqp->q_mount;
1220 	struct list_head	*buffer_list = data;
1221 	struct xfs_buf		*bp = NULL;
1222 	int			error = 0;
1223 
1224 	xfs_dqlock(dqp);
1225 	if (dqp->dq_flags & XFS_DQ_FREEING)
1226 		goto out_unlock;
1227 	if (!XFS_DQ_IS_DIRTY(dqp))
1228 		goto out_unlock;
1229 
1230 	/*
1231 	 * The only way the dquot is already flush locked by the time quotacheck
1232 	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1233 	 * it for the final time. Quotacheck collects all dquot bufs in the
1234 	 * local delwri queue before dquots are dirtied, so reclaim can't have
1235 	 * possibly queued it for I/O. The only way out is to push the buffer to
1236 	 * cycle the flush lock.
1237 	 */
1238 	if (!xfs_dqflock_nowait(dqp)) {
1239 		/* buf is pinned in-core by delwri list */
1240 		bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1241 				mp->m_quotainfo->qi_dqchunklen, 0);
1242 		if (!bp) {
1243 			error = -EINVAL;
1244 			goto out_unlock;
1245 		}
1246 		xfs_buf_unlock(bp);
1247 
1248 		xfs_buf_delwri_pushbuf(bp, buffer_list);
1249 		xfs_buf_rele(bp);
1250 
1251 		error = -EAGAIN;
1252 		goto out_unlock;
1253 	}
1254 
1255 	error = xfs_qm_dqflush(dqp, &bp);
1256 	if (error)
1257 		goto out_unlock;
1258 
1259 	xfs_buf_delwri_queue(bp, buffer_list);
1260 	xfs_buf_relse(bp);
1261 out_unlock:
1262 	xfs_dqunlock(dqp);
1263 	return error;
1264 }
1265 
1266 /*
1267  * Walk through all the filesystem inodes and construct a consistent view
1268  * of the disk quota world. If the quotacheck fails, disable quotas.
1269  */
1270 STATIC int
1271 xfs_qm_quotacheck(
1272 	xfs_mount_t	*mp)
1273 {
1274 	int			error, error2;
1275 	uint			flags;
1276 	LIST_HEAD		(buffer_list);
1277 	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
1278 	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
1279 	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
1280 
1281 	flags = 0;
1282 
1283 	ASSERT(uip || gip || pip);
1284 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1285 
1286 	xfs_notice(mp, "Quotacheck needed: Please wait.");
1287 
1288 	/*
1289 	 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1290 	 * their counters to zero. We need a clean slate.
1291 	 * We don't log our changes till later.
1292 	 */
1293 	if (uip) {
1294 		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
1295 					 &buffer_list);
1296 		if (error)
1297 			goto error_return;
1298 		flags |= XFS_UQUOTA_CHKD;
1299 	}
1300 
1301 	if (gip) {
1302 		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
1303 					 &buffer_list);
1304 		if (error)
1305 			goto error_return;
1306 		flags |= XFS_GQUOTA_CHKD;
1307 	}
1308 
1309 	if (pip) {
1310 		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
1311 					 &buffer_list);
1312 		if (error)
1313 			goto error_return;
1314 		flags |= XFS_PQUOTA_CHKD;
1315 	}
1316 
1317 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1318 			NULL);
1319 	if (error)
1320 		goto error_return;
1321 
1322 	/*
1323 	 * We've made all the changes that we need to make incore.  Flush them
1324 	 * down to disk buffers if everything was updated successfully.
1325 	 */
1326 	if (XFS_IS_UQUOTA_ON(mp)) {
1327 		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1328 					  &buffer_list);
1329 	}
1330 	if (XFS_IS_GQUOTA_ON(mp)) {
1331 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1332 					   &buffer_list);
1333 		if (!error)
1334 			error = error2;
1335 	}
1336 	if (XFS_IS_PQUOTA_ON(mp)) {
1337 		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1338 					   &buffer_list);
1339 		if (!error)
1340 			error = error2;
1341 	}
1342 
1343 	error2 = xfs_buf_delwri_submit(&buffer_list);
1344 	if (!error)
1345 		error = error2;
1346 
1347 	/*
1348 	 * We can get this error if we couldn't do a dquot allocation inside
1349 	 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1350 	 * dirty dquots that might be cached, we just want to get rid of them
1351 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
1352 	 * at this point (because we intentionally didn't in dqget_noattach).
1353 	 */
1354 	if (error) {
1355 		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1356 		goto error_return;
1357 	}
1358 
1359 	/*
1360 	 * If one type of quota is off, then it will lose its
1361 	 * quotachecked status, since we won't be doing accounting for
1362 	 * that type anymore.
1363 	 */
1364 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1365 	mp->m_qflags |= flags;
1366 
1367  error_return:
1368 	xfs_buf_delwri_cancel(&buffer_list);
1369 
1370 	if (error) {
1371 		xfs_warn(mp,
1372 	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1373 			error);
1374 		/*
1375 		 * We must turn off quotas.
1376 		 */
1377 		ASSERT(mp->m_quotainfo != NULL);
1378 		xfs_qm_destroy_quotainfo(mp);
1379 		if (xfs_mount_reset_sbqflags(mp)) {
1380 			xfs_warn(mp,
1381 				"Quotacheck: Failed to reset quota flags.");
1382 		}
1383 	} else
1384 		xfs_notice(mp, "Quotacheck: Done.");
1385 	return error;
1386 }
1387 
1388 /*
1389  * This is called from xfs_mountfs to start quotas and initialize all
1390  * necessary data structures like quotainfo.  This is also responsible for
1391  * running a quotacheck as necessary.  We are guaranteed that the superblock
1392  * is consistently read in at this point.
1393  *
1394  * If we fail here, the mount will continue with quota turned off. We don't
1395  * need to indicate success or failure at all.
1396  */
1397 void
1398 xfs_qm_mount_quotas(
1399 	struct xfs_mount	*mp)
1400 {
1401 	int			error = 0;
1402 	uint			sbf;
1403 
1404 	/*
1405 	 * Quotas on realtime volumes are not supported, so we disable
1406 	 * quotas immediately.
1407 	 */
1408 	if (mp->m_sb.sb_rextents) {
1409 		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1410 		mp->m_qflags = 0;
1411 		goto write_changes;
1412 	}
1413 
1414 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1415 
1416 	/*
1417 	 * Allocate the quotainfo structure inside the mount struct, create
1418 	 * the quota inode(s), and change/rev the superblock if necessary.
1419 	 */
1420 	error = xfs_qm_init_quotainfo(mp);
1421 	if (error) {
1422 		/*
1423 		 * We must turn off quotas.
1424 		 */
1425 		ASSERT(mp->m_quotainfo == NULL);
1426 		mp->m_qflags = 0;
1427 		goto write_changes;
1428 	}
1429 	/*
1430 	 * If any of the quotas are not consistent, do a quotacheck.
1431 	 */
1432 	if (XFS_QM_NEED_QUOTACHECK(mp)) {
1433 		error = xfs_qm_quotacheck(mp);
1434 		if (error) {
1435 			/* Quotacheck failed and disabled quotas. */
1436 			return;
1437 		}
1438 	}
1439 	/*
1440 	 * If one type of quota is off, then it will lose its
1441 	 * quotachecked status, since we won't be doing accounting for
1442 	 * that type anymore.
1443 	 */
1444 	if (!XFS_IS_UQUOTA_ON(mp))
1445 		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1446 	if (!XFS_IS_GQUOTA_ON(mp))
1447 		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1448 	if (!XFS_IS_PQUOTA_ON(mp))
1449 		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1450 
1451  write_changes:
1452 	/*
1453 	 * We actually don't have to acquire the m_sb_lock at all.
1454 	 * This can only be called from mount, and that's single threaded. XXX
1455 	 */
1456 	spin_lock(&mp->m_sb_lock);
1457 	sbf = mp->m_sb.sb_qflags;
1458 	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1459 	spin_unlock(&mp->m_sb_lock);
1460 
1461 	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1462 		if (xfs_sync_sb(mp, false)) {
1463 			/*
1464 			 * We could only have been turning quotas off.
1465 			 * We aren't in very good shape actually because
1466 			 * the incore structures are convinced that quotas are
1467 			 * off, but the on-disk superblock doesn't know that!
1468 			 */
1469 			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1470 			xfs_alert(mp, "%s: Superblock update failed!",
1471 				__func__);
1472 		}
1473 	}
1474 
1475 	if (error) {
1476 		xfs_warn(mp, "Failed to initialize disk quotas.");
1477 		return;
1478 	}
1479 }
1480 
1481 /*
1482  * This is called after the superblock has been read in and we're ready to
1483  * iget the quota inodes.
1484  */
1485 STATIC int
1486 xfs_qm_init_quotainos(
1487 	xfs_mount_t	*mp)
1488 {
1489 	struct xfs_inode	*uip = NULL;
1490 	struct xfs_inode	*gip = NULL;
1491 	struct xfs_inode	*pip = NULL;
1492 	int			error;
1493 	uint			flags = 0;
1494 
1495 	ASSERT(mp->m_quotainfo);
1496 
1497 	/*
1498 	 * Get the uquota, gquota and pquota inodes
1499 	 */
1500 	if (xfs_sb_version_hasquota(&mp->m_sb)) {
1501 		if (XFS_IS_UQUOTA_ON(mp) &&
1502 		    mp->m_sb.sb_uquotino != NULLFSINO) {
1503 			ASSERT(mp->m_sb.sb_uquotino > 0);
1504 			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1505 					     0, 0, &uip);
1506 			if (error)
1507 				return error;
1508 		}
1509 		if (XFS_IS_GQUOTA_ON(mp) &&
1510 		    mp->m_sb.sb_gquotino != NULLFSINO) {
1511 			ASSERT(mp->m_sb.sb_gquotino > 0);
1512 			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1513 					     0, 0, &gip);
1514 			if (error)
1515 				goto error_rele;
1516 		}
1517 		if (XFS_IS_PQUOTA_ON(mp) &&
1518 		    mp->m_sb.sb_pquotino != NULLFSINO) {
1519 			ASSERT(mp->m_sb.sb_pquotino > 0);
1520 			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1521 					     0, 0, &pip);
1522 			if (error)
1523 				goto error_rele;
1524 		}
1525 	} else {
1526 		flags |= XFS_QMOPT_SBVERSION;
1527 	}
1528 
1529 	/*
1530 	 * Create the three inodes, if they don't exist already. The changes
1531 	 * made above will get added to a transaction and logged in one of
1532 	 * the qino_alloc calls below.  If the device is readonly,
1533 	 * temporarily switch to read-write to do this.
1534 	 */
1535 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1536 		error = xfs_qm_qino_alloc(mp, &uip,
1537 					      flags | XFS_QMOPT_UQUOTA);
1538 		if (error)
1539 			goto error_rele;
1540 
1541 		flags &= ~XFS_QMOPT_SBVERSION;
1542 	}
1543 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1544 		error = xfs_qm_qino_alloc(mp, &gip,
1545 					  flags | XFS_QMOPT_GQUOTA);
1546 		if (error)
1547 			goto error_rele;
1548 
1549 		flags &= ~XFS_QMOPT_SBVERSION;
1550 	}
1551 	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1552 		error = xfs_qm_qino_alloc(mp, &pip,
1553 					  flags | XFS_QMOPT_PQUOTA);
1554 		if (error)
1555 			goto error_rele;
1556 	}
1557 
1558 	mp->m_quotainfo->qi_uquotaip = uip;
1559 	mp->m_quotainfo->qi_gquotaip = gip;
1560 	mp->m_quotainfo->qi_pquotaip = pip;
1561 
1562 	return 0;
1563 
1564 error_rele:
1565 	if (uip)
1566 		xfs_irele(uip);
1567 	if (gip)
1568 		xfs_irele(gip);
1569 	if (pip)
1570 		xfs_irele(pip);
1571 	return error;
1572 }
1573 
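/*
 * Drop the quota inode references held in the quotainfo structure.
 */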
1574 STATIC void
1575 xfs_qm_destroy_quotainos(
1576 	struct xfs_quotainfo	*qi)
1577 {
1578 	if (qi->qi_uquotaip) {
1579 		xfs_irele(qi->qi_uquotaip);
1580 		qi->qi_uquotaip = NULL; /* paranoia */
1581 	}
1582 	if (qi->qi_gquotaip) {
1583 		xfs_irele(qi->qi_gquotaip);
1584 		qi->qi_gquotaip = NULL;
1585 	}
1586 	if (qi->qi_pquotaip) {
1587 		xfs_irele(qi->qi_pquotaip);
1588 		qi->qi_pquotaip = NULL;
1589 	}
1590 }
1591 
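/*
 * Remove a reclaimed dquot from the radix tree and destroy it; called for
 * each dquot on the shrinker's dispose list.
 */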
1592 STATIC void
1593 xfs_qm_dqfree_one(
1594 	struct xfs_dquot	*dqp)
1595 {
1596 	struct xfs_mount	*mp = dqp->q_mount;
1597 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
1598 
1599 	mutex_lock(&qi->qi_tree_lock);
1600 	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1601 			  be32_to_cpu(dqp->q_core.d_id));
1602 
1603 	qi->qi_dquots--;
1604 	mutex_unlock(&qi->qi_tree_lock);
1605 
1606 	xfs_qm_dqdestroy(dqp);
1607 }
1608 
1609 /* --------------- utility functions for vnodeops ---------------- */
1610 
1611 
1612 /*
1613  * Given an inode, a uid, gid and prid, make sure that we have
1614  * allocated relevant dquot(s) on disk, and that we won't exceed inode
1615  * quotas by creating this file.
1616  * This also attaches dquot(s) to the given inode after locking it,
1617  * and returns the dquots corresponding to the uid and/or gid.
1618  *
1619  * in	: inode (unlocked)
1620  * out	: udquot, gdquot with references taken and unlocked
1621  */
1622 int
1623 xfs_qm_vop_dqalloc(
1624 	struct xfs_inode	*ip,
1625 	kuid_t			uid,
1626 	kgid_t			gid,
1627 	prid_t			prid,
1628 	uint			flags,
1629 	struct xfs_dquot	**O_udqpp,
1630 	struct xfs_dquot	**O_gdqpp,
1631 	struct xfs_dquot	**O_pdqpp)
1632 {
1633 	struct xfs_mount	*mp = ip->i_mount;
1634 	struct inode		*inode = VFS_I(ip);
1635 	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
1636 	struct xfs_dquot	*uq = NULL;
1637 	struct xfs_dquot	*gq = NULL;
1638 	struct xfs_dquot	*pq = NULL;
1639 	int			error;
1640 	uint			lockflags;
1641 
1642 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1643 		return 0;
1644 
1645 	lockflags = XFS_ILOCK_EXCL;
1646 	xfs_ilock(ip, lockflags);
1647 
1648 	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1649 		gid = inode->i_gid;
1650 
1651 	/*
1652 	 * Attach the dquot(s) to this inode, doing a dquot allocation
1653 	 * if necessary. The dquot(s) will not be locked.
1654 	 */
1655 	if (XFS_NOT_DQATTACHED(mp, ip)) {
1656 		error = xfs_qm_dqattach_locked(ip, true);
1657 		if (error) {
1658 			xfs_iunlock(ip, lockflags);
1659 			return error;
1660 		}
1661 	}
1662 
1663 	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1664 		if (!uid_eq(inode->i_uid, uid)) {
1665 			/*
1666 			 * What we need is the dquot that has this uid, and
1667 			 * if we send the inode to dqget, the uid of the inode
1668 			 * takes priority over what's sent in the uid argument.
1669 			 * We must unlock inode here before calling dqget if
1670 			 * we're not sending the inode, because otherwise
1671 			 * we'll deadlock by doing trans_reserve while
1672 			 * holding ilock.
1673 			 */
1674 			xfs_iunlock(ip, lockflags);
1675 			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1676 					XFS_DQ_USER, true, &uq);
1677 			if (error) {
1678 				ASSERT(error != -ENOENT);
1679 				return error;
1680 			}
1681 			/*
1682 			 * Get the ilock in the right order.
1683 			 */
1684 			xfs_dqunlock(uq);
1685 			lockflags = XFS_ILOCK_SHARED;
1686 			xfs_ilock(ip, lockflags);
1687 		} else {
1688 			/*
1689 			 * Take an extra reference, because we'll return
1690 			 * this to the caller.
1691 			 */
1692 			ASSERT(ip->i_udquot);
1693 			uq = xfs_qm_dqhold(ip->i_udquot);
1694 		}
1695 	}
1696 	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1697 		if (!gid_eq(inode->i_gid, gid)) {
1698 			xfs_iunlock(ip, lockflags);
1699 			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1700 					XFS_DQ_GROUP, true, &gq);
1701 			if (error) {
1702 				ASSERT(error != -ENOENT);
1703 				goto error_rele;
1704 			}
1705 			xfs_dqunlock(gq);
1706 			lockflags = XFS_ILOCK_SHARED;
1707 			xfs_ilock(ip, lockflags);
1708 		} else {
1709 			ASSERT(ip->i_gdquot);
1710 			gq = xfs_qm_dqhold(ip->i_gdquot);
1711 		}
1712 	}
1713 	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1714 		if (ip->i_d.di_projid != prid) {
1715 			xfs_iunlock(ip, lockflags);
1716 			error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
1717 					true, &pq);
1718 			if (error) {
1719 				ASSERT(error != -ENOENT);
1720 				goto error_rele;
1721 			}
1722 			xfs_dqunlock(pq);
1723 			lockflags = XFS_ILOCK_SHARED;
1724 			xfs_ilock(ip, lockflags);
1725 		} else {
1726 			ASSERT(ip->i_pdquot);
1727 			pq = xfs_qm_dqhold(ip->i_pdquot);
1728 		}
1729 	}
1730 	trace_xfs_dquot_dqalloc(ip);
1731 
1732 	xfs_iunlock(ip, lockflags);
1733 	if (O_udqpp)
1734 		*O_udqpp = uq;
1735 	else
1736 		xfs_qm_dqrele(uq);
1737 	if (O_gdqpp)
1738 		*O_gdqpp = gq;
1739 	else
1740 		xfs_qm_dqrele(gq);
1741 	if (O_pdqpp)
1742 		*O_pdqpp = pq;
1743 	else
1744 		xfs_qm_dqrele(pq);
1745 	return 0;
1746 
1747 error_rele:
1748 	xfs_qm_dqrele(gq);
1749 	xfs_qm_dqrele(uq);
1750 	return error;
1751 }
1752 
1753 /*
1754  * Actually transfer ownership, and do dquot modifications.
1755  * These were already reserved.
1756  */
1757 struct xfs_dquot *
1758 xfs_qm_vop_chown(
1759 	struct xfs_trans	*tp,
1760 	struct xfs_inode	*ip,
1761 	struct xfs_dquot	**IO_olddq,
1762 	struct xfs_dquot	*newdq)
1763 {
1764 	struct xfs_dquot	*prevdq;
1765 	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
1766 				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1767 
1768 
1769 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1770 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1771 
1772 	/* old dquot */
1773 	prevdq = *IO_olddq;
1774 	ASSERT(prevdq);
1775 	ASSERT(prevdq != newdq);
1776 
1777 	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1778 	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1779 
1780 	/* the sparkling new dquot */
1781 	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1782 	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1783 
1784 	/*
1785 	 * Take an extra reference, because the inode is going to keep
1786 	 * this dquot pointer even after the trans_commit.
1787 	 */
1788 	*IO_olddq = xfs_qm_dqhold(newdq);
1789 
1790 	return prevdq;
1791 }
1792 
1793 /*
1794  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1795  */
1796 int
1797 xfs_qm_vop_chown_reserve(
1798 	struct xfs_trans	*tp,
1799 	struct xfs_inode	*ip,
1800 	struct xfs_dquot	*udqp,
1801 	struct xfs_dquot	*gdqp,
1802 	struct xfs_dquot	*pdqp,
1803 	uint			flags)
1804 {
1805 	struct xfs_mount	*mp = ip->i_mount;
1806 	uint64_t		delblks;
1807 	unsigned int		blkflags;
1808 	struct xfs_dquot	*udq_unres = NULL;
1809 	struct xfs_dquot	*gdq_unres = NULL;
1810 	struct xfs_dquot	*pdq_unres = NULL;
1811 	struct xfs_dquot	*udq_delblks = NULL;
1812 	struct xfs_dquot	*gdq_delblks = NULL;
1813 	struct xfs_dquot	*pdq_delblks = NULL;
1814 	int			error;
1815 
1816 
1817 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1818 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1819 
1820 	delblks = ip->i_delayed_blks;
1821 	blkflags = XFS_IS_REALTIME_INODE(ip) ?
1822 			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1823 
1824 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1825 	    i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
1826 		udq_delblks = udqp;
1827 		/*
1828 		 * If there are delayed allocation blocks, then we have to
1829 		 * unreserve those from the old dquot, and add them to the
1830 		 * new dquot.
1831 		 */
1832 		if (delblks) {
1833 			ASSERT(ip->i_udquot);
1834 			udq_unres = ip->i_udquot;
1835 		}
1836 	}
1837 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1838 	    i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
1839 		gdq_delblks = gdqp;
1840 		if (delblks) {
1841 			ASSERT(ip->i_gdquot);
1842 			gdq_unres = ip->i_gdquot;
1843 		}
1844 	}
1845 
1846 	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1847 	    ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
1848 		pdq_delblks = pdqp;
1849 		if (delblks) {
1850 			ASSERT(ip->i_pdquot);
1851 			pdq_unres = ip->i_pdquot;
1852 		}
1853 	}
1854 
1855 	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1856 				udq_delblks, gdq_delblks, pdq_delblks,
1857 				ip->i_d.di_nblocks, 1, flags | blkflags);
1858 	if (error)
1859 		return error;
1860 
1861 	/*
1862 	 * Do the delayed blks reservations/unreservations now. Since these
1863 	 * are done without the help of a transaction, if a reservation fails,
1864 	 * its previous reservations won't be automatically undone by the
1865 	 * transaction code. So we have to do it manually here.
1866 	 */
1867 	if (delblks) {
1868 		/*
1869 		 * Do the reservations first. Unreservation can't fail.
1870 		 */
1871 		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1872 		ASSERT(udq_unres || gdq_unres || pdq_unres);
1873 		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1874 			    udq_delblks, gdq_delblks, pdq_delblks,
1875 			    (xfs_qcnt_t)delblks, 0, flags | blkflags);
1876 		if (error)
1877 			return error;
1878 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1879 				udq_unres, gdq_unres, pdq_unres,
1880 				-((xfs_qcnt_t)delblks), 0, blkflags);
1881 	}
1882 
1883 	return 0;
1884 }
1885 
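/*
 * Make sure each distinct inode involved in a rename has its dquots
 * attached before the rename transaction starts.
 */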
1886 int
1887 xfs_qm_vop_rename_dqattach(
1888 	struct xfs_inode	**i_tab)
1889 {
1890 	struct xfs_mount	*mp = i_tab[0]->i_mount;
1891 	int			i;
1892 
1893 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1894 		return 0;
1895 
1896 	for (i = 0; (i < 4 && i_tab[i]); i++) {
1897 		struct xfs_inode	*ip = i_tab[i];
1898 		int			error;
1899 
1900 		/*
1901 		 * Watch out for duplicate entries in the table.
1902 		 */
1903 		if (i == 0 || ip != i_tab[i-1]) {
1904 			if (XFS_NOT_DQATTACHED(mp, ip)) {
1905 				error = xfs_qm_dqattach(ip);
1906 				if (error)
1907 					return error;
1908 			}
1909 		}
1910 	}
1911 	return 0;
1912 }
1913 
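/*
 * Attach the dquots that were reserved for a newly created inode and
 * account the new inode against them.
 */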
1914 void
1915 xfs_qm_vop_create_dqattach(
1916 	struct xfs_trans	*tp,
1917 	struct xfs_inode	*ip,
1918 	struct xfs_dquot	*udqp,
1919 	struct xfs_dquot	*gdqp,
1920 	struct xfs_dquot	*pdqp)
1921 {
1922 	struct xfs_mount	*mp = tp->t_mountp;
1923 
1924 	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1925 		return;
1926 
1927 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1928 
1929 	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1930 		ASSERT(ip->i_udquot == NULL);
1931 		ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
1932 
1933 		ip->i_udquot = xfs_qm_dqhold(udqp);
1934 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1935 	}
1936 	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1937 		ASSERT(ip->i_gdquot == NULL);
1938 		ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
1939 
1940 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
1941 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1942 	}
1943 	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1944 		ASSERT(ip->i_pdquot == NULL);
1945 		ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
1946 
1947 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
1948 		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1949 	}
1950 }
1951 
1952