// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_cache		*xfs_dqtrx_cache;
static struct kmem_cache	*xfs_dquot_cache;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_dquot_cache, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(q, xfs_dquot_type(dq));

	if (!dq->q_blk.softlimit) {
		dq->q_blk.softlimit = defq->blk.soft;
		prealloc = 1;
	}
	if (!dq->q_blk.hardlimit) {
		dq->q_blk.hardlimit = defq->blk.hard;
		prealloc = 1;
	}
	if (!dq->q_ino.softlimit)
		dq->q_ino.softlimit = defq->ino.soft;
	if (!dq->q_ino.hardlimit)
		dq->q_ino.hardlimit = defq->ino.hard;
	if (!dq->q_rtb.softlimit)
		dq->q_rtb.softlimit = defq->rtb.soft;
	if (!dq->q_rtb.hardlimit)
		dq->q_rtb.hardlimit = defq->rtb.hard;

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
	struct xfs_mount	*mp,
	time64_t		timeout)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	return clamp_t(time64_t, timeout, qi->qi_expiry_min,
			qi->qi_expiry_max);
}

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
	time64_t		grace)
{
	return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		res->timer = 0;
	}
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we just don't reject any quota
 * reservations in that case, and we report the timers as zero in
 * Q_GETQUOTA calls.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

	xfs_qm_adjust_res_timer(mp, &dq->q_blk, &defq->blk);
	xfs_qm_adjust_res_timer(mp, &dq->q_ino, &defq->ino);
	xfs_qm_adjust_res_timer(mp, &dq->q_rtb, &defq->rtb);
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

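	/* Map the quota type to its quotacheck flag and buffer log item type. */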
	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - IDs are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		if (curid > 0 && xfs_has_bigtime(mp))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
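		/* Stamp v5 dquots with the filesystem metadata UUID and a CRC. */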
		if (xfs_has_crc(mp)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but uninitialized dquot
	 * blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t		space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

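	/*
	 * Precompute 1%, 3% and 5% of the high watermark; the buffered write
	 * path uses these thresholds to throttle speculative preallocation
	 * as a user approaches the hard limit.
	 */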
	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, quotip, 0);

	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this type of quota was turned off while we didn't
		 * have the inode lock.
		 */
		error = -ESRCH;
		goto err_cancel;
	}

	error = xfs_iext_count_may_overflow(quotip, XFS_DATA_FORK,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, quotip,
				XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto err_cancel;

	/* Create the block mapping. */
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		goto err_cancel;

	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		goto err_cancel;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller.  The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call.  We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot.  If the _defer_finish fails, the
	 * old transaction is gone but the new buffer is not joined or held to
	 * any transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction.  The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction.  On error, the buffer is
	 * released and not passed back.
	 *
	 * Keep the quota inode ILOCKed until after the transaction commit to
	 * maintain the atomicity of bmap/rmap updates.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	if (error) {
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
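	/* File offset (in filesystem blocks) of the chunk holding this id. */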
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(struct xfs_dqblk);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* uses the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Check the ondisk dquot's id and type match what the incore dquot expects. */
static bool
xfs_dquot_check_type(
	struct xfs_dquot	*dqp,
	struct xfs_disk_dquot	*ddqp)
{
	uint8_t			ddqp_type;
	uint8_t			dqp_type;

	ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
	dqp_type = xfs_dquot_type(dqp);

	if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
		return false;

	/*
	 * V5 filesystems always expect an exact type match.  V4 filesystems
	 * expect an exact match for user dquots and for non-root group and
	 * project dquots.
	 */
	if (xfs_has_crc(dqp->q_mount) ||
	    dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
		return ddqp_type == dqp_type;

	/*
	 * V4 filesystems support either group or project quotas, but not both
	 * at the same time.  The non-user quota file can be switched between
	 * group and project quota uses depending on the mount options, which
	 * means that we can encounter the other type when we try to load quota
	 * defaults.  Quotacheck will soon reset the entire quota file
	 * (including the root dquot) anyway, but don't log scary corruption
	 * reports to dmesg.
	 */
	return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
}

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*dqb = xfs_buf_offset(bp, dqp->q_bufoffset);
	struct xfs_disk_dquot	*ddqp = &dqb->dd_diskdq;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if (!xfs_dquot_check_type(dqp, ddqp)) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
	dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
	dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

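	/* Warning counts are no longer maintained; always write zeroes. */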
	ddqp->d_bwarns = 0;
	ddqp->d_iwarns = 0;
	ddqp->d_rtbwarns = 0;

	ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
	ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
	ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}

/*
 * Read in the ondisk dquot, copy it to an incore version, and release the
 * buffer immediately.  If @can_alloc is true, fill any holes in the on-disk
 * metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_dquot_disk_alloc(dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer.  Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
	if (error) {
		xfs_iunlock(quotip, lock_flags);
		return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
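		/*
		 * The dquot is being torn down by reclaim.  Drop our locks,
		 * give it a moment to disappear, and retry the lookup.
		 */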
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata.  This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

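	/* can_alloc is false: never allocate quota blocks on this path. */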
908114e73ccSDarrick J. Wong return xfs_qm_dqread(mp, id, type, 0, dqpp);
909114e73ccSDarrick J. Wong }
910114e73ccSDarrick J. Wong
9114882c19dSDarrick J. Wong /* Return the quota id for a given inode and type. */
9124882c19dSDarrick J. Wong xfs_dqid_t
xfs_qm_id_for_quotatype(struct xfs_inode * ip,xfs_dqtype_t type)9134882c19dSDarrick J. Wong xfs_qm_id_for_quotatype(
9144882c19dSDarrick J. Wong struct xfs_inode *ip,
9151a7ed271SDarrick J. Wong xfs_dqtype_t type)
9164882c19dSDarrick J. Wong {
9174882c19dSDarrick J. Wong switch (type) {
9188cd4901dSDarrick J. Wong case XFS_DQTYPE_USER:
919ba8adad5SChristoph Hellwig return i_uid_read(VFS_I(ip));
9208cd4901dSDarrick J. Wong case XFS_DQTYPE_GROUP:
921ba8adad5SChristoph Hellwig return i_gid_read(VFS_I(ip));
9228cd4901dSDarrick J. Wong case XFS_DQTYPE_PROJ:
923ceaf603cSChristoph Hellwig return ip->i_projid;
9244882c19dSDarrick J. Wong }
9254882c19dSDarrick J. Wong ASSERT(0);
9264882c19dSDarrick J. Wong return 0;
9274882c19dSDarrick J. Wong }
9284882c19dSDarrick J. Wong
9294882c19dSDarrick J. Wong /*
9304882c19dSDarrick J. Wong * Return the dquot for a given inode and type. If @can_alloc is true, then
9314882c19dSDarrick J. Wong * allocate blocks if needed. The inode's ILOCK must be held and it must not
9324882c19dSDarrick J. Wong * have already had an inode attached.
9334882c19dSDarrick J. Wong */
9344882c19dSDarrick J. Wong int
xfs_qm_dqget_inode(struct xfs_inode * ip,xfs_dqtype_t type,bool can_alloc,struct xfs_dquot ** O_dqpp)9354882c19dSDarrick J. Wong xfs_qm_dqget_inode(
9364882c19dSDarrick J. Wong struct xfs_inode *ip,
9371a7ed271SDarrick J. Wong xfs_dqtype_t type,
9384882c19dSDarrick J. Wong bool can_alloc,
9394882c19dSDarrick J. Wong struct xfs_dquot **O_dqpp)
9404882c19dSDarrick J. Wong {
9414882c19dSDarrick J. Wong struct xfs_mount *mp = ip->i_mount;
9424882c19dSDarrick J. Wong struct xfs_quotainfo *qi = mp->m_quotainfo;
9434882c19dSDarrick J. Wong struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
9444882c19dSDarrick J. Wong struct xfs_dquot *dqp;
9454882c19dSDarrick J. Wong xfs_dqid_t id;
9464882c19dSDarrick J. Wong int error;
9474882c19dSDarrick J. Wong
9484882c19dSDarrick J. Wong error = xfs_qm_dqget_checks(mp, type);
9494882c19dSDarrick J. Wong if (error)
9504882c19dSDarrick J. Wong return error;
9514882c19dSDarrick J. Wong
952c59d87c4SChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
95336731410SChandra Seetharaman ASSERT(xfs_inode_dquot(ip, type) == NULL);
9544882c19dSDarrick J. Wong
9554882c19dSDarrick J. Wong id = xfs_qm_id_for_quotatype(ip, type);
95692678554SChristoph Hellwig
95792678554SChristoph Hellwig restart:
958cc2047c4SDarrick J. Wong dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
9599f920f11SChristoph Hellwig if (dqp) {
9609f920f11SChristoph Hellwig *O_dqpp = dqp;
9619f920f11SChristoph Hellwig return 0;
9629f920f11SChristoph Hellwig }
9639f920f11SChristoph Hellwig
964c59d87c4SChristoph Hellwig /*
965c59d87c4SChristoph Hellwig * Dquot cache miss. We don't want to keep the inode lock across
966c59d87c4SChristoph Hellwig * a (potential) disk read. Also we don't want to deal with the lock
967c59d87c4SChristoph Hellwig * ordering between quotainode and this inode. OTOH, dropping the inode
968c59d87c4SChristoph Hellwig * lock here means dealing with a chown that can happen before
969c59d87c4SChristoph Hellwig * we re-acquire the lock.
970c59d87c4SChristoph Hellwig */
971c59d87c4SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_EXCL);
97230ab2dcfSDarrick J. Wong error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
973c59d87c4SChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_EXCL);
9747ae44407SChristoph Hellwig if (error)
9757ae44407SChristoph Hellwig return error;
976c59d87c4SChristoph Hellwig
977c59d87c4SChristoph Hellwig /*
9784882c19dSDarrick J. Wong * A dquot could be attached to this inode by now, since we had
9794882c19dSDarrick J. Wong * dropped the ilock.
980c59d87c4SChristoph Hellwig */
98136731410SChandra Seetharaman if (xfs_this_quota_on(mp, type)) {
9829f920f11SChristoph Hellwig struct xfs_dquot *dqp1;
9839f920f11SChristoph Hellwig
98436731410SChandra Seetharaman dqp1 = xfs_inode_dquot(ip, type);
98536731410SChandra Seetharaman if (dqp1) {
986c59d87c4SChristoph Hellwig xfs_qm_dqdestroy(dqp);
98736731410SChandra Seetharaman dqp = dqp1;
988c59d87c4SChristoph Hellwig xfs_dqlock(dqp);
989c59d87c4SChristoph Hellwig goto dqret;
990c59d87c4SChristoph Hellwig }
991c59d87c4SChristoph Hellwig } else {
992c59d87c4SChristoph Hellwig /* inode stays locked on return */
993c59d87c4SChristoph Hellwig xfs_qm_dqdestroy(dqp);
9942451337dSDave Chinner return -ESRCH;
995c59d87c4SChristoph Hellwig }
996c59d87c4SChristoph Hellwig
997cc2047c4SDarrick J. Wong error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
998cc2047c4SDarrick J. Wong if (error) {
999c59d87c4SChristoph Hellwig /*
10009f920f11SChristoph Hellwig * Duplicate found. Just throw away the new dquot and start
10019f920f11SChristoph Hellwig * over.
1002c59d87c4SChristoph Hellwig */
1003c59d87c4SChristoph Hellwig xfs_qm_dqdestroy(dqp);
1004ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_qm_dquot_dups);
100592678554SChristoph Hellwig goto restart;
1006c59d87c4SChristoph Hellwig }
1007c59d87c4SChristoph Hellwig
1008c59d87c4SChristoph Hellwig dqret:
10094882c19dSDarrick J. Wong ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1010c59d87c4SChristoph Hellwig trace_xfs_dqget_miss(dqp);
1011c59d87c4SChristoph Hellwig *O_dqpp = dqp;
1012d99831ffSEric Sandeen return 0;
1013c59d87c4SChristoph Hellwig }
1014c59d87c4SChristoph Hellwig
1015f8739c3cSChristoph Hellwig /*
10162e330e76SDarrick J. Wong * Starting at @id and progressing upwards, look for an initialized incore
10172e330e76SDarrick J. Wong * dquot, lock it, and return it.
10182e330e76SDarrick J. Wong */
10192e330e76SDarrick J. Wong int
xfs_qm_dqget_next(struct xfs_mount * mp,xfs_dqid_t id,xfs_dqtype_t type,struct xfs_dquot ** dqpp)10202e330e76SDarrick J. Wong xfs_qm_dqget_next(
10212e330e76SDarrick J. Wong struct xfs_mount *mp,
10222e330e76SDarrick J. Wong xfs_dqid_t id,
10231a7ed271SDarrick J. Wong xfs_dqtype_t type,
10242e330e76SDarrick J. Wong struct xfs_dquot **dqpp)
10252e330e76SDarrick J. Wong {
10262e330e76SDarrick J. Wong struct xfs_dquot *dqp;
10272e330e76SDarrick J. Wong int error = 0;
10282e330e76SDarrick J. Wong
10292e330e76SDarrick J. Wong *dqpp = NULL;
10302e330e76SDarrick J. Wong for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
103130ab2dcfSDarrick J. Wong error = xfs_qm_dqget(mp, id, type, false, &dqp);
10322e330e76SDarrick J. Wong if (error == -ENOENT)
10332e330e76SDarrick J. Wong continue;
10342e330e76SDarrick J. Wong else if (error != 0)
10352e330e76SDarrick J. Wong break;
10362e330e76SDarrick J. Wong
10372e330e76SDarrick J. Wong if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
10382e330e76SDarrick J. Wong *dqpp = dqp;
10392e330e76SDarrick J. Wong return 0;
10402e330e76SDarrick J. Wong }
10412e330e76SDarrick J. Wong
10422e330e76SDarrick J. Wong xfs_qm_dqput(dqp);
10432e330e76SDarrick J. Wong }
10442e330e76SDarrick J. Wong
10452e330e76SDarrick J. Wong return error;
10462e330e76SDarrick J. Wong }
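
/*
 * For an example of this interface in use, see xfs_qm_dqiterate() below,
 * which walks every initialized dquot of a given type via repeated
 * xfs_qm_dqget_next() calls.
 */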
10472e330e76SDarrick J. Wong
10482e330e76SDarrick J. Wong /*
1049f8739c3cSChristoph Hellwig * Release a reference to the dquot (decrement ref-count) and unlock it.
1050f8739c3cSChristoph Hellwig *
1051f8739c3cSChristoph Hellwig * If there is a group quota attached to this dquot, carefully release
1052f8739c3cSChristoph Hellwig * that too without risking deadlock.
1053f8739c3cSChristoph Hellwig */
1054f8739c3cSChristoph Hellwig void
1055f8739c3cSChristoph Hellwig xfs_qm_dqput(
1056f8739c3cSChristoph Hellwig struct xfs_dquot *dqp)
1057f8739c3cSChristoph Hellwig {
1058f8739c3cSChristoph Hellwig ASSERT(dqp->q_nrefs > 0);
1059f8739c3cSChristoph Hellwig ASSERT(XFS_DQ_IS_LOCKED(dqp));
1060f8739c3cSChristoph Hellwig
1061f8739c3cSChristoph Hellwig trace_xfs_dqput(dqp);
1062f8739c3cSChristoph Hellwig
10633c353375SDave Chinner if (--dqp->q_nrefs == 0) {
10643c353375SDave Chinner struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
10653c353375SDave Chinner trace_xfs_dqput_free(dqp);
10663c353375SDave Chinner
10673c353375SDave Chinner if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
1068ff6d6af2SBill O'Donnell XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
10693c353375SDave Chinner }
1070f8739c3cSChristoph Hellwig xfs_dqunlock(dqp);
1071c59d87c4SChristoph Hellwig }
1072c59d87c4SChristoph Hellwig
1073c59d87c4SChristoph Hellwig /*
1074c59d87c4SChristoph Hellwig * Release a dquot. Flush it if dirty, then dqput() it.
1075c59d87c4SChristoph Hellwig * dquot must not be locked.
1076c59d87c4SChristoph Hellwig */
1077c59d87c4SChristoph Hellwig void
1078c59d87c4SChristoph Hellwig xfs_qm_dqrele(
1079aefe69a4SPavel Reichl struct xfs_dquot *dqp)
1080c59d87c4SChristoph Hellwig {
1081c59d87c4SChristoph Hellwig if (!dqp)
1082c59d87c4SChristoph Hellwig return;
1083c59d87c4SChristoph Hellwig
1084c59d87c4SChristoph Hellwig trace_xfs_dqrele(dqp);
1085c59d87c4SChristoph Hellwig
1086c59d87c4SChristoph Hellwig xfs_dqlock(dqp);
1087c59d87c4SChristoph Hellwig /*
1088c59d87c4SChristoph Hellwig * We don't flush the dquot here even if it is dirty, as that
1089c59d87c4SChristoph Hellwig * would create stutters that we want to avoid. Instead we do a
1090c59d87c4SChristoph Hellwig * delayed write when we try to reclaim a dirty dquot. Also
1091c59d87c4SChristoph Hellwig * xfs_sync will take part of the burden...
1092c59d87c4SChristoph Hellwig */
1093c59d87c4SChristoph Hellwig xfs_qm_dqput(dqp);
1094c59d87c4SChristoph Hellwig }
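
/*
 * Illustrative sketch (an editorial addition): because xfs_qm_dqrele()
 * tolerates a NULL pointer, detach-style callers can release all of an
 * inode's dquot references unconditionally, e.g.:
 *
 *	xfs_qm_dqrele(ip->i_udquot);
 *	xfs_qm_dqrele(ip->i_gdquot);
 *	xfs_qm_dqrele(ip->i_pdquot);
 */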
1095c59d87c4SChristoph Hellwig
1096c59d87c4SChristoph Hellwig /*
1097c59d87c4SChristoph Hellwig * This is the dquot flushing I/O completion routine. It is called
1098c59d87c4SChristoph Hellwig * from interrupt level when the buffer containing the dquot is
1099c59d87c4SChristoph Hellwig * flushed to disk. It is responsible for removing the dquot logitem
1100c59d87c4SChristoph Hellwig * from the AIL if it has not been re-logged, and unlocking the dquot's
1101c59d87c4SChristoph Hellwig * flush lock. This behavior is very similar to that of inodes.
1102c59d87c4SChristoph Hellwig */
11036f5de180SDave Chinner static void
1104c59d87c4SChristoph Hellwig xfs_qm_dqflush_done(
1105c59d87c4SChristoph Hellwig struct xfs_log_item *lip)
1106c59d87c4SChristoph Hellwig {
1107fd8b81dbSPavel Reichl struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
1108aefe69a4SPavel Reichl struct xfs_dquot *dqp = qip->qli_dquot;
1109c59d87c4SChristoph Hellwig struct xfs_ail *ailp = lip->li_ailp;
1110849274c1SBrian Foster xfs_lsn_t tail_lsn;
1111c59d87c4SChristoph Hellwig
1112c59d87c4SChristoph Hellwig /*
1113c59d87c4SChristoph Hellwig * We only want to pull the item from the AIL if its location in
1114c59d87c4SChristoph Hellwig * the log has not changed since we started the flush. Thus, we
1115c59d87c4SChristoph Hellwig * only bother if the dquot's lsn has not changed. First we check
1116c59d87c4SChristoph Hellwig * the lsn outside the lock since it's cheaper, and then we
1117c59d87c4SChristoph Hellwig * recheck while holding the lock before removing the dquot from
1118c59d87c4SChristoph Hellwig * the AIL.
1119c59d87c4SChristoph Hellwig */
112022525c17SDave Chinner if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
1121373b0589SCarlos Maiolino ((lip->li_lsn == qip->qli_flush_lsn) ||
112222525c17SDave Chinner test_bit(XFS_LI_FAILED, &lip->li_flags))) {
1123c59d87c4SChristoph Hellwig
112457e80956SMatthew Wilcox spin_lock(&ailp->ail_lock);
1125e98084b8SDave Chinner xfs_clear_li_failed(lip);
1126373b0589SCarlos Maiolino if (lip->li_lsn == qip->qli_flush_lsn) {
1127849274c1SBrian Foster /* xfs_ail_update_finish() drops the AIL lock */
1128849274c1SBrian Foster tail_lsn = xfs_ail_delete_one(ailp, lip);
1129849274c1SBrian Foster xfs_ail_update_finish(ailp, tail_lsn);
1130373b0589SCarlos Maiolino } else {
113157e80956SMatthew Wilcox spin_unlock(&ailp->ail_lock);
1132c59d87c4SChristoph Hellwig }
1133373b0589SCarlos Maiolino }
1134c59d87c4SChristoph Hellwig
1135c59d87c4SChristoph Hellwig /*
1136c59d87c4SChristoph Hellwig * Release the dq's flush lock since we're done with it.
1137c59d87c4SChristoph Hellwig */
1138c59d87c4SChristoph Hellwig xfs_dqfunlock(dqp);
1139c59d87c4SChristoph Hellwig }
1140c59d87c4SChristoph Hellwig
11416f5de180SDave Chinner void
1142664ffb8aSChristoph Hellwig xfs_buf_dquot_iodone(
11436f5de180SDave Chinner struct xfs_buf *bp)
11446f5de180SDave Chinner {
11456f5de180SDave Chinner struct xfs_log_item *lip, *n;
11466f5de180SDave Chinner
11476f5de180SDave Chinner list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
11486f5de180SDave Chinner list_del_init(&lip->li_bio_list);
11496f5de180SDave Chinner xfs_qm_dqflush_done(lip);
11506f5de180SDave Chinner }
11516f5de180SDave Chinner }
11526f5de180SDave Chinner
1153664ffb8aSChristoph Hellwig void
1154664ffb8aSChristoph Hellwig xfs_buf_dquot_io_fail(
1155664ffb8aSChristoph Hellwig struct xfs_buf *bp)
1156664ffb8aSChristoph Hellwig {
1157664ffb8aSChristoph Hellwig struct xfs_log_item *lip;
1158664ffb8aSChristoph Hellwig
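	/*
	 * Mark every attached dquot log item failed so the AIL knows the
	 * writeback attempt did not succeed and will retry it later.
	 */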
1159664ffb8aSChristoph Hellwig spin_lock(&bp->b_mount->m_ail->ail_lock);
1160664ffb8aSChristoph Hellwig list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1161664ffb8aSChristoph Hellwig xfs_set_li_failed(lip, bp);
1162664ffb8aSChristoph Hellwig spin_unlock(&bp->b_mount->m_ail->ail_lock);
1163664ffb8aSChristoph Hellwig }
1164664ffb8aSChristoph Hellwig
11650b0fa1d1SDarrick J. Wong /* Check incore dquot for errors before we flush. */
11660b0fa1d1SDarrick J. Wong static xfs_failaddr_t
11670b0fa1d1SDarrick J. Wong xfs_qm_dqflush_check(
11680b0fa1d1SDarrick J. Wong struct xfs_dquot *dqp)
11690b0fa1d1SDarrick J. Wong {
11701a7ed271SDarrick J. Wong xfs_dqtype_t type = xfs_dquot_type(dqp);
11710b0fa1d1SDarrick J. Wong
11728cd4901dSDarrick J. Wong if (type != XFS_DQTYPE_USER &&
11738cd4901dSDarrick J. Wong type != XFS_DQTYPE_GROUP &&
11748cd4901dSDarrick J. Wong type != XFS_DQTYPE_PROJ)
11750b0fa1d1SDarrick J. Wong return __this_address;
11760b0fa1d1SDarrick J. Wong
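	/*
	 * The root dquot (id 0) carries the default limits, so the limit
	 * and timer consistency checks below do not apply to it.
	 */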
1177d3537cf9SDarrick J. Wong if (dqp->q_id == 0)
1178d3537cf9SDarrick J. Wong return NULL;
1179d3537cf9SDarrick J. Wong
1180be37d40cSDarrick J. Wong if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
118119dce7eaSDarrick J. Wong !dqp->q_blk.timer)
1182d3537cf9SDarrick J. Wong return __this_address;
1183d3537cf9SDarrick J. Wong
1184be37d40cSDarrick J. Wong if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
118519dce7eaSDarrick J. Wong !dqp->q_ino.timer)
1186d3537cf9SDarrick J. Wong return __this_address;
1187d3537cf9SDarrick J. Wong
1188be37d40cSDarrick J. Wong if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
118919dce7eaSDarrick J. Wong !dqp->q_rtb.timer)
1190d3537cf9SDarrick J. Wong return __this_address;
1191d3537cf9SDarrick J. Wong
11924ea1ff3bSDarrick J. Wong /* bigtime flag should never be set on root dquots */
11934ea1ff3bSDarrick J. Wong if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
119438c26bfdSDave Chinner if (!xfs_has_bigtime(dqp->q_mount))
11954ea1ff3bSDarrick J. Wong return __this_address;
11964ea1ff3bSDarrick J. Wong if (dqp->q_id == 0)
11974ea1ff3bSDarrick J. Wong return __this_address;
11984ea1ff3bSDarrick J. Wong }
11994ea1ff3bSDarrick J. Wong
12000b0fa1d1SDarrick J. Wong return NULL;
12010b0fa1d1SDarrick J. Wong }
12020b0fa1d1SDarrick J. Wong
1203c59d87c4SChristoph Hellwig /*
1204c59d87c4SChristoph Hellwig * Write a modified dquot to disk.
1205c59d87c4SChristoph Hellwig * The dquot must be locked and the flush lock held by the caller.
1206c59d87c4SChristoph Hellwig * The flush lock will not be unlocked until the dquot reaches the disk,
1207c59d87c4SChristoph Hellwig * but the dquot is free to be unlocked and modified by the caller
1208c59d87c4SChristoph Hellwig * in the interim. Dquot is still locked on return. This behavior is
1209c59d87c4SChristoph Hellwig * identical to that of inodes.
1210c59d87c4SChristoph Hellwig */
1211c59d87c4SChristoph Hellwig int
1212c59d87c4SChristoph Hellwig xfs_qm_dqflush(
1213fe7257fdSChristoph Hellwig struct xfs_dquot *dqp,
1214fe7257fdSChristoph Hellwig struct xfs_buf **bpp)
1215c59d87c4SChristoph Hellwig {
1216c59d87c4SChristoph Hellwig struct xfs_mount *mp = dqp->q_mount;
1217b707fffdSBrian Foster struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
1218c59d87c4SChristoph Hellwig struct xfs_buf *bp;
121951dbb1beSDarrick J. Wong struct xfs_dqblk *dqblk;
1220eebf3cabSDarrick J. Wong xfs_failaddr_t fa;
1221c59d87c4SChristoph Hellwig int error;
1222c59d87c4SChristoph Hellwig
1223c59d87c4SChristoph Hellwig ASSERT(XFS_DQ_IS_LOCKED(dqp));
1224c59d87c4SChristoph Hellwig ASSERT(!completion_done(&dqp->q_flush));
1225c59d87c4SChristoph Hellwig
1226c59d87c4SChristoph Hellwig trace_xfs_dqflush(dqp);
1227c59d87c4SChristoph Hellwig
1228fe7257fdSChristoph Hellwig *bpp = NULL;
1229fe7257fdSChristoph Hellwig
1230c59d87c4SChristoph Hellwig xfs_qm_dqunpin_wait(dqp);
1231c59d87c4SChristoph Hellwig
1232c59d87c4SChristoph Hellwig /*
1233c59d87c4SChristoph Hellwig * Get the buffer containing the on-disk dquot
1234c59d87c4SChristoph Hellwig */
1235c59d87c4SChristoph Hellwig error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
12368d3d7e2bSBrian Foster mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
12378d3d7e2bSBrian Foster &bp, &xfs_dquot_buf_ops);
1238b707fffdSBrian Foster if (error == -EAGAIN)
1239fe7257fdSChristoph Hellwig goto out_unlock;
1240b707fffdSBrian Foster if (error)
1241b707fffdSBrian Foster goto out_abort;
1242c59d87c4SChristoph Hellwig
12430b0fa1d1SDarrick J. Wong fa = xfs_qm_dqflush_check(dqp);
12440b0fa1d1SDarrick J. Wong if (fa) {
12450b0fa1d1SDarrick J. Wong xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
1246c51df733SDarrick J. Wong dqp->q_id, fa);
12470b0fa1d1SDarrick J. Wong xfs_buf_relse(bp);
12480b0fa1d1SDarrick J. Wong error = -EFSCORRUPTED;
12490b0fa1d1SDarrick J. Wong goto out_abort;
12500b0fa1d1SDarrick J. Wong }
12510b0fa1d1SDarrick J. Wong
125251dbb1beSDarrick J. Wong /* Flush the incore dquot to the ondisk buffer. */
1253*d744e578SDarrick J. Wong dqblk = xfs_buf_offset(bp, dqp->q_bufoffset);
125451dbb1beSDarrick J. Wong xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);
1255c59d87c4SChristoph Hellwig
1256c59d87c4SChristoph Hellwig /*
1257c59d87c4SChristoph Hellwig * Clear the dirty field and remember the flush lsn for later use.
1258c59d87c4SChristoph Hellwig */
1259985a78fdSDarrick J. Wong dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
1260c59d87c4SChristoph Hellwig
1261c59d87c4SChristoph Hellwig xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1262c59d87c4SChristoph Hellwig &dqp->q_logitem.qli_item.li_lsn);
1263c59d87c4SChristoph Hellwig
1264c59d87c4SChristoph Hellwig /*
12653fe58f30SChristoph Hellwig * Copy the lsn into the on-disk dquot now while we have the in-memory
12663fe58f30SChristoph Hellwig * dquot here. This can't be done later in the write verifier as we
12673fe58f30SChristoph Hellwig * can't get access to the log item at that point in time.
12686fcdc59dSDave Chinner *
12696fcdc59dSDave Chinner * We also calculate the CRC here so that the on-disk dquot in the
12706fcdc59dSDave Chinner * buffer always has a valid CRC. This ensures there is no possibility
12716fcdc59dSDave Chinner * of a dquot without an up-to-date CRC getting to disk.
12723fe58f30SChristoph Hellwig */
127338c26bfdSDave Chinner if (xfs_has_crc(mp)) {
127451dbb1beSDarrick J. Wong dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
127551dbb1beSDarrick J. Wong xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
12766fcdc59dSDave Chinner XFS_DQUOT_CRC_OFF);
12773fe58f30SChristoph Hellwig }
12783fe58f30SChristoph Hellwig
12793fe58f30SChristoph Hellwig /*
12802ef3f7f5SDave Chinner * Attach the dquot to the buffer so that we can remove this dquot from
12812ef3f7f5SDave Chinner * the AIL and release the flush lock once the dquot is synced to disk.
1282c59d87c4SChristoph Hellwig */
12830c7e5afbSDave Chinner bp->b_flags |= _XBF_DQUOTS;
12842ef3f7f5SDave Chinner list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);
1285c59d87c4SChristoph Hellwig
1286c59d87c4SChristoph Hellwig /*
1287c59d87c4SChristoph Hellwig * If the buffer is pinned then push on the log so we won't
1288c59d87c4SChristoph Hellwig * get stuck waiting in the write for too long.
1289c59d87c4SChristoph Hellwig */
1290c59d87c4SChristoph Hellwig if (xfs_buf_ispinned(bp)) {
1291c59d87c4SChristoph Hellwig trace_xfs_dqflush_force(dqp);
1292c59d87c4SChristoph Hellwig xfs_log_force(mp, 0);
1293c59d87c4SChristoph Hellwig }
1294c59d87c4SChristoph Hellwig
1295c59d87c4SChristoph Hellwig trace_xfs_dqflush_done(dqp);
1296fe7257fdSChristoph Hellwig *bpp = bp;
1297fe7257fdSChristoph Hellwig return 0;
1298c59d87c4SChristoph Hellwig
1299b707fffdSBrian Foster out_abort:
1300985a78fdSDarrick J. Wong dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
13012b3cf093SBrian Foster xfs_trans_ail_delete(lip, 0);
1302b707fffdSBrian Foster xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1303fe7257fdSChristoph Hellwig out_unlock:
1304fe7257fdSChristoph Hellwig xfs_dqfunlock(dqp);
13058d3d7e2bSBrian Foster return error;
1306c59d87c4SChristoph Hellwig }
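
/*
 * Illustrative sketch (an editorial addition), loosely modeled on the
 * flush paths elsewhere in the quota code: the caller takes the flush
 * lock, flushes the dquot into its backing buffer, and queues that
 * buffer for delayed write.  "buffer_list" is an assumed caller-owned
 * delwri list:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */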
1307c59d87c4SChristoph Hellwig
1308c59d87c4SChristoph Hellwig /*
1309c59d87c4SChristoph Hellwig * Lock two xfs_dquot structures.
1310c59d87c4SChristoph Hellwig *
1311c59d87c4SChristoph Hellwig * To avoid deadlocks we always lock the quota structure with
1312c59d87c4SChristoph Hellwig * the lower id first.
1313c59d87c4SChristoph Hellwig */
1314c59d87c4SChristoph Hellwig void
1315c59d87c4SChristoph Hellwig xfs_dqlock2(
1316aefe69a4SPavel Reichl struct xfs_dquot *d1,
1317aefe69a4SPavel Reichl struct xfs_dquot *d2)
1318c59d87c4SChristoph Hellwig {
1319c59d87c4SChristoph Hellwig if (d1 && d2) {
1320c59d87c4SChristoph Hellwig ASSERT(d1 != d2);
1321c51df733SDarrick J. Wong if (d1->q_id > d2->q_id) {
1322c59d87c4SChristoph Hellwig mutex_lock(&d2->q_qlock);
1323c59d87c4SChristoph Hellwig mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1324c59d87c4SChristoph Hellwig } else {
1325c59d87c4SChristoph Hellwig mutex_lock(&d1->q_qlock);
1326c59d87c4SChristoph Hellwig mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1327c59d87c4SChristoph Hellwig }
1328c59d87c4SChristoph Hellwig } else if (d1) {
1329c59d87c4SChristoph Hellwig mutex_lock(&d1->q_qlock);
1330c59d87c4SChristoph Hellwig } else if (d2) {
1331c59d87c4SChristoph Hellwig mutex_lock(&d2->q_qlock);
1332c59d87c4SChristoph Hellwig }
1333c59d87c4SChristoph Hellwig }
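
/*
 * Illustrative sketch (an editorial addition): a caller holding both a
 * user and a group dquot (hypothetical "udqp" and "gdqp" locals) locks
 * the pair deadlock-safely with xfs_dqlock2() and drops each lock with
 * xfs_dqunlock():
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	... adjust both dquots ...
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 */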
1334c59d87c4SChristoph Hellwig
1335a05931ceSChristoph Hellwig int __init
1336a05931ceSChristoph Hellwig xfs_qm_init(void)
1337a05931ceSChristoph Hellwig {
1338182696fbSDarrick J. Wong xfs_dquot_cache = kmem_cache_create("xfs_dquot",
1339b1231760SCarlos Maiolino sizeof(struct xfs_dquot),
1340b1231760SCarlos Maiolino 0, 0, NULL);
1341182696fbSDarrick J. Wong if (!xfs_dquot_cache)
1342a05931ceSChristoph Hellwig goto out;
1343a05931ceSChristoph Hellwig
1344182696fbSDarrick J. Wong xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
1345b1231760SCarlos Maiolino sizeof(struct xfs_dquot_acct),
1346b1231760SCarlos Maiolino 0, 0, NULL);
1347182696fbSDarrick J. Wong if (!xfs_dqtrx_cache)
1348182696fbSDarrick J. Wong goto out_free_dquot_cache;
1349a05931ceSChristoph Hellwig
1350a05931ceSChristoph Hellwig return 0;
1351a05931ceSChristoph Hellwig
1352182696fbSDarrick J. Wong out_free_dquot_cache:
1353182696fbSDarrick J. Wong kmem_cache_destroy(xfs_dquot_cache);
1354a05931ceSChristoph Hellwig out:
1355a05931ceSChristoph Hellwig return -ENOMEM;
1356a05931ceSChristoph Hellwig }
1357a05931ceSChristoph Hellwig
13581c2ccc66SGerard Snitselaar void
1359a05931ceSChristoph Hellwig xfs_qm_exit(void)
1360a05931ceSChristoph Hellwig {
1361182696fbSDarrick J. Wong kmem_cache_destroy(xfs_dqtrx_cache);
1362182696fbSDarrick J. Wong kmem_cache_destroy(xfs_dquot_cache);
1363a05931ceSChristoph Hellwig }
1364554ba965SDarrick J. Wong
1365554ba965SDarrick J. Wong /*
1366554ba965SDarrick J. Wong * Iterate every dquot of a particular type. The caller must ensure that the
1367554ba965SDarrick J. Wong * particular quota type is active. iter_fn can return negative error codes,
1368e7ee96dfSDarrick J. Wong * or -ECANCELED to indicate that it wants to stop iterating.
1369554ba965SDarrick J. Wong */
1370554ba965SDarrick J. Wong int
1371554ba965SDarrick J. Wong xfs_qm_dqiterate(
1372554ba965SDarrick J. Wong struct xfs_mount *mp,
13731a7ed271SDarrick J. Wong xfs_dqtype_t type,
1374554ba965SDarrick J. Wong xfs_qm_dqiterate_fn iter_fn,
1375554ba965SDarrick J. Wong void *priv)
1376554ba965SDarrick J. Wong {
1377554ba965SDarrick J. Wong struct xfs_dquot *dq;
1378554ba965SDarrick J. Wong xfs_dqid_t id = 0;
1379554ba965SDarrick J. Wong int error;
1380554ba965SDarrick J. Wong
1381554ba965SDarrick J. Wong do {
13821a7ed271SDarrick J. Wong error = xfs_qm_dqget_next(mp, id, type, &dq);
1383554ba965SDarrick J. Wong if (error == -ENOENT)
1384554ba965SDarrick J. Wong return 0;
1385554ba965SDarrick J. Wong if (error)
1386554ba965SDarrick J. Wong return error;
1387554ba965SDarrick J. Wong
13881a7ed271SDarrick J. Wong error = iter_fn(dq, type, priv);
13892c234a22SDarrick J. Wong id = dq->q_id + 1;
1390554ba965SDarrick J. Wong xfs_qm_dqput(dq);
1391554ba965SDarrick J. Wong } while (error == 0 && id != 0);
1392554ba965SDarrick J. Wong
1393554ba965SDarrick J. Wong return error;
1394554ba965SDarrick J. Wong }
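
/*
 * Illustrative sketch (an editorial addition) of an iter_fn callback,
 * matching the (dq, type, priv) calling convention used above; the
 * names here are hypothetical.  Returning -ECANCELED stops the walk
 * early, and any other nonzero return is passed back to the caller:
 *
 *	static int
 *	xfs_count_dquots(
 *		struct xfs_dquot	*dq,
 *		xfs_dqtype_t		type,
 *		void			*priv)
 *	{
 *		unsigned int		*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int	count = 0;
 *
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, xfs_count_dquots,
 *			&count);
 */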