xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 161f4089)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_alloc.h"
27 #include "xfs_quota.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_inode.h"
31 #include "xfs_bmap.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_rtalloc.h"
34 #include "xfs_error.h"
35 #include "xfs_itable.h"
36 #include "xfs_attr.h"
37 #include "xfs_buf_item.h"
38 #include "xfs_trans_space.h"
39 #include "xfs_trans_priv.h"
40 #include "xfs_qm.h"
41 #include "xfs_cksum.h"
42 #include "xfs_trace.h"
43 
44 /*
45  * Lock order:
46  *
47  * ip->i_lock
48  *   qi->qi_tree_lock
49  *     dquot->q_qlock (xfs_dqlock() and friends)
50  *       dquot->q_flush (xfs_dqflock() and friends)
51  *       qi->qi_lru_lock
52  *
53  * If two dquots need to be locked the order is user before group/project,
54  * otherwise by the lowest id first, see xfs_dqlock2.
55  */
56 
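/*
 * Debug-only error injection: when xfs_do_dqerror is set, every
 * xfs_dqerror_mod'th dquot lookup against xfs_dqerror_target fails
 * with EIO (see xfs_qm_dqget).
 */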
57 #ifdef DEBUG
58 xfs_buftarg_t *xfs_dqerror_target;
59 int xfs_do_dqerror;
60 int xfs_dqreq_num;
61 int xfs_dqerror_mod = 33;
62 #endif
63 
64 struct kmem_zone		*xfs_qm_dqtrxzone;
65 static struct kmem_zone		*xfs_qm_dqzone;
66 
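/*
 * Lockdep keys so that group and project dquot locks get lock classes
 * distinct from user dquot locks (set up in xfs_qm_dqread).
 */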
67 static struct lock_class_key xfs_dquot_group_class;
68 static struct lock_class_key xfs_dquot_project_class;
69 
70 /*
71  * This is called to free all the memory associated with a dquot
72  */
73 void
74 xfs_qm_dqdestroy(
75 	xfs_dquot_t	*dqp)
76 {
77 	ASSERT(list_empty(&dqp->q_lru));
78 
79 	mutex_destroy(&dqp->q_qlock);
80 	kmem_zone_free(xfs_qm_dqzone, dqp);
81 
82 	XFS_STATS_DEC(xs_qm_dquot);
83 }
84 
85 /*
86  * If default limits are in force, push them into the dquot now.
87  * We overwrite the dquot limits only if they are zero and this
88  * is not the root dquot.
89  */
90 void
91 xfs_qm_adjust_dqlimits(
92 	struct xfs_mount	*mp,
93 	struct xfs_dquot	*dq)
94 {
95 	struct xfs_quotainfo	*q = mp->m_quotainfo;
96 	struct xfs_disk_dquot	*d = &dq->q_core;
97 	int			prealloc = 0;
98 
99 	ASSERT(d->d_id);
100 
101 	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
102 		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
103 		prealloc = 1;
104 	}
105 	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
106 		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
107 		prealloc = 1;
108 	}
109 	if (q->qi_isoftlimit && !d->d_ino_softlimit)
110 		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
111 	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
112 		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
113 	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
114 		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
115 	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
116 		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
117 
118 	if (prealloc)
119 		xfs_dquot_set_prealloc_limits(dq);
120 }
121 
122 /*
123  * Check the limits and timers of a dquot and start or reset timers
124  * if necessary.
125  * This gets called even when quota enforcement is OFF, which makes our
126  * life a little less complicated. (We just don't reject any quota
127  * reservations in that case, when enforcement is off).
128  * We also return 0 as the values of the timers in Q_GETQUOTA calls when
129  * enforcement is off.
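 * For example, the first time the block count exceeds the block soft limit,
 * d_btimer is set to "now + qi_btimelimit" (the start of the grace period);
 * once the count drops back under both block limits the timer is cleared.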
130  * In contrast, warnings are a little different in that they don't
131  * 'automatically' get started when limits get exceeded.  They do
132  * get reset to zero, however, when we find the count to be under
133  * the soft limit (they are only ever set non-zero via userspace).
134  */
135 void
136 xfs_qm_adjust_dqtimers(
137 	xfs_mount_t		*mp,
138 	xfs_disk_dquot_t	*d)
139 {
140 	ASSERT(d->d_id);
141 
142 #ifdef DEBUG
143 	if (d->d_blk_hardlimit)
144 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
145 		       be64_to_cpu(d->d_blk_hardlimit));
146 	if (d->d_ino_hardlimit)
147 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
148 		       be64_to_cpu(d->d_ino_hardlimit));
149 	if (d->d_rtb_hardlimit)
150 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
151 		       be64_to_cpu(d->d_rtb_hardlimit));
152 #endif
153 
154 	if (!d->d_btimer) {
155 		if ((d->d_blk_softlimit &&
156 		     (be64_to_cpu(d->d_bcount) >
157 		      be64_to_cpu(d->d_blk_softlimit))) ||
158 		    (d->d_blk_hardlimit &&
159 		     (be64_to_cpu(d->d_bcount) >
160 		      be64_to_cpu(d->d_blk_hardlimit)))) {
161 			d->d_btimer = cpu_to_be32(get_seconds() +
162 					mp->m_quotainfo->qi_btimelimit);
163 		} else {
164 			d->d_bwarns = 0;
165 		}
166 	} else {
167 		if ((!d->d_blk_softlimit ||
168 		     (be64_to_cpu(d->d_bcount) <=
169 		      be64_to_cpu(d->d_blk_softlimit))) &&
170 		    (!d->d_blk_hardlimit ||
171 		    (be64_to_cpu(d->d_bcount) <=
172 		     be64_to_cpu(d->d_blk_hardlimit)))) {
173 			d->d_btimer = 0;
174 		}
175 	}
176 
177 	if (!d->d_itimer) {
178 		if ((d->d_ino_softlimit &&
179 		     (be64_to_cpu(d->d_icount) >
180 		      be64_to_cpu(d->d_ino_softlimit))) ||
181 		    (d->d_ino_hardlimit &&
182 		     (be64_to_cpu(d->d_icount) >
183 		      be64_to_cpu(d->d_ino_hardlimit)))) {
184 			d->d_itimer = cpu_to_be32(get_seconds() +
185 					mp->m_quotainfo->qi_itimelimit);
186 		} else {
187 			d->d_iwarns = 0;
188 		}
189 	} else {
190 		if ((!d->d_ino_softlimit ||
191 		     (be64_to_cpu(d->d_icount) <=
192 		      be64_to_cpu(d->d_ino_softlimit)))  &&
193 		    (!d->d_ino_hardlimit ||
194 		     (be64_to_cpu(d->d_icount) <=
195 		      be64_to_cpu(d->d_ino_hardlimit)))) {
196 			d->d_itimer = 0;
197 		}
198 	}
199 
200 	if (!d->d_rtbtimer) {
201 		if ((d->d_rtb_softlimit &&
202 		     (be64_to_cpu(d->d_rtbcount) >
203 		      be64_to_cpu(d->d_rtb_softlimit))) ||
204 		    (d->d_rtb_hardlimit &&
205 		     (be64_to_cpu(d->d_rtbcount) >
206 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
207 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
208 					mp->m_quotainfo->qi_rtbtimelimit);
209 		} else {
210 			d->d_rtbwarns = 0;
211 		}
212 	} else {
213 		if ((!d->d_rtb_softlimit ||
214 		     (be64_to_cpu(d->d_rtbcount) <=
215 		      be64_to_cpu(d->d_rtb_softlimit))) &&
216 		    (!d->d_rtb_hardlimit ||
217 		     (be64_to_cpu(d->d_rtbcount) <=
218 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
219 			d->d_rtbtimer = 0;
220 		}
221 	}
222 }
223 
224 /*
225  * initialize a buffer full of dquots and log the whole thing
226  */
227 STATIC void
228 xfs_qm_init_dquot_blk(
229 	xfs_trans_t	*tp,
230 	xfs_mount_t	*mp,
231 	xfs_dqid_t	id,
232 	uint		type,
233 	xfs_buf_t	*bp)
234 {
235 	struct xfs_quotainfo	*q = mp->m_quotainfo;
236 	xfs_dqblk_t	*d;
237 	int		curid, i;
238 
239 	ASSERT(tp);
240 	ASSERT(xfs_buf_islocked(bp));
241 
242 	d = bp->b_addr;
243 
244 	/*
245 	 * ID of the first dquot in the block - ids are zero based.
246 	 */
247 	curid = id - (id % q->qi_dqperchunk);
248 	ASSERT(curid >= 0);
249 	memset(d, 0, BBTOB(q->qi_dqchunklen));
250 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
251 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
252 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
253 		d->dd_diskdq.d_id = cpu_to_be32(curid);
254 		d->dd_diskdq.d_flags = type;
255 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
256 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
257 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
258 					 XFS_DQUOT_CRC_OFF);
259 		}
260 	}
261 
262 	xfs_trans_dquot_buf(tp, bp,
263 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
264 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
265 			     XFS_BLF_GDQUOT_BUF)));
266 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
267 }
268 
269 /*
270  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
271  * watermarks correspond to the soft and hard limits by default. If a soft limit
272  * is not specified, we use 95% of the hard limit.
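 * For example, with a 1000 block hard limit and no soft limit, the
 * watermarks end up as hi_wmark = 1000 and lo_wmark = 950, and the
 * 1%/3%/5% low-space thresholds become 10, 30 and 50 blocks.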
273  */
274 void
275 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
276 {
277 	__uint64_t space;
278 
279 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
280 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
281 	if (!dqp->q_prealloc_lo_wmark) {
282 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
283 		do_div(dqp->q_prealloc_lo_wmark, 100);
284 		dqp->q_prealloc_lo_wmark *= 95;
285 	}
286 
287 	space = dqp->q_prealloc_hi_wmark;
288 
289 	do_div(space, 100);
290 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
291 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
292 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
293 }
294 
295 STATIC bool
296 xfs_dquot_buf_verify_crc(
297 	struct xfs_mount	*mp,
298 	struct xfs_buf		*bp)
299 {
300 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
301 	int			ndquots;
302 	int			i;
303 
304 	if (!xfs_sb_version_hascrc(&mp->m_sb))
305 		return true;
306 
307 	/*
308 	 * if we are in log recovery, the quota subsystem has not been
309 	 * initialised so we have no quotainfo structure. In that case, we need
310 	 * to manually calculate the number of dquots in the buffer.
311 	 */
312 	if (mp->m_quotainfo)
313 		ndquots = mp->m_quotainfo->qi_dqperchunk;
314 	else
315 		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
316 					XFS_BB_TO_FSB(mp, bp->b_length));
317 
318 	for (i = 0; i < ndquots; i++, d++) {
319 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
320 				 XFS_DQUOT_CRC_OFF))
321 			return false;
322 		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
323 			return false;
324 	}
325 	return true;
326 }
327 
328 STATIC bool
329 xfs_dquot_buf_verify(
330 	struct xfs_mount	*mp,
331 	struct xfs_buf		*bp)
332 {
333 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
334 	xfs_dqid_t		id = 0;
335 	int			ndquots;
336 	int			i;
337 
338 	/*
339 	 * if we are in log recovery, the quota subsystem has not been
340 	 * initialised so we have no quotainfo structure. In that case, we need
341 	 * to manually calculate the number of dquots in the buffer.
342 	 */
343 	if (mp->m_quotainfo)
344 		ndquots = mp->m_quotainfo->qi_dqperchunk;
345 	else
346 		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
347 
348 	/*
349 	 * On the first read of the buffer, verify that each dquot is valid.
350 	 * We don't know what the id of the dquot is supposed to be, just that
351 	 * they should be increasing monotonically within the buffer. If the
352 	 * first id is corrupt, then it will fail on the second dquot in the
353 	 * buffer so corruptions could point to the wrong dquot in this case.
354 	 */
355 	for (i = 0; i < ndquots; i++) {
356 		struct xfs_disk_dquot	*ddq;
357 		int			error;
358 
359 		ddq = &d[i].dd_diskdq;
360 
361 		if (i == 0)
362 			id = be32_to_cpu(ddq->d_id);
363 
364 		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
365 				       "xfs_dquot_buf_verify");
366 		if (error)
367 			return false;
368 	}
369 	return true;
370 }
371 
372 static void
373 xfs_dquot_buf_read_verify(
374 	struct xfs_buf	*bp)
375 {
376 	struct xfs_mount	*mp = bp->b_target->bt_mount;
377 
378 	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
379 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
380 		xfs_buf_ioerror(bp, EFSCORRUPTED);
381 	}
382 }
383 
384 /*
385  * we don't calculate the CRC here as that is done when the dquot is flushed to
386  * the buffer after the update is done. This ensures that the dquot in the
387  * buffer always has an up-to-date CRC value.
388  */
389 void
390 xfs_dquot_buf_write_verify(
391 	struct xfs_buf	*bp)
392 {
393 	struct xfs_mount	*mp = bp->b_target->bt_mount;
394 
395 	if (!xfs_dquot_buf_verify(mp, bp)) {
396 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
397 		xfs_buf_ioerror(bp, EFSCORRUPTED);
398 		return;
399 	}
400 }
401 
402 const struct xfs_buf_ops xfs_dquot_buf_ops = {
403 	.verify_read = xfs_dquot_buf_read_verify,
404 	.verify_write = xfs_dquot_buf_write_verify,
405 };
406 
407 /*
408  * Allocate a block and fill it with dquots.
409  * This is called when the bmapi finds a hole.
410  */
411 STATIC int
412 xfs_qm_dqalloc(
413 	xfs_trans_t	**tpp,
414 	xfs_mount_t	*mp,
415 	xfs_dquot_t	*dqp,
416 	xfs_inode_t	*quotip,
417 	xfs_fileoff_t	offset_fsb,
418 	xfs_buf_t	**O_bpp)
419 {
420 	xfs_fsblock_t	firstblock;
421 	xfs_bmap_free_t flist;
422 	xfs_bmbt_irec_t map;
423 	int		nmaps, error, committed;
424 	xfs_buf_t	*bp;
425 	xfs_trans_t	*tp = *tpp;
426 
427 	ASSERT(tp != NULL);
428 
429 	trace_xfs_dqalloc(dqp);
430 
431 	/*
432 	 * Initialize the bmap freelist prior to calling bmapi code.
433 	 */
434 	xfs_bmap_init(&flist, &firstblock);
435 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
436 	/*
437 	 * Return if this type of quota was turned off while we didn't
438 	 * hold the inode lock.
439 	 */
440 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
441 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
442 		return (ESRCH);
443 	}
444 
445 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
446 	nmaps = 1;
447 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
448 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
449 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
450 				&map, &nmaps, &flist);
451 	if (error)
452 		goto error0;
453 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
454 	ASSERT(nmaps == 1);
455 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
456 	       (map.br_startblock != HOLESTARTBLOCK));
457 
458 	/*
459 	 * Keep track of the blkno to save a lookup later
460 	 */
461 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
462 
463 	/* now we can just get the buffer (there's nothing to read yet) */
464 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
465 			       dqp->q_blkno,
466 			       mp->m_quotainfo->qi_dqchunklen,
467 			       0);
468 
469 	error = xfs_buf_geterror(bp);
470 	if (error)
471 		goto error1;
472 	bp->b_ops = &xfs_dquot_buf_ops;
473 
474 	/*
475 	 * Make a chunk of dquots out of this buffer and log
476 	 * the entire thing.
477 	 */
478 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
479 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
480 
481 	/*
482 	 * xfs_bmap_finish() may commit the current transaction and
483 	 * start a second transaction if the freelist is not empty.
484 	 *
485 	 * Since we still want to modify this buffer, we need to
486 	 * ensure that the buffer is not released on commit of
487 	 * the first transaction and ensure the buffer is added to the
488 	 * second transaction.
489 	 *
490 	 * If there is only one transaction then don't stop the buffer
491 	 * from being released when it commits later on.
492 	 */
493 
494 	xfs_trans_bhold(tp, bp);
495 
496 	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
497 		goto error1;
498 	}
499 
500 	if (committed) {
501 		tp = *tpp;
502 		xfs_trans_bjoin(tp, bp);
503 	} else {
504 		xfs_trans_bhold_release(tp, bp);
505 	}
506 
507 	*O_bpp = bp;
508 	return 0;
509 
510       error1:
511 	xfs_bmap_cancel(&flist);
512       error0:
513 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
514 
515 	return (error);
516 }
517 STATIC int
518 xfs_qm_dqrepair(
519 	struct xfs_mount	*mp,
520 	struct xfs_trans	*tp,
521 	struct xfs_dquot	*dqp,
522 	xfs_dqid_t		firstid,
523 	struct xfs_buf		**bpp)
524 {
525 	int			error;
526 	struct xfs_disk_dquot	*ddq;
527 	struct xfs_dqblk	*d;
528 	int			i;
529 
530 	/*
531 	 * Read the buffer without verification so we get the corrupted
532 	 * buffer returned to us. Make sure we verify it on write, though.
533 	 */
534 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
535 				   mp->m_quotainfo->qi_dqchunklen,
536 				   0, bpp, NULL);
537 
538 	if (error) {
539 		ASSERT(*bpp == NULL);
540 		return XFS_ERROR(error);
541 	}
542 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
543 
544 	ASSERT(xfs_buf_islocked(*bpp));
545 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
546 
547 	/* Do the actual repair of dquots in this buffer */
548 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
549 		ddq = &d[i].dd_diskdq;
550 		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
551 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
552 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
553 		if (error) {
554 			/* repair failed, we're screwed */
555 			xfs_trans_brelse(tp, *bpp);
556 			return XFS_ERROR(EIO);
557 		}
558 	}
559 
560 	return 0;
561 }
562 
563 /*
564  * Maps a dquot to the buffer containing its on-disk version.
565  * This returns a ptr to the buffer containing the on-disk dquot
566  * in the bpp param, and a ptr to the on-disk dquot within that buffer.
567  */
568 STATIC int
569 xfs_qm_dqtobp(
570 	xfs_trans_t		**tpp,
571 	xfs_dquot_t		*dqp,
572 	xfs_disk_dquot_t	**O_ddpp,
573 	xfs_buf_t		**O_bpp,
574 	uint			flags)
575 {
576 	struct xfs_bmbt_irec	map;
577 	int			nmaps = 1, error;
578 	struct xfs_buf		*bp;
579 	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
580 	struct xfs_mount	*mp = dqp->q_mount;
581 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
582 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
583 
584 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
585 
586 	xfs_ilock(quotip, XFS_ILOCK_SHARED);
587 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
588 		/*
589 		 * Return if this type of quota was turned off while we
590 		 * didn't hold the quota inode lock.
591 		 */
592 		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
593 		return ESRCH;
594 	}
595 
596 	/*
597 	 * Find the block map; no allocations yet
598 	 */
599 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
600 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
601 
602 	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
603 	if (error)
604 		return error;
605 
606 	ASSERT(nmaps == 1);
607 	ASSERT(map.br_blockcount == 1);
608 
609 	/*
610 	 * Offset of dquot in the (fixed sized) dquot chunk.
611 	 */
612 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
613 		sizeof(xfs_dqblk_t);
614 
615 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
616 	if (map.br_startblock == HOLESTARTBLOCK) {
617 		/*
618 		 * We don't allocate unless we're asked to
619 		 */
620 		if (!(flags & XFS_QMOPT_DQALLOC))
621 			return ENOENT;
622 
623 		ASSERT(tp);
624 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
625 					dqp->q_fileoffset, &bp);
626 		if (error)
627 			return error;
628 		tp = *tpp;
629 	} else {
630 		trace_xfs_dqtobp_read(dqp);
631 
632 		/*
633 		 * store the blkno etc so that we don't have to do the
634 		 * mapping all the time
635 		 */
636 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
637 
638 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
639 					   dqp->q_blkno,
640 					   mp->m_quotainfo->qi_dqchunklen,
641 					   0, &bp, &xfs_dquot_buf_ops);
642 
643 		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
644 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
645 						mp->m_quotainfo->qi_dqperchunk;
646 			ASSERT(bp == NULL);
647 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
648 		}
649 
650 		if (error) {
651 			ASSERT(bp == NULL);
652 			return XFS_ERROR(error);
653 		}
654 	}
655 
656 	ASSERT(xfs_buf_islocked(bp));
657 	*O_bpp = bp;
658 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
659 
660 	return (0);
661 }
662 
663 
664 /*
665  * Read in the on-disk dquot using dqtobp(), then copy it to an incore version,
666  * and release the buffer immediately.
667  *
668  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
669  */
670 int
671 xfs_qm_dqread(
672 	struct xfs_mount	*mp,
673 	xfs_dqid_t		id,
674 	uint			type,
675 	uint			flags,
676 	struct xfs_dquot	**O_dqpp)
677 {
678 	struct xfs_dquot	*dqp;
679 	struct xfs_disk_dquot	*ddqp;
680 	struct xfs_buf		*bp;
681 	struct xfs_trans	*tp = NULL;
682 	int			error;
683 	int			cancelflags = 0;
684 
685 
686 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
687 
688 	dqp->dq_flags = type;
689 	dqp->q_core.d_id = cpu_to_be32(id);
690 	dqp->q_mount = mp;
691 	INIT_LIST_HEAD(&dqp->q_lru);
692 	mutex_init(&dqp->q_qlock);
693 	init_waitqueue_head(&dqp->q_pinwait);
694 
695 	/*
696 	 * Because we want to use a counting completion, complete
697 	 * the flush completion once to allow a single access to
698 	 * the flush completion without blocking.
699 	 */
700 	init_completion(&dqp->q_flush);
701 	complete(&dqp->q_flush);
702 
703 	/*
704 	 * Make sure group quotas have a different lock class than user
705 	 * quotas.
706 	 */
707 	switch (type) {
708 	case XFS_DQ_USER:
709 		/* uses the default lock class */
710 		break;
711 	case XFS_DQ_GROUP:
712 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
713 		break;
714 	case XFS_DQ_PROJ:
715 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
716 		break;
717 	default:
718 		ASSERT(0);
719 		break;
720 	}
721 
722 	XFS_STATS_INC(xs_qm_dquot);
723 
724 	trace_xfs_dqread(dqp);
725 
726 	if (flags & XFS_QMOPT_DQALLOC) {
727 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
728 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
729 					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
730 		if (error)
731 			goto error1;
732 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
733 	}
734 
735 	/*
736 	 * Get a pointer to the on-disk dquot and the buffer containing it;
737 	 * dqp already knows its own type (GROUP/USER).
738 	 */
739 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
740 	if (error) {
741 		/*
742 		 * This can happen if quotas got turned off (ESRCH),
743 		 * or if the dquot didn't exist on disk and we ask to
744 		 * allocate (ENOENT).
745 		 */
746 		trace_xfs_dqread_fail(dqp);
747 		cancelflags |= XFS_TRANS_ABORT;
748 		goto error1;
749 	}
750 
751 	/* copy everything from disk dquot to the incore dquot */
752 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
753 	xfs_qm_dquot_logitem_init(dqp);
754 
755 	/*
756 	 * Reservation counters are defined as reservation plus current usage
757 	 * to avoid having to add every time.
758 	 */
759 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
760 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
761 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
762 
763 	/* initialize the dquot speculative prealloc thresholds */
764 	xfs_dquot_set_prealloc_limits(dqp);
765 
766 	/* Mark the buf so that this will stay incore a little longer */
767 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
768 
769 	/*
770 	 * We got the buffer with xfs_trans_read_buf() (in dqtobp()), so we
771 	 * need to release it with xfs_trans_brelse().
772 	 * The strategy here is identical to that of inodes; we lock
773 	 * the dquot in xfs_qm_dqget() before making it accessible to
774 	 * others. This is because dquots, like inodes, need a good level of
775 	 * concurrency, and we don't want to take locks on the entire buffers
776 	 * for dquot accesses.
777 	 * Note also that the dquot buffer may even be dirty at this point, if
778 	 * this particular dquot was repaired. We still aren't afraid to
779 	 * brelse it because we have the changes incore.
780 	 */
781 	ASSERT(xfs_buf_islocked(bp));
782 	xfs_trans_brelse(tp, bp);
783 
784 	if (tp) {
785 		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
786 		if (error)
787 			goto error0;
788 	}
789 
790 	*O_dqpp = dqp;
791 	return error;
792 
793 error1:
794 	if (tp)
795 		xfs_trans_cancel(tp, cancelflags);
796 error0:
797 	xfs_qm_dqdestroy(dqp);
798 	*O_dqpp = NULL;
799 	return error;
800 }
801 
802 /*
803  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
804  * locked dquot, doing an allocation (if requested) as needed.
805  * When both an inode and an id are given, the inode's id takes precedence.
806  * That is, if the id changes while we don't hold the ilock inside this
807  * function, the new dquot is returned, not necessarily the one requested
808  * in the id argument.
809  */
810 int
811 xfs_qm_dqget(
812 	xfs_mount_t	*mp,
813 	xfs_inode_t	*ip,	  /* locked inode (optional) */
814 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
815 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
816 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
817 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
818 {
819 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
820 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
821 	struct xfs_dquot	*dqp;
822 	int			error;
823 
824 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
825 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
826 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
827 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
828 		return (ESRCH);
829 	}
830 
831 #ifdef DEBUG
832 	if (xfs_do_dqerror) {
833 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
834 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
835 			xfs_debug(mp, "Returning error in dqget");
836 			return (EIO);
837 		}
838 	}
839 
840 	ASSERT(type == XFS_DQ_USER ||
841 	       type == XFS_DQ_PROJ ||
842 	       type == XFS_DQ_GROUP);
843 	if (ip) {
844 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
845 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
846 	}
847 #endif
848 
849 restart:
850 	mutex_lock(&qi->qi_tree_lock);
851 	dqp = radix_tree_lookup(tree, id);
852 	if (dqp) {
853 		xfs_dqlock(dqp);
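		/* The dquot is being freed; drop the locks, wait briefly and retry. */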
854 		if (dqp->dq_flags & XFS_DQ_FREEING) {
855 			xfs_dqunlock(dqp);
856 			mutex_unlock(&qi->qi_tree_lock);
857 			trace_xfs_dqget_freeing(dqp);
858 			delay(1);
859 			goto restart;
860 		}
861 
862 		dqp->q_nrefs++;
863 		mutex_unlock(&qi->qi_tree_lock);
864 
865 		trace_xfs_dqget_hit(dqp);
866 		XFS_STATS_INC(xs_qm_dqcachehits);
867 		*O_dqpp = dqp;
868 		return 0;
869 	}
870 	mutex_unlock(&qi->qi_tree_lock);
871 	XFS_STATS_INC(xs_qm_dqcachemisses);
872 
873 	/*
874 	 * Dquot cache miss. We don't want to keep the inode lock across
875 	 * a (potential) disk read. Also we don't want to deal with the lock
876 	 * ordering between quotainode and this inode. OTOH, dropping the inode
877 	 * lock here means dealing with a chown that can happen before
878 	 * we re-acquire the lock.
879 	 */
880 	if (ip)
881 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
882 
883 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
884 
885 	if (ip)
886 		xfs_ilock(ip, XFS_ILOCK_EXCL);
887 
888 	if (error)
889 		return error;
890 
891 	if (ip) {
892 		/*
893 		 * A dquot could be attached to this inode by now, since
894 		 * we had dropped the ilock.
895 		 */
896 		if (xfs_this_quota_on(mp, type)) {
897 			struct xfs_dquot	*dqp1;
898 
899 			dqp1 = xfs_inode_dquot(ip, type);
900 			if (dqp1) {
901 				xfs_qm_dqdestroy(dqp);
902 				dqp = dqp1;
903 				xfs_dqlock(dqp);
904 				goto dqret;
905 			}
906 		} else {
907 			/* inode stays locked on return */
908 			xfs_qm_dqdestroy(dqp);
909 			return XFS_ERROR(ESRCH);
910 		}
911 	}
912 
913 	mutex_lock(&qi->qi_tree_lock);
914 	error = -radix_tree_insert(tree, id, dqp);
915 	if (unlikely(error)) {
916 		WARN_ON(error != EEXIST);
917 
918 		/*
919 		 * Duplicate found. Just throw away the new dquot and start
920 		 * over.
921 		 */
922 		mutex_unlock(&qi->qi_tree_lock);
923 		trace_xfs_dqget_dup(dqp);
924 		xfs_qm_dqdestroy(dqp);
925 		XFS_STATS_INC(xs_qm_dquot_dups);
926 		goto restart;
927 	}
928 
929 	/*
930 	 * We return a locked dquot to the caller, with a reference taken
931 	 */
932 	xfs_dqlock(dqp);
933 	dqp->q_nrefs = 1;
934 
935 	qi->qi_dquots++;
936 	mutex_unlock(&qi->qi_tree_lock);
937 
938  dqret:
939 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
940 	trace_xfs_dqget_miss(dqp);
941 	*O_dqpp = dqp;
942 	return (0);
943 }
944 
945 
946 STATIC void
947 xfs_qm_dqput_final(
948 	struct xfs_dquot	*dqp)
949 {
950 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
951 	struct xfs_dquot	*gdqp;
952 	struct xfs_dquot	*pdqp;
953 
954 	trace_xfs_dqput_free(dqp);
955 
956 	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
957 		XFS_STATS_INC(xs_qm_dquot_unused);
958 
959 	/*
960 	 * If we just added a udquot to the freelist, then we want to release
961 	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
962 	 * keep the gdquot/pdquot from getting reclaimed.
963 	 */
964 	gdqp = dqp->q_gdquot;
965 	if (gdqp) {
966 		xfs_dqlock(gdqp);
967 		dqp->q_gdquot = NULL;
968 	}
969 
970 	pdqp = dqp->q_pdquot;
971 	if (pdqp) {
972 		xfs_dqlock(pdqp);
973 		dqp->q_pdquot = NULL;
974 	}
975 	xfs_dqunlock(dqp);
976 
977 	/*
978 	 * If we had a group/project quota hint, release it now.
979 	 */
980 	if (gdqp)
981 		xfs_qm_dqput(gdqp);
982 	if (pdqp)
983 		xfs_qm_dqput(pdqp);
984 }
985 
986 /*
987  * Release a reference to the dquot (decrement ref-count) and unlock it.
988  *
989  * If there is a group quota attached to this dquot, carefully release that
990  * too without tripping over deadlocks'n'stuff.
991  */
992 void
993 xfs_qm_dqput(
994 	struct xfs_dquot	*dqp)
995 {
996 	ASSERT(dqp->q_nrefs > 0);
997 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
998 
999 	trace_xfs_dqput(dqp);
1000 
1001 	if (--dqp->q_nrefs > 0)
1002 		xfs_dqunlock(dqp);
1003 	else
1004 		xfs_qm_dqput_final(dqp);
1005 }
1006 
1007 /*
1008  * Release a dquot. Flush it if dirty, then dqput() it.
1009  * dquot must not be locked.
1010  */
1011 void
1012 xfs_qm_dqrele(
1013 	xfs_dquot_t	*dqp)
1014 {
1015 	if (!dqp)
1016 		return;
1017 
1018 	trace_xfs_dqrele(dqp);
1019 
1020 	xfs_dqlock(dqp);
1021 	/*
1022 	 * We don't care to flush the dquot here even if it is dirty.
1023 	 * That will create stutters that we want to avoid.
1024 	 * Instead we do a delayed write when we try to reclaim
1025 	 * a dirty dquot. Also xfs_sync will take part of the burden...
1026 	 */
1027 	xfs_qm_dqput(dqp);
1028 }
1029 
1030 /*
1031  * This is the dquot flushing I/O completion routine.  It is called
1032  * from interrupt level when the buffer containing the dquot is
1033  * flushed to disk.  It is responsible for removing the dquot logitem
1034  * from the AIL if it has not been re-logged, and unlocking the dquot's
1035  * flush lock. This behavior is very similar to that of inodes.
1036  */
1037 STATIC void
1038 xfs_qm_dqflush_done(
1039 	struct xfs_buf		*bp,
1040 	struct xfs_log_item	*lip)
1041 {
1042 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
1043 	xfs_dquot_t		*dqp = qip->qli_dquot;
1044 	struct xfs_ail		*ailp = lip->li_ailp;
1045 
1046 	/*
1047 	 * We only want to pull the item from the AIL if its
1048 	 * location in the log has not changed since we started the flush.
1049 	 * Thus, we only bother if the dquot's lsn has
1050 	 * not changed. First we check the lsn outside the lock
1051 	 * since it's cheaper, and then we recheck while
1052 	 * holding the lock before removing the dquot from the AIL.
1053 	 */
1054 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1055 	    lip->li_lsn == qip->qli_flush_lsn) {
1056 
1057 		/* xfs_trans_ail_delete() drops the AIL lock. */
1058 		spin_lock(&ailp->xa_lock);
1059 		if (lip->li_lsn == qip->qli_flush_lsn)
1060 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1061 		else
1062 			spin_unlock(&ailp->xa_lock);
1063 	}
1064 
1065 	/*
1066 	 * Release the dq's flush lock since we're done with it.
1067 	 */
1068 	xfs_dqfunlock(dqp);
1069 }
1070 
1071 /*
1072  * Write a modified dquot to disk.
1073  * The dquot must be locked and the flush lock held by the caller as well.
1074  * The flush lock will not be unlocked until the dquot reaches the disk,
1075  * but the dquot is free to be unlocked and modified by the caller
1076  * in the interim. Dquot is still locked on return. This behavior is
1077  * identical to that of inodes.
1078  */
1079 int
1080 xfs_qm_dqflush(
1081 	struct xfs_dquot	*dqp,
1082 	struct xfs_buf		**bpp)
1083 {
1084 	struct xfs_mount	*mp = dqp->q_mount;
1085 	struct xfs_buf		*bp;
1086 	struct xfs_disk_dquot	*ddqp;
1087 	int			error;
1088 
1089 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1090 	ASSERT(!completion_done(&dqp->q_flush));
1091 
1092 	trace_xfs_dqflush(dqp);
1093 
1094 	*bpp = NULL;
1095 
1096 	xfs_qm_dqunpin_wait(dqp);
1097 
1098 	/*
1099 	 * This may have been unpinned because the filesystem is shutting
1100 	 * down forcibly. If that's the case we must not write this dquot
1101 	 * to disk, because the log record didn't make it to disk.
1102 	 *
1103 	 * We also have to remove the log item from the AIL in this case,
1104  * as we wait for an empty AIL as part of the unmount process.
1105 	 */
1106 	if (XFS_FORCED_SHUTDOWN(mp)) {
1107 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1108 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1109 
1110 		spin_lock(&mp->m_ail->xa_lock);
1111 		if (lip->li_flags & XFS_LI_IN_AIL)
1112 			xfs_trans_ail_delete(mp->m_ail, lip,
1113 					     SHUTDOWN_CORRUPT_INCORE);
1114 		else
1115 			spin_unlock(&mp->m_ail->xa_lock);
1116 		error = XFS_ERROR(EIO);
1117 		goto out_unlock;
1118 	}
1119 
1120 	/*
1121 	 * Get the buffer containing the on-disk dquot
1122 	 */
1123 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1124 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
1125 	if (error)
1126 		goto out_unlock;
1127 
1128 	/*
1129 	 * Calculate the location of the dquot inside the buffer.
1130 	 */
1131 	ddqp = bp->b_addr + dqp->q_bufoffset;
1132 
1133 	/*
1134 	 * A simple sanity check in case we got a corrupted dquot.
1135 	 */
1136 	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1137 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1138 	if (error) {
1139 		xfs_buf_relse(bp);
1140 		xfs_dqfunlock(dqp);
1141 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1142 		return XFS_ERROR(EIO);
1143 	}
1144 
1145 	/* This is the only portion of data that needs to persist */
1146 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1147 
1148 	/*
1149 	 * Clear the dirty field and remember the flush lsn for later use.
1150 	 */
1151 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1152 
1153 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1154 					&dqp->q_logitem.qli_item.li_lsn);
1155 
1156 	/*
1157 	 * copy the lsn into the on-disk dquot now while we have the in memory
1158 	 * dquot here. This can't be done later in the write verifier as we
1159 	 * can't get access to the log item at that point in time.
1160 	 *
1161 	 * We also calculate the CRC here so that the on-disk dquot in the
1162 	 * buffer always has a valid CRC. This ensures there is no possibility
1163 	 * of a dquot without an up-to-date CRC getting to disk.
1164 	 */
1165 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1166 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1167 
1168 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1169 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1170 				 XFS_DQUOT_CRC_OFF);
1171 	}
1172 
1173 	/*
1174 	 * Attach an iodone routine so that we can remove this dquot from the
1175 	 * AIL and release the flush lock once the dquot is synced to disk.
1176 	 */
1177 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1178 				  &dqp->q_logitem.qli_item);
1179 
1180 	/*
1181 	 * If the buffer is pinned then push on the log so we won't
1182 	 * get stuck waiting in the write for too long.
1183 	 */
1184 	if (xfs_buf_ispinned(bp)) {
1185 		trace_xfs_dqflush_force(dqp);
1186 		xfs_log_force(mp, 0);
1187 	}
1188 
1189 	trace_xfs_dqflush_done(dqp);
1190 	*bpp = bp;
1191 	return 0;
1192 
1193 out_unlock:
1194 	xfs_dqfunlock(dqp);
1195 	return XFS_ERROR(EIO);
1196 }
1197 
1198 /*
1199  * Lock two xfs_dquot structures.
1200  *
1201  * To avoid deadlocks we always lock the quota structure with
1202  * the lower id first.
1203  */
1204 void
1205 xfs_dqlock2(
1206 	xfs_dquot_t	*d1,
1207 	xfs_dquot_t	*d2)
1208 {
1209 	if (d1 && d2) {
1210 		ASSERT(d1 != d2);
1211 		if (be32_to_cpu(d1->q_core.d_id) >
1212 		    be32_to_cpu(d2->q_core.d_id)) {
1213 			mutex_lock(&d2->q_qlock);
1214 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1215 		} else {
1216 			mutex_lock(&d1->q_qlock);
1217 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1218 		}
1219 	} else if (d1) {
1220 		mutex_lock(&d1->q_qlock);
1221 	} else if (d2) {
1222 		mutex_lock(&d2->q_qlock);
1223 	}
1224 }
1225 
1226 int __init
1227 xfs_qm_init(void)
1228 {
1229 	xfs_qm_dqzone =
1230 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1231 	if (!xfs_qm_dqzone)
1232 		goto out;
1233 
1234 	xfs_qm_dqtrxzone =
1235 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1236 	if (!xfs_qm_dqtrxzone)
1237 		goto out_free_dqzone;
1238 
1239 	return 0;
1240 
1241 out_free_dqzone:
1242 	kmem_zone_destroy(xfs_qm_dqzone);
1243 out:
1244 	return -ENOMEM;
1245 }
1246 
1247 void
1248 xfs_qm_exit(void)
1249 {
1250 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1251 	kmem_zone_destroy(xfs_qm_dqzone);
1252 }
1253