xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 089a49b6)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_alloc.h"
27 #include "xfs_quota.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_inode.h"
31 #include "xfs_bmap.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_rtalloc.h"
34 #include "xfs_error.h"
35 #include "xfs_itable.h"
36 #include "xfs_attr.h"
37 #include "xfs_buf_item.h"
38 #include "xfs_trans_space.h"
39 #include "xfs_trans_priv.h"
40 #include "xfs_qm.h"
41 #include "xfs_cksum.h"
42 #include "xfs_trace.h"
43 
44 /*
45  * Lock order:
46  *
47  * ip->i_lock
48  *   qi->qi_tree_lock
49  *     dquot->q_qlock (xfs_dqlock() and friends)
50  *       dquot->q_flush (xfs_dqflock() and friends)
51  *       qi->qi_lru_lock
52  *
53  * If two dquots need to be locked, the order is user before group/project;
54  * otherwise it is by the lowest id first. See xfs_dqlock2.
55  */
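/*
 * Illustrative example (assumed usage, not taken from this file): when a
 * user and a group dquot must both be locked, the caller is expected to take
 * the pair via xfs_dqlock2(udqp, gdqp) rather than locking them directly,
 * so that every path acquires the two q_qlock mutexes in the same documented
 * order and two racing tasks cannot deadlock on the pair.
 */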
56 
57 #ifdef DEBUG
58 xfs_buftarg_t *xfs_dqerror_target;
59 int xfs_do_dqerror;
60 int xfs_dqreq_num;
61 int xfs_dqerror_mod = 33;
62 #endif
63 
64 struct kmem_zone		*xfs_qm_dqtrxzone;
65 static struct kmem_zone		*xfs_qm_dqzone;
66 
67 static struct lock_class_key xfs_dquot_other_class;
68 
69 /*
70  * This is called to free all the memory associated with a dquot
71  */
72 void
73 xfs_qm_dqdestroy(
74 	xfs_dquot_t	*dqp)
75 {
76 	ASSERT(list_empty(&dqp->q_lru));
77 
78 	mutex_destroy(&dqp->q_qlock);
79 	kmem_zone_free(xfs_qm_dqzone, dqp);
80 
81 	XFS_STATS_DEC(xs_qm_dquot);
82 }
83 
84 /*
85  * If default limits are in force, push them into the dquot now.
86  * We overwrite the dquot limits only if they are zero and this
87  * is not the root dquot.
88  */
89 void
90 xfs_qm_adjust_dqlimits(
91 	struct xfs_mount	*mp,
92 	struct xfs_dquot	*dq)
93 {
94 	struct xfs_quotainfo	*q = mp->m_quotainfo;
95 	struct xfs_disk_dquot	*d = &dq->q_core;
96 	int			prealloc = 0;
97 
98 	ASSERT(d->d_id);
99 
100 	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
101 		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
102 		prealloc = 1;
103 	}
104 	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
105 		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
106 		prealloc = 1;
107 	}
108 	if (q->qi_isoftlimit && !d->d_ino_softlimit)
109 		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
110 	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
111 		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
112 	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
113 		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
114 	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
115 		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
116 
117 	if (prealloc)
118 		xfs_dquot_set_prealloc_limits(dq);
119 }
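/*
 * Worked example (illustrative values only): if the filesystem defaults are
 * qi_bsoftlimit = 100 and qi_bhardlimit = 200, and the dquot currently has
 * d_blk_softlimit = 0 and d_blk_hardlimit = 500, then the soft limit is
 * filled in from the default (100) while the explicit hard limit (500) is
 * left untouched. Because a block limit changed, the speculative
 * preallocation watermarks are recomputed via xfs_dquot_set_prealloc_limits().
 */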
120 
121 /*
122  * Check the limits and timers of a dquot and start or reset timers
123  * if necessary.
124  * This gets called even when quota enforcement is OFF, which makes our
125  * life a little less complicated: we simply don't reject any quota
126  * reservations in that case.
127  * We also return 0 as the values of the timers in Q_GETQUOTA calls when
128  * enforcement is off.
129  * In contrast, warnings are a little different in that they don't
130  * 'automatically' get started when limits get exceeded.  They do
131  * get reset to zero, however, when we find the count to be under
132  * the soft limit (they are only ever set non-zero via userspace).
133  */
134 void
135 xfs_qm_adjust_dqtimers(
136 	xfs_mount_t		*mp,
137 	xfs_disk_dquot_t	*d)
138 {
139 	ASSERT(d->d_id);
140 
141 #ifdef DEBUG
142 	if (d->d_blk_hardlimit)
143 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
144 		       be64_to_cpu(d->d_blk_hardlimit));
145 	if (d->d_ino_hardlimit)
146 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
147 		       be64_to_cpu(d->d_ino_hardlimit));
148 	if (d->d_rtb_hardlimit)
149 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
150 		       be64_to_cpu(d->d_rtb_hardlimit));
151 #endif
152 
153 	if (!d->d_btimer) {
154 		if ((d->d_blk_softlimit &&
155 		     (be64_to_cpu(d->d_bcount) >
156 		      be64_to_cpu(d->d_blk_softlimit))) ||
157 		    (d->d_blk_hardlimit &&
158 		     (be64_to_cpu(d->d_bcount) >
159 		      be64_to_cpu(d->d_blk_hardlimit)))) {
160 			d->d_btimer = cpu_to_be32(get_seconds() +
161 					mp->m_quotainfo->qi_btimelimit);
162 		} else {
163 			d->d_bwarns = 0;
164 		}
165 	} else {
166 		if ((!d->d_blk_softlimit ||
167 		     (be64_to_cpu(d->d_bcount) <=
168 		      be64_to_cpu(d->d_blk_softlimit))) &&
169 		    (!d->d_blk_hardlimit ||
170 		    (be64_to_cpu(d->d_bcount) <=
171 		     be64_to_cpu(d->d_blk_hardlimit)))) {
172 			d->d_btimer = 0;
173 		}
174 	}
175 
176 	if (!d->d_itimer) {
177 		if ((d->d_ino_softlimit &&
178 		     (be64_to_cpu(d->d_icount) >
179 		      be64_to_cpu(d->d_ino_softlimit))) ||
180 		    (d->d_ino_hardlimit &&
181 		     (be64_to_cpu(d->d_icount) >
182 		      be64_to_cpu(d->d_ino_hardlimit)))) {
183 			d->d_itimer = cpu_to_be32(get_seconds() +
184 					mp->m_quotainfo->qi_itimelimit);
185 		} else {
186 			d->d_iwarns = 0;
187 		}
188 	} else {
189 		if ((!d->d_ino_softlimit ||
190 		     (be64_to_cpu(d->d_icount) <=
191 		      be64_to_cpu(d->d_ino_softlimit)))  &&
192 		    (!d->d_ino_hardlimit ||
193 		     (be64_to_cpu(d->d_icount) <=
194 		      be64_to_cpu(d->d_ino_hardlimit)))) {
195 			d->d_itimer = 0;
196 		}
197 	}
198 
199 	if (!d->d_rtbtimer) {
200 		if ((d->d_rtb_softlimit &&
201 		     (be64_to_cpu(d->d_rtbcount) >
202 		      be64_to_cpu(d->d_rtb_softlimit))) ||
203 		    (d->d_rtb_hardlimit &&
204 		     (be64_to_cpu(d->d_rtbcount) >
205 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
206 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
207 					mp->m_quotainfo->qi_rtbtimelimit);
208 		} else {
209 			d->d_rtbwarns = 0;
210 		}
211 	} else {
212 		if ((!d->d_rtb_softlimit ||
213 		     (be64_to_cpu(d->d_rtbcount) <=
214 		      be64_to_cpu(d->d_rtb_softlimit))) &&
215 		    (!d->d_rtb_hardlimit ||
216 		     (be64_to_cpu(d->d_rtbcount) <=
217 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
218 			d->d_rtbtimer = 0;
219 		}
220 	}
221 }
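/*
 * The three blocks above all follow the same pattern; a condensed sketch of
 * that pattern (hypothetical helper, not part of this file) looks like:
 *
 *	static void example_adjust_res_timer(__be32 *timer, __be16 *warns,
 *					     __be64 count, __be64 soft,
 *					     __be64 hard, time_t limit)
 *	{
 *		bool over = (soft && be64_to_cpu(count) > be64_to_cpu(soft)) ||
 *			    (hard && be64_to_cpu(count) > be64_to_cpu(hard));
 *
 *		if (!*timer) {
 *			if (over)
 *				*timer = cpu_to_be32(get_seconds() + limit);
 *			else
 *				*warns = 0;
 *		} else if (!over) {
 *			*timer = 0;
 *		}
 *	}
 *
 * i.e. start the timer when a limit is first exceeded, reset the warning
 * count while no timer is running and usage is within limits, and clear the
 * timer once usage drops back under both limits.
 */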
222 
223 /*
224  * initialize a buffer full of dquots and log the whole thing
225  */
226 STATIC void
227 xfs_qm_init_dquot_blk(
228 	xfs_trans_t	*tp,
229 	xfs_mount_t	*mp,
230 	xfs_dqid_t	id,
231 	uint		type,
232 	xfs_buf_t	*bp)
233 {
234 	struct xfs_quotainfo	*q = mp->m_quotainfo;
235 	xfs_dqblk_t	*d;
236 	int		curid, i;
237 
238 	ASSERT(tp);
239 	ASSERT(xfs_buf_islocked(bp));
240 
241 	d = bp->b_addr;
242 
243 	/*
244 	 * ID of the first dquot in the block; ids are zero-based.
245 	 */
246 	curid = id - (id % q->qi_dqperchunk);
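	/*
	 * For example (illustrative values only): with qi_dqperchunk == 30
	 * and id == 71, curid = 71 - (71 % 30) = 60, i.e. the id of the
	 * first dquot stored in this chunk.
	 */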
247 	ASSERT(curid >= 0);
248 	memset(d, 0, BBTOB(q->qi_dqchunklen));
249 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
250 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
251 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
252 		d->dd_diskdq.d_id = cpu_to_be32(curid);
253 		d->dd_diskdq.d_flags = type;
254 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
255 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
256 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
257 					 XFS_DQUOT_CRC_OFF);
258 		}
259 	}
260 
261 	xfs_trans_dquot_buf(tp, bp,
262 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
263 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
264 			     XFS_BLF_GDQUOT_BUF)));
265 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
266 }
267 
268 /*
269  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
270  * watermarks correspond to the soft and hard limits by default. If a soft limit
271  * is not specified, we use 95% of the hard limit.
272  */
273 void
274 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
275 {
276 	__uint64_t space;
277 
278 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
279 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
280 	if (!dqp->q_prealloc_lo_wmark) {
281 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
282 		do_div(dqp->q_prealloc_lo_wmark, 100);
283 		dqp->q_prealloc_lo_wmark *= 95;
284 	}
285 
286 	space = dqp->q_prealloc_hi_wmark;
287 
288 	do_div(space, 100);
289 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
290 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
291 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
292 }
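/*
 * Worked example (illustrative values only): with d_blk_hardlimit = 100000
 * blocks and no soft limit, q_prealloc_hi_wmark = 100000,
 * q_prealloc_lo_wmark = 100000 / 100 * 95 = 95000, and q_low_space[] ends up
 * as {1000, 3000, 5000}, i.e. 1%, 3% and 5% of the hard limit.
 */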
293 
294 STATIC bool
295 xfs_dquot_buf_verify_crc(
296 	struct xfs_mount	*mp,
297 	struct xfs_buf		*bp)
298 {
299 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
300 	int			ndquots;
301 	int			i;
302 
303 	if (!xfs_sb_version_hascrc(&mp->m_sb))
304 		return true;
305 
306 	/*
307 	 * if we are in log recovery, the quota subsystem has not been
308 	 * initialised so we have no quotainfo structure. In that case, we need
309 	 * to manually calculate the number of dquots in the buffer.
310 	 */
311 	if (mp->m_quotainfo)
312 		ndquots = mp->m_quotainfo->qi_dqperchunk;
313 	else
314 		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
315 					XFS_BB_TO_FSB(mp, bp->b_length));
316 
317 	for (i = 0; i < ndquots; i++, d++) {
318 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
319 				 XFS_DQUOT_CRC_OFF))
320 			return false;
321 		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
322 			return false;
323 	}
324 	return true;
325 }
326 
327 STATIC bool
328 xfs_dquot_buf_verify(
329 	struct xfs_mount	*mp,
330 	struct xfs_buf		*bp)
331 {
332 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
333 	xfs_dqid_t		id = 0;
334 	int			ndquots;
335 	int			i;
336 
337 	/*
338 	 * if we are in log recovery, the quota subsystem has not been
339 	 * initialised so we have no quotainfo structure. In that case, we need
340 	 * to manually calculate the number of dquots in the buffer.
341 	 */
342 	if (mp->m_quotainfo)
343 		ndquots = mp->m_quotainfo->qi_dqperchunk;
344 	else
345 		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
346 
347 	/*
348 	 * On the first read of the buffer, verify that each dquot is valid.
349 	 * We don't know what the id of the dquot is supposed to be, just that
350 	 * they should be increasing monotonically within the buffer. If the
351 	 * first id is corrupt, then it will fail on the second dquot in the
352 	 * buffer so corruptions could point to the wrong dquot in this case.
353 	 */
354 	for (i = 0; i < ndquots; i++) {
355 		struct xfs_disk_dquot	*ddq;
356 		int			error;
357 
358 		ddq = &d[i].dd_diskdq;
359 
360 		if (i == 0)
361 			id = be32_to_cpu(ddq->d_id);
362 
363 		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
364 				       "xfs_dquot_buf_verify");
365 		if (error)
366 			return false;
367 	}
368 	return true;
369 }
370 
371 static void
372 xfs_dquot_buf_read_verify(
373 	struct xfs_buf	*bp)
374 {
375 	struct xfs_mount	*mp = bp->b_target->bt_mount;
376 
377 	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
378 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
379 		xfs_buf_ioerror(bp, EFSCORRUPTED);
380 	}
381 }
382 
383 /*
384  * we don't calculate the CRC here as that is done when the dquot is flushed to
385  * the buffer after the update is done. This ensures that the dquot in the
386  * buffer always has an up-to-date CRC value.
387  */
388 void
389 xfs_dquot_buf_write_verify(
390 	struct xfs_buf	*bp)
391 {
392 	struct xfs_mount	*mp = bp->b_target->bt_mount;
393 
394 	if (!xfs_dquot_buf_verify(mp, bp)) {
395 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
396 		xfs_buf_ioerror(bp, EFSCORRUPTED);
397 		return;
398 	}
399 }
400 
401 const struct xfs_buf_ops xfs_dquot_buf_ops = {
402 	.verify_read = xfs_dquot_buf_read_verify,
403 	.verify_write = xfs_dquot_buf_write_verify,
404 };
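/*
 * These verifier ops are attached wherever dquot buffers are created or
 * read: in this file xfs_qm_dqalloc() and xfs_qm_dqrepair() set
 * bp->b_ops = &xfs_dquot_buf_ops directly, and xfs_qm_dqtobp() passes
 * &xfs_dquot_buf_ops as the final argument to xfs_trans_read_buf() so the
 * buffer is verified as it comes off disk.
 */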
405 
406 /*
407  * Allocate a block and fill it with dquots.
408  * This is called when the bmapi finds a hole.
409  */
410 STATIC int
411 xfs_qm_dqalloc(
412 	xfs_trans_t	**tpp,
413 	xfs_mount_t	*mp,
414 	xfs_dquot_t	*dqp,
415 	xfs_inode_t	*quotip,
416 	xfs_fileoff_t	offset_fsb,
417 	xfs_buf_t	**O_bpp)
418 {
419 	xfs_fsblock_t	firstblock;
420 	xfs_bmap_free_t flist;
421 	xfs_bmbt_irec_t map;
422 	int		nmaps, error, committed;
423 	xfs_buf_t	*bp;
424 	xfs_trans_t	*tp = *tpp;
425 
426 	ASSERT(tp != NULL);
427 
428 	trace_xfs_dqalloc(dqp);
429 
430 	/*
431 	 * Initialize the bmap freelist prior to calling bmapi code.
432 	 */
433 	xfs_bmap_init(&flist, &firstblock);
434 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
435 	/*
436 	 * Return if this type of quota was turned off while we didn't
437 	 * hold the inode lock.
438 	 */
439 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
440 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
441 		return (ESRCH);
442 	}
443 
444 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
445 	nmaps = 1;
446 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
447 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
448 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
449 				&map, &nmaps, &flist);
450 	if (error)
451 		goto error0;
452 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
453 	ASSERT(nmaps == 1);
454 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
455 	       (map.br_startblock != HOLESTARTBLOCK));
456 
457 	/*
458 	 * Keep track of the blkno to save a lookup later
459 	 */
460 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
461 
462 	/* now we can just get the buffer (there's nothing to read yet) */
463 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
464 			       dqp->q_blkno,
465 			       mp->m_quotainfo->qi_dqchunklen,
466 			       0);
467 
468 	error = xfs_buf_geterror(bp);
469 	if (error)
470 		goto error1;
471 	bp->b_ops = &xfs_dquot_buf_ops;
472 
473 	/*
474 	 * Make a chunk of dquots out of this buffer and log
475 	 * the entire thing.
476 	 */
477 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
478 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
479 
480 	/*
481 	 * xfs_bmap_finish() may commit the current transaction and
482 	 * start a second transaction if the freelist is not empty.
483 	 *
484 	 * Since we still want to modify this buffer, we need to
485 	 * ensure that the buffer is not released on commit of
486 	 * the first transaction and ensure the buffer is added to the
487 	 * second transaction.
488 	 *
489 	 * If there is only one transaction then don't stop the buffer
490 	 * from being released when it commits later on.
491 	 */
492 
493 	xfs_trans_bhold(tp, bp);
494 
495 	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
496 		goto error1;
497 	}
498 
499 	if (committed) {
500 		tp = *tpp;
501 		xfs_trans_bjoin(tp, bp);
502 	} else {
503 		xfs_trans_bhold_release(tp, bp);
504 	}
505 
506 	*O_bpp = bp;
507 	return 0;
508 
509       error1:
510 	xfs_bmap_cancel(&flist);
511       error0:
512 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
513 
514 	return (error);
515 }
516 STATIC int
517 xfs_qm_dqrepair(
518 	struct xfs_mount	*mp,
519 	struct xfs_trans	*tp,
520 	struct xfs_dquot	*dqp,
521 	xfs_dqid_t		firstid,
522 	struct xfs_buf		**bpp)
523 {
524 	int			error;
525 	struct xfs_disk_dquot	*ddq;
526 	struct xfs_dqblk	*d;
527 	int			i;
528 
529 	/*
530 	 * Read the buffer without verification so we get the corrupted
531 	 * buffer returned to us. Make sure we verify it on write, though.
532 	 */
533 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
534 				   mp->m_quotainfo->qi_dqchunklen,
535 				   0, bpp, NULL);
536 
537 	if (error) {
538 		ASSERT(*bpp == NULL);
539 		return XFS_ERROR(error);
540 	}
541 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
542 
543 	ASSERT(xfs_buf_islocked(*bpp));
544 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
545 
546 	/* Do the actual repair of dquots in this buffer */
547 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
548 		ddq = &d[i].dd_diskdq;
549 		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
550 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
551 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
552 		if (error) {
553 			/* repair failed, we're screwed */
554 			xfs_trans_brelse(tp, *bpp);
555 			return XFS_ERROR(EIO);
556 		}
557 	}
558 
559 	return 0;
560 }
561 
562 /*
563  * Maps a dquot to the buffer containing its on-disk version.
564  * This returns a ptr to the buffer containing the on-disk dquot
565  * in the bpp param, and a ptr to the on-disk dquot within that buffer
566  */
567 STATIC int
568 xfs_qm_dqtobp(
569 	xfs_trans_t		**tpp,
570 	xfs_dquot_t		*dqp,
571 	xfs_disk_dquot_t	**O_ddpp,
572 	xfs_buf_t		**O_bpp,
573 	uint			flags)
574 {
575 	struct xfs_bmbt_irec	map;
576 	int			nmaps = 1, error;
577 	struct xfs_buf		*bp;
578 	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
579 	struct xfs_mount	*mp = dqp->q_mount;
580 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
581 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
582 
583 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
584 
585 	xfs_ilock(quotip, XFS_ILOCK_SHARED);
586 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
587 		/*
588 		 * Return if this type of quota was turned off while we
589 		 * didn't hold the quota inode lock.
590 		 */
591 		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
592 		return ESRCH;
593 	}
594 
595 	/*
596 	 * Find the block map; no allocations yet
597 	 */
598 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
599 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
600 
601 	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
602 	if (error)
603 		return error;
604 
605 	ASSERT(nmaps == 1);
606 	ASSERT(map.br_blockcount == 1);
607 
608 	/*
609 	 * Offset of dquot in the (fixed sized) dquot chunk.
610 	 */
611 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
612 		sizeof(xfs_dqblk_t);
613 
614 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
615 	if (map.br_startblock == HOLESTARTBLOCK) {
616 		/*
617 		 * We don't allocate unless we're asked to
618 		 */
619 		if (!(flags & XFS_QMOPT_DQALLOC))
620 			return ENOENT;
621 
622 		ASSERT(tp);
623 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
624 					dqp->q_fileoffset, &bp);
625 		if (error)
626 			return error;
627 		tp = *tpp;
628 	} else {
629 		trace_xfs_dqtobp_read(dqp);
630 
631 		/*
632 		 * store the blkno etc so that we don't have to do the
633 		 * mapping all the time
634 		 */
635 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
636 
637 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
638 					   dqp->q_blkno,
639 					   mp->m_quotainfo->qi_dqchunklen,
640 					   0, &bp, &xfs_dquot_buf_ops);
641 
642 		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
643 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
644 						mp->m_quotainfo->qi_dqperchunk;
645 			ASSERT(bp == NULL);
646 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
647 		}
648 
649 		if (error) {
650 			ASSERT(bp == NULL);
651 			return XFS_ERROR(error);
652 		}
653 	}
654 
655 	ASSERT(xfs_buf_islocked(bp));
656 	*O_bpp = bp;
657 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
658 
659 	return (0);
660 }
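/*
 * For example (illustrative values only): with qi_dqperchunk == 30 and
 * id == 71, q_fileoffset = 71 / 30 = 2 (the dquot lives in the third chunk
 * of the quota inode) and q_bufoffset = (71 % 30) * sizeof(xfs_dqblk_t),
 * i.e. the 12th dquot slot within that chunk's buffer.
 */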
661 
662 
663 /*
664  * Read in the on-disk dquot using dqtobp(), then copy it to an incore version,
665  * and release the buffer immediately.
666  *
667  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it does not yet exist.
668  */
669 int
670 xfs_qm_dqread(
671 	struct xfs_mount	*mp,
672 	xfs_dqid_t		id,
673 	uint			type,
674 	uint			flags,
675 	struct xfs_dquot	**O_dqpp)
676 {
677 	struct xfs_dquot	*dqp;
678 	struct xfs_disk_dquot	*ddqp;
679 	struct xfs_buf		*bp;
680 	struct xfs_trans	*tp = NULL;
681 	int			error;
682 	int			cancelflags = 0;
683 
684 
685 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
686 
687 	dqp->dq_flags = type;
688 	dqp->q_core.d_id = cpu_to_be32(id);
689 	dqp->q_mount = mp;
690 	INIT_LIST_HEAD(&dqp->q_lru);
691 	mutex_init(&dqp->q_qlock);
692 	init_waitqueue_head(&dqp->q_pinwait);
693 
694 	/*
695 	 * Because we want to use a counting completion, complete
696 	 * the flush completion once to allow a single access to
697 	 * the flush completion without blocking.
698 	 */
699 	init_completion(&dqp->q_flush);
700 	complete(&dqp->q_flush);
701 
702 	/*
703 	 * Make sure group quotas have a different lock class than user
704 	 * quotas.
705 	 */
706 	if (!(type & XFS_DQ_USER))
707 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
708 
709 	XFS_STATS_INC(xs_qm_dquot);
710 
711 	trace_xfs_dqread(dqp);
712 
713 	if (flags & XFS_QMOPT_DQALLOC) {
714 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
715 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
716 					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
717 		if (error)
718 			goto error1;
719 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
720 	}
721 
722 	/*
723 	 * Get a pointer to the on-disk dquot and the buffer containing it;
724 	 * dqp already knows its own type (GROUP/USER).
725 	 */
726 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
727 	if (error) {
728 		/*
729 		 * This can happen if quotas got turned off (ESRCH),
730 		 * or if the dquot didn't exist on disk and we ask to
731 		 * allocate (ENOENT).
732 		 */
733 		trace_xfs_dqread_fail(dqp);
734 		cancelflags |= XFS_TRANS_ABORT;
735 		goto error1;
736 	}
737 
738 	/* copy everything from disk dquot to the incore dquot */
739 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
740 	xfs_qm_dquot_logitem_init(dqp);
741 
742 	/*
743 	 * Reservation counters are defined as reservation plus current usage
744 	 * to avoid having to add every time.
745 	 */
746 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
747 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
748 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
749 
750 	/* initialize the dquot speculative prealloc thresholds */
751 	xfs_dquot_set_prealloc_limits(dqp);
752 
753 	/* Mark the buf so that this will stay incore a little longer */
754 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
755 
756 	/*
757 	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
758 	 * so we need to release it with xfs_trans_brelse().
759 	 * The strategy here is identical to that of inodes; we lock
760 	 * the dquot in xfs_qm_dqget() before making it accessible to
761 	 * others. This is because dquots, like inodes, need a good level of
762 	 * concurrency, and we don't want to take locks on the entire buffers
763 	 * for dquot accesses.
764 	 * Note also that the dquot buffer may even be dirty at this point, if
765 	 * this particular dquot was repaired. We still aren't afraid to
766 	 * brelse it because we have the changes incore.
767 	 */
768 	ASSERT(xfs_buf_islocked(bp));
769 	xfs_trans_brelse(tp, bp);
770 
771 	if (tp) {
772 		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
773 		if (error)
774 			goto error0;
775 	}
776 
777 	*O_dqpp = dqp;
778 	return error;
779 
780 error1:
781 	if (tp)
782 		xfs_trans_cancel(tp, cancelflags);
783 error0:
784 	xfs_qm_dqdestroy(dqp);
785 	*O_dqpp = NULL;
786 	return error;
787 }
788 
789 /*
790  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
791  * a locked dquot, doing an allocation (if requested) as needed.
792  * When both an inode and an id are given, the inode's id takes precedence.
793  * That is, if the id changes while we don't hold the ilock inside this
794  * function, the new dquot is returned, not necessarily the one requested
795  * in the id argument.
796  */
797 int
798 xfs_qm_dqget(
799 	xfs_mount_t	*mp,
800 	xfs_inode_t	*ip,	  /* locked inode (optional) */
801 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
802 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
803 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
804 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
805 {
806 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
807 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
808 	struct xfs_dquot	*dqp;
809 	int			error;
810 
811 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
812 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
813 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
814 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
815 		return (ESRCH);
816 	}
817 
818 #ifdef DEBUG
819 	if (xfs_do_dqerror) {
820 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
821 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
822 			xfs_debug(mp, "Returning error in dqget");
823 			return (EIO);
824 		}
825 	}
826 
827 	ASSERT(type == XFS_DQ_USER ||
828 	       type == XFS_DQ_PROJ ||
829 	       type == XFS_DQ_GROUP);
830 	if (ip) {
831 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
832 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
833 	}
834 #endif
835 
836 restart:
837 	mutex_lock(&qi->qi_tree_lock);
838 	dqp = radix_tree_lookup(tree, id);
839 	if (dqp) {
840 		xfs_dqlock(dqp);
841 		if (dqp->dq_flags & XFS_DQ_FREEING) {
842 			xfs_dqunlock(dqp);
843 			mutex_unlock(&qi->qi_tree_lock);
844 			trace_xfs_dqget_freeing(dqp);
845 			delay(1);
846 			goto restart;
847 		}
848 
849 		dqp->q_nrefs++;
850 		mutex_unlock(&qi->qi_tree_lock);
851 
852 		trace_xfs_dqget_hit(dqp);
853 		XFS_STATS_INC(xs_qm_dqcachehits);
854 		*O_dqpp = dqp;
855 		return 0;
856 	}
857 	mutex_unlock(&qi->qi_tree_lock);
858 	XFS_STATS_INC(xs_qm_dqcachemisses);
859 
860 	/*
861 	 * Dquot cache miss. We don't want to keep the inode lock across
862 	 * a (potential) disk read. Also we don't want to deal with the lock
863 	 * ordering between the quota inode and this inode. OTOH, dropping the inode
864 	 * lock here means dealing with a chown that can happen before
865 	 * we re-acquire the lock.
866 	 */
867 	if (ip)
868 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
869 
870 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
871 
872 	if (ip)
873 		xfs_ilock(ip, XFS_ILOCK_EXCL);
874 
875 	if (error)
876 		return error;
877 
878 	if (ip) {
879 		/*
880 		 * A dquot could be attached to this inode by now, since
881 		 * we had dropped the ilock.
882 		 */
883 		if (xfs_this_quota_on(mp, type)) {
884 			struct xfs_dquot	*dqp1;
885 
886 			dqp1 = xfs_inode_dquot(ip, type);
887 			if (dqp1) {
888 				xfs_qm_dqdestroy(dqp);
889 				dqp = dqp1;
890 				xfs_dqlock(dqp);
891 				goto dqret;
892 			}
893 		} else {
894 			/* inode stays locked on return */
895 			xfs_qm_dqdestroy(dqp);
896 			return XFS_ERROR(ESRCH);
897 		}
898 	}
899 
900 	mutex_lock(&qi->qi_tree_lock);
901 	error = -radix_tree_insert(tree, id, dqp);
902 	if (unlikely(error)) {
903 		WARN_ON(error != EEXIST);
904 
905 		/*
906 		 * Duplicate found. Just throw away the new dquot and start
907 		 * over.
908 		 */
909 		mutex_unlock(&qi->qi_tree_lock);
910 		trace_xfs_dqget_dup(dqp);
911 		xfs_qm_dqdestroy(dqp);
912 		XFS_STATS_INC(xs_qm_dquot_dups);
913 		goto restart;
914 	}
915 
916 	/*
917 	 * We return a locked dquot to the caller, with a reference taken
918 	 */
919 	xfs_dqlock(dqp);
920 	dqp->q_nrefs = 1;
921 
922 	qi->qi_dquots++;
923 	mutex_unlock(&qi->qi_tree_lock);
924 
925  dqret:
926 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
927 	trace_xfs_dqget_miss(dqp);
928 	*O_dqpp = dqp;
929 	return (0);
930 }
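/*
 * Illustrative caller sketch (assumed usage, not taken from this file):
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER, XFS_QMOPT_DQALLOC,
 *			     &dqp);
 *	if (error)
 *		return error;
 *	xfs_dqunlock(dqp);	(dqget returns the dquot locked, with a ref)
 *	... use dqp ...
 *	xfs_qm_dqrele(dqp);	(drop the reference when done)
 */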
931 
932 
933 STATIC void
934 xfs_qm_dqput_final(
935 	struct xfs_dquot	*dqp)
936 {
937 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
938 	struct xfs_dquot	*gdqp;
939 	struct xfs_dquot	*pdqp;
940 
941 	trace_xfs_dqput_free(dqp);
942 
943 	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
944 		XFS_STATS_INC(xs_qm_dquot_unused);
945 
946 	/*
947 	 * If we just added a udquot to the freelist, then we want to release
948 	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
949 	 * keep the gdquot/pdquot from getting reclaimed.
950 	 */
951 	gdqp = dqp->q_gdquot;
952 	if (gdqp) {
953 		xfs_dqlock(gdqp);
954 		dqp->q_gdquot = NULL;
955 	}
956 
957 	pdqp = dqp->q_pdquot;
958 	if (pdqp) {
959 		xfs_dqlock(pdqp);
960 		dqp->q_pdquot = NULL;
961 	}
962 	xfs_dqunlock(dqp);
963 
964 	/*
965 	 * If we had a group/project quota hint, release it now.
966 	 */
967 	if (gdqp)
968 		xfs_qm_dqput(gdqp);
969 	if (pdqp)
970 		xfs_qm_dqput(pdqp);
971 }
972 
973 /*
974  * Release a reference to the dquot (decrement ref-count) and unlock it.
975  *
976  * If there is a group quota attached to this dquot, carefully release that
977  * too without tripping over deadlocks'n'stuff.
978  */
979 void
980 xfs_qm_dqput(
981 	struct xfs_dquot	*dqp)
982 {
983 	ASSERT(dqp->q_nrefs > 0);
984 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
985 
986 	trace_xfs_dqput(dqp);
987 
988 	if (--dqp->q_nrefs > 0)
989 		xfs_dqunlock(dqp);
990 	else
991 		xfs_qm_dqput_final(dqp);
992 }
993 
994 /*
995  * Release a dquot. Flush it if dirty, then dqput() it.
996  * dquot must not be locked.
997  */
998 void
999 xfs_qm_dqrele(
1000 	xfs_dquot_t	*dqp)
1001 {
1002 	if (!dqp)
1003 		return;
1004 
1005 	trace_xfs_dqrele(dqp);
1006 
1007 	xfs_dqlock(dqp);
1008 	/*
1009 	 * We don't bother flushing the dquot here even if it is dirty;
1010 	 * that would create stutters that we want to avoid.
1011 	 * Instead we do a delayed write when we try to reclaim
1012 	 * a dirty dquot. Also xfs_sync will take part of the burden...
1013 	 */
1014 	xfs_qm_dqput(dqp);
1015 }
1016 
1017 /*
1018  * This is the dquot flushing I/O completion routine.  It is called
1019  * from interrupt level when the buffer containing the dquot is
1020  * flushed to disk.  It is responsible for removing the dquot logitem
1021  * from the AIL if it has not been re-logged, and unlocking the dquot's
1022  * flush lock. This behavior is very similar to that of inodes.
1023  */
1024 STATIC void
1025 xfs_qm_dqflush_done(
1026 	struct xfs_buf		*bp,
1027 	struct xfs_log_item	*lip)
1028 {
1029 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
1030 	xfs_dquot_t		*dqp = qip->qli_dquot;
1031 	struct xfs_ail		*ailp = lip->li_ailp;
1032 
1033 	/*
1034 	 * We only want to pull the item from the AIL if its
1035 	 * location in the log has not changed since we started the flush.
1036 	 * Thus, we only bother if the dquot's lsn has
1037 	 * not changed. First we check the lsn outside the lock
1038 	 * since it's cheaper, and then we recheck while
1039 	 * holding the lock before removing the dquot from the AIL.
1040 	 */
1041 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1042 	    lip->li_lsn == qip->qli_flush_lsn) {
1043 
1044 		/* xfs_trans_ail_delete() drops the AIL lock. */
1045 		spin_lock(&ailp->xa_lock);
1046 		if (lip->li_lsn == qip->qli_flush_lsn)
1047 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1048 		else
1049 			spin_unlock(&ailp->xa_lock);
1050 	}
1051 
1052 	/*
1053 	 * Release the dq's flush lock since we're done with it.
1054 	 */
1055 	xfs_dqfunlock(dqp);
1056 }
1057 
1058 /*
1059  * Write a modified dquot to disk.
1060  * The dquot must be locked and the flush lock held by the caller.
1061  * The flush lock will not be unlocked until the dquot reaches the disk,
1062  * but the dquot is free to be unlocked and modified by the caller
1063  * in the interim. Dquot is still locked on return. This behavior is
1064  * identical to that of inodes.
1065  */
1066 int
1067 xfs_qm_dqflush(
1068 	struct xfs_dquot	*dqp,
1069 	struct xfs_buf		**bpp)
1070 {
1071 	struct xfs_mount	*mp = dqp->q_mount;
1072 	struct xfs_buf		*bp;
1073 	struct xfs_disk_dquot	*ddqp;
1074 	int			error;
1075 
1076 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1077 	ASSERT(!completion_done(&dqp->q_flush));
1078 
1079 	trace_xfs_dqflush(dqp);
1080 
1081 	*bpp = NULL;
1082 
1083 	xfs_qm_dqunpin_wait(dqp);
1084 
1085 	/*
1086 	 * This may have been unpinned because the filesystem is shutting
1087 	 * down forcibly. If that's the case we must not write this dquot
1088 	 * to disk, because the log record didn't make it to disk.
1089 	 *
1090 	 * We also have to remove the log item from the AIL in this case,
1091  * as we wait for an empty AIL as part of the unmount process.
1092 	 */
1093 	if (XFS_FORCED_SHUTDOWN(mp)) {
1094 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1095 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1096 
1097 		spin_lock(&mp->m_ail->xa_lock);
1098 		if (lip->li_flags & XFS_LI_IN_AIL)
1099 			xfs_trans_ail_delete(mp->m_ail, lip,
1100 					     SHUTDOWN_CORRUPT_INCORE);
1101 		else
1102 			spin_unlock(&mp->m_ail->xa_lock);
1103 		error = XFS_ERROR(EIO);
1104 		goto out_unlock;
1105 	}
1106 
1107 	/*
1108 	 * Get the buffer containing the on-disk dquot
1109 	 */
1110 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1111 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
1112 	if (error)
1113 		goto out_unlock;
1114 
1115 	/*
1116 	 * Calculate the location of the dquot inside the buffer.
1117 	 */
1118 	ddqp = bp->b_addr + dqp->q_bufoffset;
1119 
1120 	/*
1121 	 * A simple sanity check in case we got a corrupted dquot.
1122 	 */
1123 	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1124 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1125 	if (error) {
1126 		xfs_buf_relse(bp);
1127 		xfs_dqfunlock(dqp);
1128 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1129 		return XFS_ERROR(EIO);
1130 	}
1131 
1132 	/* This is the only portion of data that needs to persist */
1133 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1134 
1135 	/*
1136 	 * Clear the dirty field and remember the flush lsn for later use.
1137 	 */
1138 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1139 
1140 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1141 					&dqp->q_logitem.qli_item.li_lsn);
1142 
1143 	/*
1144 	 * copy the lsn into the on-disk dquot now while we have the in memory
1145 	 * dquot here. This can't be done later in the write verifier as we
1146 	 * can't get access to the log item at that point in time.
1147 	 *
1148 	 * We also calculate the CRC here so that the on-disk dquot in the
1149 	 * buffer always has a valid CRC. This ensures there is no possibility
1150 	 * of a dquot without an up-to-date CRC getting to disk.
1151 	 */
1152 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1153 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1154 
1155 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1156 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1157 				 XFS_DQUOT_CRC_OFF);
1158 	}
1159 
1160 	/*
1161 	 * Attach an iodone routine so that we can remove this dquot from the
1162 	 * AIL and release the flush lock once the dquot is synced to disk.
1163 	 */
1164 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1165 				  &dqp->q_logitem.qli_item);
1166 
1167 	/*
1168 	 * If the buffer is pinned then push on the log so we won't
1169 	 * get stuck waiting in the write for too long.
1170 	 */
1171 	if (xfs_buf_ispinned(bp)) {
1172 		trace_xfs_dqflush_force(dqp);
1173 		xfs_log_force(mp, 0);
1174 	}
1175 
1176 	trace_xfs_dqflush_done(dqp);
1177 	*bpp = bp;
1178 	return 0;
1179 
1180 out_unlock:
1181 	xfs_dqfunlock(dqp);
1182 	return XFS_ERROR(EIO);
1183 }
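/*
 * Illustrative caller sketch (assumed usage; buffer_list is a caller-owned
 * delayed-write list, not something defined in this file):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	if (!xfs_dqflock_nowait(dqp))
 *		return EAGAIN;
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */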
1184 
1185 /*
1186  * Lock two xfs_dquot structures.
1187  *
1188  * To avoid deadlocks we always lock the quota structure with
1189  * the lower id first.
1190  */
1191 void
1192 xfs_dqlock2(
1193 	xfs_dquot_t	*d1,
1194 	xfs_dquot_t	*d2)
1195 {
1196 	if (d1 && d2) {
1197 		ASSERT(d1 != d2);
1198 		if (be32_to_cpu(d1->q_core.d_id) >
1199 		    be32_to_cpu(d2->q_core.d_id)) {
1200 			mutex_lock(&d2->q_qlock);
1201 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1202 		} else {
1203 			mutex_lock(&d1->q_qlock);
1204 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1205 		}
1206 	} else if (d1) {
1207 		mutex_lock(&d1->q_qlock);
1208 	} else if (d2) {
1209 		mutex_lock(&d2->q_qlock);
1210 	}
1211 }
1212 
1213 int __init
1214 xfs_qm_init(void)
1215 {
1216 	xfs_qm_dqzone =
1217 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1218 	if (!xfs_qm_dqzone)
1219 		goto out;
1220 
1221 	xfs_qm_dqtrxzone =
1222 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1223 	if (!xfs_qm_dqtrxzone)
1224 		goto out_free_dqzone;
1225 
1226 	return 0;
1227 
1228 out_free_dqzone:
1229 	kmem_zone_destroy(xfs_qm_dqzone);
1230 out:
1231 	return -ENOMEM;
1232 }
1233 
1234 void
1235 xfs_qm_exit(void)
1236 {
1237 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1238 	kmem_zone_destroy(xfs_qm_dqzone);
1239 }
1240