xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision ca79522c)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_bit.h"
21 #include "xfs_log.h"
22 #include "xfs_trans.h"
23 #include "xfs_sb.h"
24 #include "xfs_ag.h"
25 #include "xfs_alloc.h"
26 #include "xfs_quota.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_inode.h"
30 #include "xfs_bmap.h"
31 #include "xfs_rtalloc.h"
32 #include "xfs_error.h"
33 #include "xfs_itable.h"
34 #include "xfs_attr.h"
35 #include "xfs_buf_item.h"
36 #include "xfs_trans_space.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_qm.h"
39 #include "xfs_cksum.h"
40 #include "xfs_trace.h"
41 
42 /*
43  * Lock order:
44  *
45  * ip->i_lock
46  *   qi->qi_tree_lock
47  *     dquot->q_qlock (xfs_dqlock() and friends)
48  *       dquot->q_flush (xfs_dqflock() and friends)
49  *       qi->qi_lru_lock
50  *
51  * If two dquots need to be locked the order is user before group/project;
52  * otherwise it is by the lowest id first. See xfs_dqlock2.
53  */
54 
55 #ifdef DEBUG
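/*
 * Error injection knobs: when xfs_do_dqerror is set, xfs_qm_dqget() below
 * fails every xfs_dqerror_mod'th request against xfs_dqerror_target with
 * EIO so that quota error paths can be exercised.
 */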
56 xfs_buftarg_t *xfs_dqerror_target;
57 int xfs_do_dqerror;
58 int xfs_dqreq_num;
59 int xfs_dqerror_mod = 33;
60 #endif
61 
62 struct kmem_zone		*xfs_qm_dqtrxzone;
63 static struct kmem_zone		*xfs_qm_dqzone;
64 
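/*
 * Lock class used to give non-user dquots their own lockdep class; see the
 * lockdep_set_class() call in xfs_qm_dqread() below.
 */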
65 static struct lock_class_key xfs_dquot_other_class;
66 
67 /*
68  * This is called to free all the memory associated with a dquot
69  */
70 void
71 xfs_qm_dqdestroy(
72 	xfs_dquot_t	*dqp)
73 {
74 	ASSERT(list_empty(&dqp->q_lru));
75 
76 	mutex_destroy(&dqp->q_qlock);
77 	kmem_zone_free(xfs_qm_dqzone, dqp);
78 
79 	XFS_STATS_DEC(xs_qm_dquot);
80 }
81 
82 /*
83  * If default limits are in force, push them into the dquot now.
84  * We overwrite the dquot limits only if they are zero and this
85  * is not the root dquot.
86  */
87 void
88 xfs_qm_adjust_dqlimits(
89 	struct xfs_mount	*mp,
90 	struct xfs_dquot	*dq)
91 {
92 	struct xfs_quotainfo	*q = mp->m_quotainfo;
93 	struct xfs_disk_dquot	*d = &dq->q_core;
94 	int			prealloc = 0;
95 
96 	ASSERT(d->d_id);
97 
98 	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
99 		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
100 		prealloc = 1;
101 	}
102 	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
103 		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
104 		prealloc = 1;
105 	}
106 	if (q->qi_isoftlimit && !d->d_ino_softlimit)
107 		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
108 	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
109 		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
110 	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
111 		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
112 	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
113 		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
114 
115 	if (prealloc)
116 		xfs_dquot_set_prealloc_limits(dq);
117 }
118 
119 /*
120  * Check the limits and timers of a dquot and start or reset timers
121  * if necessary.
122  * This gets called even when quota enforcement is OFF, which makes our
123  * life a little less complicated; we simply don't reject any quota
124  * reservations in that case.
125  * We also return 0 for the timer values in Q_GETQUOTA calls when
126  * enforcement is off.
127  * In contrast, warnings are a little different in that they don't
128  * 'automatically' get started when limits get exceeded.  They do
129  * get reset to zero, however, when we find the count to be under
130  * the soft limit (they are only ever set non-zero via userspace).
131  */
132 void
133 xfs_qm_adjust_dqtimers(
134 	xfs_mount_t		*mp,
135 	xfs_disk_dquot_t	*d)
136 {
137 	ASSERT(d->d_id);
138 
139 #ifdef DEBUG
140 	if (d->d_blk_hardlimit)
141 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
142 		       be64_to_cpu(d->d_blk_hardlimit));
143 	if (d->d_ino_hardlimit)
144 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
145 		       be64_to_cpu(d->d_ino_hardlimit));
146 	if (d->d_rtb_hardlimit)
147 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
148 		       be64_to_cpu(d->d_rtb_hardlimit));
149 #endif
150 
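	/*
	 * Each resource (blocks, inodes, realtime blocks) follows the same
	 * pattern below: start a timer when usage first exceeds a limit,
	 * reset the warning count while no timer is running and usage is
	 * within the limits, and clear a running timer once usage drops
	 * back under the limits.
	 */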
151 	if (!d->d_btimer) {
152 		if ((d->d_blk_softlimit &&
153 		     (be64_to_cpu(d->d_bcount) >
154 		      be64_to_cpu(d->d_blk_softlimit))) ||
155 		    (d->d_blk_hardlimit &&
156 		     (be64_to_cpu(d->d_bcount) >
157 		      be64_to_cpu(d->d_blk_hardlimit)))) {
158 			d->d_btimer = cpu_to_be32(get_seconds() +
159 					mp->m_quotainfo->qi_btimelimit);
160 		} else {
161 			d->d_bwarns = 0;
162 		}
163 	} else {
164 		if ((!d->d_blk_softlimit ||
165 		     (be64_to_cpu(d->d_bcount) <=
166 		      be64_to_cpu(d->d_blk_softlimit))) &&
167 		    (!d->d_blk_hardlimit ||
168 		    (be64_to_cpu(d->d_bcount) <=
169 		     be64_to_cpu(d->d_blk_hardlimit)))) {
170 			d->d_btimer = 0;
171 		}
172 	}
173 
174 	if (!d->d_itimer) {
175 		if ((d->d_ino_softlimit &&
176 		     (be64_to_cpu(d->d_icount) >
177 		      be64_to_cpu(d->d_ino_softlimit))) ||
178 		    (d->d_ino_hardlimit &&
179 		     (be64_to_cpu(d->d_icount) >
180 		      be64_to_cpu(d->d_ino_hardlimit)))) {
181 			d->d_itimer = cpu_to_be32(get_seconds() +
182 					mp->m_quotainfo->qi_itimelimit);
183 		} else {
184 			d->d_iwarns = 0;
185 		}
186 	} else {
187 		if ((!d->d_ino_softlimit ||
188 		     (be64_to_cpu(d->d_icount) <=
189 		      be64_to_cpu(d->d_ino_softlimit)))  &&
190 		    (!d->d_ino_hardlimit ||
191 		     (be64_to_cpu(d->d_icount) <=
192 		      be64_to_cpu(d->d_ino_hardlimit)))) {
193 			d->d_itimer = 0;
194 		}
195 	}
196 
197 	if (!d->d_rtbtimer) {
198 		if ((d->d_rtb_softlimit &&
199 		     (be64_to_cpu(d->d_rtbcount) >
200 		      be64_to_cpu(d->d_rtb_softlimit))) ||
201 		    (d->d_rtb_hardlimit &&
202 		     (be64_to_cpu(d->d_rtbcount) >
203 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
204 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
205 					mp->m_quotainfo->qi_rtbtimelimit);
206 		} else {
207 			d->d_rtbwarns = 0;
208 		}
209 	} else {
210 		if ((!d->d_rtb_softlimit ||
211 		     (be64_to_cpu(d->d_rtbcount) <=
212 		      be64_to_cpu(d->d_rtb_softlimit))) &&
213 		    (!d->d_rtb_hardlimit ||
214 		     (be64_to_cpu(d->d_rtbcount) <=
215 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
216 			d->d_rtbtimer = 0;
217 		}
218 	}
219 }
220 
221 /*
222  * initialize a buffer full of dquots and log the whole thing
223  */
224 STATIC void
225 xfs_qm_init_dquot_blk(
226 	xfs_trans_t	*tp,
227 	xfs_mount_t	*mp,
228 	xfs_dqid_t	id,
229 	uint		type,
230 	xfs_buf_t	*bp)
231 {
232 	struct xfs_quotainfo	*q = mp->m_quotainfo;
233 	xfs_dqblk_t	*d;
234 	int		curid, i;
235 
236 	ASSERT(tp);
237 	ASSERT(xfs_buf_islocked(bp));
238 
239 	d = bp->b_addr;
240 
241 	/*
242 	 * ID of the first dquot in the block - ids are zero based.
243 	 */
244 	curid = id - (id % q->qi_dqperchunk);
245 	ASSERT(curid >= 0);
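	/*
	 * Illustrative example (qi_dqperchunk varies with the filesystem):
	 * if it were 30, an id of 73 would give curid 60, the first id in
	 * that chunk.
	 */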
246 	memset(d, 0, BBTOB(q->qi_dqchunklen));
247 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
248 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
249 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
250 		d->dd_diskdq.d_id = cpu_to_be32(curid);
251 		d->dd_diskdq.d_flags = type;
252 		if (xfs_sb_version_hascrc(&mp->m_sb))
253 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
254 	}
255 
256 	xfs_trans_dquot_buf(tp, bp,
257 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
258 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
259 			     XFS_BLF_GDQUOT_BUF)));
260 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
261 }
262 
263 /*
264  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
265  * watermarks correspond to the soft and hard limits by default. If a soft limit
266  * is not specified, we use 95% of the hard limit.
267  */
268 void
269 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
270 {
271 	__uint64_t space;
272 
273 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
274 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
275 	if (!dqp->q_prealloc_lo_wmark) {
276 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
277 		do_div(dqp->q_prealloc_lo_wmark, 100);
278 		dqp->q_prealloc_lo_wmark *= 95;
279 	}
280 
281 	space = dqp->q_prealloc_hi_wmark;
282 
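	/*
	 * Worked example, assuming a 1000 block hard limit and no soft
	 * limit: the low watermark above becomes 950 blocks (95%), and
	 * q_low_space below holds 10, 30 and 50 blocks for the 1%, 3%
	 * and 5% thresholds.
	 */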
283 	do_div(space, 100);
284 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
285 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
286 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
287 }
288 
289 STATIC void
290 xfs_dquot_buf_calc_crc(
291 	struct xfs_mount	*mp,
292 	struct xfs_buf		*bp)
293 {
294 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
295 	int			i;
296 
297 	if (!xfs_sb_version_hascrc(&mp->m_sb))
298 		return;
299 
300 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
301 		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
302 				 offsetof(struct xfs_dqblk, dd_crc));
303 	}
304 }
305 
306 STATIC bool
307 xfs_dquot_buf_verify_crc(
308 	struct xfs_mount	*mp,
309 	struct xfs_buf		*bp)
310 {
311 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
312 	int			ndquots;
313 	int			i;
314 
315 	if (!xfs_sb_version_hascrc(&mp->m_sb))
316 		return true;
317 
318 	/*
319 	 * if we are in log recovery, the quota subsystem has not been
320 	 * initialised so we have no quotainfo structure. In that case, we need
321 	 * to manually calculate the number of dquots in the buffer.
322 	 */
323 	if (mp->m_quotainfo)
324 		ndquots = mp->m_quotainfo->qi_dqperchunk;
325 	else
326 		ndquots = xfs_qm_calc_dquots_per_chunk(mp,
327 					XFS_BB_TO_FSB(mp, bp->b_length));
328 
329 	for (i = 0; i < ndquots; i++, d++) {
330 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
331 				 offsetof(struct xfs_dqblk, dd_crc)))
332 			return false;
333 		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
334 			return false;
335 	}
336 
337 	return true;
338 }
339 
340 STATIC bool
341 xfs_dquot_buf_verify(
342 	struct xfs_mount	*mp,
343 	struct xfs_buf		*bp)
344 {
345 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
346 	xfs_dqid_t		id = 0;
347 	int			ndquots;
348 	int			i;
349 
350 	/*
351 	 * if we are in log recovery, the quota subsystem has not been
352 	 * initialised so we have no quotainfo structure. In that case, we need
353 	 * to manually calculate the number of dquots in the buffer.
354 	 */
355 	if (mp->m_quotainfo)
356 		ndquots = mp->m_quotainfo->qi_dqperchunk;
357 	else
358 		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
359 
360 	/*
361 	 * On the first read of the buffer, verify that each dquot is valid.
362 	 * We don't know what the id of the dquot is supposed to be, just that
363 	 * they should be increasing monotonically within the buffer. If the
364 	 * first id is corrupt, then it will fail on the second dquot in the
365 	 * buffer so corruptions could point to the wrong dquot in this case.
366 	 */
367 	for (i = 0; i < ndquots; i++) {
368 		struct xfs_disk_dquot	*ddq;
369 		int			error;
370 
371 		ddq = &d[i].dd_diskdq;
372 
373 		if (i == 0)
374 			id = be32_to_cpu(ddq->d_id);
375 
376 		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
377 				       "xfs_dquot_buf_verify");
378 		if (error)
379 			return false;
380 	}
381 	return true;
382 }
383 
384 static void
385 xfs_dquot_buf_read_verify(
386 	struct xfs_buf	*bp)
387 {
388 	struct xfs_mount	*mp = bp->b_target->bt_mount;
389 
390 	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
391 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
392 		xfs_buf_ioerror(bp, EFSCORRUPTED);
393 	}
394 }
395 
396 void
397 xfs_dquot_buf_write_verify(
398 	struct xfs_buf	*bp)
399 {
400 	struct xfs_mount	*mp = bp->b_target->bt_mount;
401 
402 	if (!xfs_dquot_buf_verify(mp, bp)) {
403 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
404 		xfs_buf_ioerror(bp, EFSCORRUPTED);
405 		return;
406 	}
407 	xfs_dquot_buf_calc_crc(mp, bp);
408 }
409 
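/*
 * Verifier ops attached to every dquot buffer: reads are CRC checked and
 * then content checked, writes are content checked and have their CRCs
 * recomputed just before the buffer goes to disk.
 */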
410 const struct xfs_buf_ops xfs_dquot_buf_ops = {
411 	.verify_read = xfs_dquot_buf_read_verify,
412 	.verify_write = xfs_dquot_buf_write_verify,
413 };
414 
415 /*
416  * Allocate a block and fill it with dquots.
417  * This is called when the bmapi finds a hole.
418  */
419 STATIC int
420 xfs_qm_dqalloc(
421 	xfs_trans_t	**tpp,
422 	xfs_mount_t	*mp,
423 	xfs_dquot_t	*dqp,
424 	xfs_inode_t	*quotip,
425 	xfs_fileoff_t	offset_fsb,
426 	xfs_buf_t	**O_bpp)
427 {
428 	xfs_fsblock_t	firstblock;
429 	xfs_bmap_free_t flist;
430 	xfs_bmbt_irec_t map;
431 	int		nmaps, error, committed;
432 	xfs_buf_t	*bp;
433 	xfs_trans_t	*tp = *tpp;
434 
435 	ASSERT(tp != NULL);
436 
437 	trace_xfs_dqalloc(dqp);
438 
439 	/*
440 	 * Initialize the bmap freelist prior to calling bmapi code.
441 	 */
442 	xfs_bmap_init(&flist, &firstblock);
443 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
444 	/*
445 	 * Return if this type of quota was turned off while we didn't
446 	 * hold the inode lock.
447 	 */
448 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
449 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
450 		return (ESRCH);
451 	}
452 
453 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
454 	nmaps = 1;
455 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
456 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
457 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
458 				&map, &nmaps, &flist);
459 	if (error)
460 		goto error0;
461 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
462 	ASSERT(nmaps == 1);
463 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
464 	       (map.br_startblock != HOLESTARTBLOCK));
465 
466 	/*
467 	 * Keep track of the blkno to save a lookup later
468 	 */
469 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
470 
471 	/* now we can just get the buffer (there's nothing to read yet) */
472 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
473 			       dqp->q_blkno,
474 			       mp->m_quotainfo->qi_dqchunklen,
475 			       0);
476 
477 	error = xfs_buf_geterror(bp);
478 	if (error)
479 		goto error1;
480 	bp->b_ops = &xfs_dquot_buf_ops;
481 
482 	/*
483 	 * Make a chunk of dquots out of this buffer and log
484 	 * the entire thing.
485 	 */
486 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
487 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
488 
489 	/*
490 	 * xfs_bmap_finish() may commit the current transaction and
491 	 * start a second transaction if the freelist is not empty.
492 	 *
493 	 * Since we still want to modify this buffer, we need to
494 	 * ensure that the buffer is not released on commit of
495 	 * the first transaction and ensure the buffer is added to the
496 	 * second transaction.
497 	 *
498 	 * If there is only one transaction then don't stop the buffer
499 	 * from being released when it commits later on.
500 	 */
501 
502 	xfs_trans_bhold(tp, bp);
503 
504 	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
505 		goto error1;
506 	}
507 
508 	if (committed) {
509 		tp = *tpp;
510 		xfs_trans_bjoin(tp, bp);
511 	} else {
512 		xfs_trans_bhold_release(tp, bp);
513 	}
514 
515 	*O_bpp = bp;
516 	return 0;
517 
518       error1:
519 	xfs_bmap_cancel(&flist);
520       error0:
521 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
522 
523 	return (error);
524 }
525 STATIC int
526 xfs_qm_dqrepair(
527 	struct xfs_mount	*mp,
528 	struct xfs_trans	*tp,
529 	struct xfs_dquot	*dqp,
530 	xfs_dqid_t		firstid,
531 	struct xfs_buf		**bpp)
532 {
533 	int			error;
534 	struct xfs_disk_dquot	*ddq;
535 	struct xfs_dqblk	*d;
536 	int			i;
537 
538 	/*
539 	 * Read the buffer without verification so we get the corrupted
540 	 * buffer returned to us. Make sure we verify it on write, though.
541 	 */
542 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
543 				   mp->m_quotainfo->qi_dqchunklen,
544 				   0, bpp, NULL);
545 
546 	if (error) {
547 		ASSERT(*bpp == NULL);
548 		return XFS_ERROR(error);
549 	}
550 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
551 
552 	ASSERT(xfs_buf_islocked(*bpp));
553 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
554 
555 	/* Do the actual repair of dquots in this buffer */
556 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
557 		ddq = &d[i].dd_diskdq;
558 		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
559 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
560 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
561 		if (error) {
562 			/* repair failed, we're screwed */
563 			xfs_trans_brelse(tp, *bpp);
564 			return XFS_ERROR(EIO);
565 		}
566 	}
567 
568 	return 0;
569 }
570 
571 /*
572  * Maps a dquot to the buffer containing its on-disk version.
573  * This returns a ptr to the buffer containing the on-disk dquot
574  * in the bpp param, and a ptr to the on-disk dquot within that buffer
575  */
576 STATIC int
577 xfs_qm_dqtobp(
578 	xfs_trans_t		**tpp,
579 	xfs_dquot_t		*dqp,
580 	xfs_disk_dquot_t	**O_ddpp,
581 	xfs_buf_t		**O_bpp,
582 	uint			flags)
583 {
584 	xfs_bmbt_irec_t map;
585 	int		nmaps = 1, error;
586 	xfs_buf_t	*bp;
587 	xfs_inode_t	*quotip = XFS_DQ_TO_QIP(dqp);
588 	xfs_mount_t	*mp = dqp->q_mount;
589 	xfs_dqid_t	id = be32_to_cpu(dqp->q_core.d_id);
590 	xfs_trans_t	*tp = (tpp ? *tpp : NULL);
591 
592 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
593 
594 	xfs_ilock(quotip, XFS_ILOCK_SHARED);
595 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
596 		/*
597 		 * Return if this type of quota was turned off while we
598 		 * didn't hold the quota inode lock.
599 		 */
600 		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
601 		return ESRCH;
602 	}
603 
604 	/*
605 	 * Find the block map; no allocations yet
606 	 */
607 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
608 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
609 
610 	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
611 	if (error)
612 		return error;
613 
614 	ASSERT(nmaps == 1);
615 	ASSERT(map.br_blockcount == 1);
616 
617 	/*
618 	 * Offset of dquot in the (fixed sized) dquot chunk.
619 	 */
620 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
621 		sizeof(xfs_dqblk_t);
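	/*
	 * Illustrative example (qi_dqperchunk varies with the filesystem):
	 * if it were 30, dquot id 73 would map to file offset 2 above and
	 * to byte offset (73 % 30) * sizeof(xfs_dqblk_t) within the chunk.
	 */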
622 
623 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
624 	if (map.br_startblock == HOLESTARTBLOCK) {
625 		/*
626 		 * We don't allocate unless we're asked to
627 		 */
628 		if (!(flags & XFS_QMOPT_DQALLOC))
629 			return ENOENT;
630 
631 		ASSERT(tp);
632 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
633 					dqp->q_fileoffset, &bp);
634 		if (error)
635 			return error;
636 		tp = *tpp;
637 	} else {
638 		trace_xfs_dqtobp_read(dqp);
639 
640 		/*
641 		 * store the blkno etc so that we don't have to do the
642 		 * mapping all the time
643 		 */
644 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
645 
646 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
647 					   dqp->q_blkno,
648 					   mp->m_quotainfo->qi_dqchunklen,
649 					   0, &bp, &xfs_dquot_buf_ops);
650 
651 		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
652 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
653 						mp->m_quotainfo->qi_dqperchunk;
654 			ASSERT(bp == NULL);
655 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
656 		}
657 
658 		if (error) {
659 			ASSERT(bp == NULL);
660 			return XFS_ERROR(error);
661 		}
662 	}
663 
664 	ASSERT(xfs_buf_islocked(bp));
665 	*O_bpp = bp;
666 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
667 
668 	return (0);
669 }
670 
671 
672 /*
673  * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
674  * and release the buffer immediately.
675  *
676  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
677  */
678 int
679 xfs_qm_dqread(
680 	struct xfs_mount	*mp,
681 	xfs_dqid_t		id,
682 	uint			type,
683 	uint			flags,
684 	struct xfs_dquot	**O_dqpp)
685 {
686 	struct xfs_dquot	*dqp;
687 	struct xfs_disk_dquot	*ddqp;
688 	struct xfs_buf		*bp;
689 	struct xfs_trans	*tp = NULL;
690 	int			error;
691 	int			cancelflags = 0;
692 
693 
694 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
695 
696 	dqp->dq_flags = type;
697 	dqp->q_core.d_id = cpu_to_be32(id);
698 	dqp->q_mount = mp;
699 	INIT_LIST_HEAD(&dqp->q_lru);
700 	mutex_init(&dqp->q_qlock);
701 	init_waitqueue_head(&dqp->q_pinwait);
702 
703 	/*
704 	 * Because we want to use a counting completion, complete
705 	 * the flush completion once to allow a single access to
706 	 * the flush completion without blocking.
707 	 */
708 	init_completion(&dqp->q_flush);
709 	complete(&dqp->q_flush);
710 
711 	/*
712 	 * Make sure group quotas have a different lock class than user
713 	 * quotas.
714 	 */
715 	if (!(type & XFS_DQ_USER))
716 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
717 
718 	XFS_STATS_INC(xs_qm_dquot);
719 
720 	trace_xfs_dqread(dqp);
721 
722 	if (flags & XFS_QMOPT_DQALLOC) {
723 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
724 		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
725 					  XFS_QM_DQALLOC_LOG_RES(mp), 0,
726 					  XFS_TRANS_PERM_LOG_RES,
727 					  XFS_WRITE_LOG_COUNT);
728 		if (error)
729 			goto error1;
730 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
731 	}
732 
733 	/*
734 	 * get a pointer to the on-disk dquot and the buffer containing it;
735 	 * dqp already knows its own type (GROUP/USER).
736 	 */
737 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
738 	if (error) {
739 		/*
740 		 * This can happen if quotas got turned off (ESRCH),
741 		 * or if the dquot didn't exist on disk and we ask to
742 		 * allocate (ENOENT).
743 		 */
744 		trace_xfs_dqread_fail(dqp);
745 		cancelflags |= XFS_TRANS_ABORT;
746 		goto error1;
747 	}
748 
749 	/* copy everything from disk dquot to the incore dquot */
750 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
751 	xfs_qm_dquot_logitem_init(dqp);
752 
753 	/*
754 	 * Reservation counters are defined as reservation plus current usage
755 	 * to avoid having to add every time.
756 	 */
757 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
758 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
759 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
760 
761 	/* initialize the dquot speculative prealloc thresholds */
762 	xfs_dquot_set_prealloc_limits(dqp);
763 
764 	/* Mark the buf so that this will stay incore a little longer */
765 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
766 
767 	/*
768 	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
769 	 * so we need to release it with xfs_trans_brelse().
770 	 * The strategy here is identical to that of inodes; we lock
771 	 * the dquot in xfs_qm_dqget() before making it accessible to
772 	 * others. This is because dquots, like inodes, need a good level of
773 	 * concurrency, and we don't want to take locks on the entire buffers
774 	 * for dquot accesses.
775 	 * Note also that the dquot buffer may even be dirty at this point, if
776 	 * this particular dquot was repaired. We still aren't afraid to
777 	 * brelse it because we have the changes incore.
778 	 */
779 	ASSERT(xfs_buf_islocked(bp));
780 	xfs_trans_brelse(tp, bp);
781 
782 	if (tp) {
783 		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
784 		if (error)
785 			goto error0;
786 	}
787 
788 	*O_dqpp = dqp;
789 	return error;
790 
791 error1:
792 	if (tp)
793 		xfs_trans_cancel(tp, cancelflags);
794 error0:
795 	xfs_qm_dqdestroy(dqp);
796 	*O_dqpp = NULL;
797 	return error;
798 }
799 
800 /*
801  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
802  * locked dquot, doing an allocation (if requested) as needed.
803  * When both an inode and an id are given, the inode's id takes precedence.
804  * That is, if the id changes while we don't hold the ilock inside this
805  * function, the new dquot is returned, not necessarily the one requested
806  * in the id argument.
807  */
808 int
809 xfs_qm_dqget(
810 	xfs_mount_t	*mp,
811 	xfs_inode_t	*ip,	  /* locked inode (optional) */
812 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
813 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
814 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
815 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
816 {
817 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
818 	struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type);
819 	struct xfs_dquot	*dqp;
820 	int			error;
821 
822 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
823 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
824 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
825 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
826 		return (ESRCH);
827 	}
828 
829 #ifdef DEBUG
830 	if (xfs_do_dqerror) {
831 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
832 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
833 			xfs_debug(mp, "Returning error in dqget");
834 			return (EIO);
835 		}
836 	}
837 
838 	ASSERT(type == XFS_DQ_USER ||
839 	       type == XFS_DQ_PROJ ||
840 	       type == XFS_DQ_GROUP);
841 	if (ip) {
842 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
843 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
844 	}
845 #endif
846 
847 restart:
848 	mutex_lock(&qi->qi_tree_lock);
849 	dqp = radix_tree_lookup(tree, id);
850 	if (dqp) {
851 		xfs_dqlock(dqp);
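		/*
		 * A dquot that is being torn down (e.g. by reclaim) is
		 * marked XFS_DQ_FREEING; back off and retry until it has
		 * disappeared from the radix tree.
		 */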
852 		if (dqp->dq_flags & XFS_DQ_FREEING) {
853 			xfs_dqunlock(dqp);
854 			mutex_unlock(&qi->qi_tree_lock);
855 			trace_xfs_dqget_freeing(dqp);
856 			delay(1);
857 			goto restart;
858 		}
859 
860 		dqp->q_nrefs++;
861 		mutex_unlock(&qi->qi_tree_lock);
862 
863 		trace_xfs_dqget_hit(dqp);
864 		XFS_STATS_INC(xs_qm_dqcachehits);
865 		*O_dqpp = dqp;
866 		return 0;
867 	}
868 	mutex_unlock(&qi->qi_tree_lock);
869 	XFS_STATS_INC(xs_qm_dqcachemisses);
870 
871 	/*
872 	 * Dquot cache miss. We don't want to keep the inode lock across
873 	 * a (potential) disk read. Also we don't want to deal with the lock
874 	 * ordering between quotainode and this inode. OTOH, dropping the inode
875 	 * lock here means dealing with a chown that can happen before
876 	 * we re-acquire the lock.
877 	 */
878 	if (ip)
879 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
880 
881 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
882 
883 	if (ip)
884 		xfs_ilock(ip, XFS_ILOCK_EXCL);
885 
886 	if (error)
887 		return error;
888 
889 	if (ip) {
890 		/*
891 		 * A dquot could be attached to this inode by now, since
892 		 * we had dropped the ilock.
893 		 */
894 		if (xfs_this_quota_on(mp, type)) {
895 			struct xfs_dquot	*dqp1;
896 
897 			dqp1 = xfs_inode_dquot(ip, type);
898 			if (dqp1) {
899 				xfs_qm_dqdestroy(dqp);
900 				dqp = dqp1;
901 				xfs_dqlock(dqp);
902 				goto dqret;
903 			}
904 		} else {
905 			/* inode stays locked on return */
906 			xfs_qm_dqdestroy(dqp);
907 			return XFS_ERROR(ESRCH);
908 		}
909 	}
910 
911 	mutex_lock(&qi->qi_tree_lock);
912 	error = -radix_tree_insert(tree, id, dqp);
913 	if (unlikely(error)) {
914 		WARN_ON(error != EEXIST);
915 
916 		/*
917 		 * Duplicate found. Just throw away the new dquot and start
918 		 * over.
919 		 */
920 		mutex_unlock(&qi->qi_tree_lock);
921 		trace_xfs_dqget_dup(dqp);
922 		xfs_qm_dqdestroy(dqp);
923 		XFS_STATS_INC(xs_qm_dquot_dups);
924 		goto restart;
925 	}
926 
927 	/*
928 	 * We return a locked dquot to the caller, with a reference taken
929 	 */
930 	xfs_dqlock(dqp);
931 	dqp->q_nrefs = 1;
932 
933 	qi->qi_dquots++;
934 	mutex_unlock(&qi->qi_tree_lock);
935 
936  dqret:
937 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
938 	trace_xfs_dqget_miss(dqp);
939 	*O_dqpp = dqp;
940 	return (0);
941 }
942 
943 
944 STATIC void
945 xfs_qm_dqput_final(
946 	struct xfs_dquot	*dqp)
947 {
948 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
949 	struct xfs_dquot	*gdqp;
950 
951 	trace_xfs_dqput_free(dqp);
952 
953 	mutex_lock(&qi->qi_lru_lock);
954 	if (list_empty(&dqp->q_lru)) {
955 		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
956 		qi->qi_lru_count++;
957 		XFS_STATS_INC(xs_qm_dquot_unused);
958 	}
959 	mutex_unlock(&qi->qi_lru_lock);
960 
961 	/*
962 	 * If we just added a udquot to the freelist, then we want to release
963 	 * the gdquot reference that it (probably) has. Otherwise it'll keep
964 	 * the gdquot from getting reclaimed.
965 	 */
966 	gdqp = dqp->q_gdquot;
967 	if (gdqp) {
968 		xfs_dqlock(gdqp);
969 		dqp->q_gdquot = NULL;
970 	}
971 	xfs_dqunlock(dqp);
972 
973 	/*
974 	 * If we had a group quota hint, release it now.
975 	 */
976 	if (gdqp)
977 		xfs_qm_dqput(gdqp);
978 }
979 
980 /*
981  * Release a reference to the dquot (decrement ref-count) and unlock it.
982  *
983  * If there is a group quota attached to this dquot, carefully release that
984  * too without tripping over deadlocks'n'stuff.
985  */
986 void
987 xfs_qm_dqput(
988 	struct xfs_dquot	*dqp)
989 {
990 	ASSERT(dqp->q_nrefs > 0);
991 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
992 
993 	trace_xfs_dqput(dqp);
994 
995 	if (--dqp->q_nrefs > 0)
996 		xfs_dqunlock(dqp);
997 	else
998 		xfs_qm_dqput_final(dqp);
999 }
1000 
1001 /*
1002  * Release a dquot. Flush it if dirty, then dqput() it.
1003  * dquot must not be locked.
1004  */
1005 void
1006 xfs_qm_dqrele(
1007 	xfs_dquot_t	*dqp)
1008 {
1009 	if (!dqp)
1010 		return;
1011 
1012 	trace_xfs_dqrele(dqp);
1013 
1014 	xfs_dqlock(dqp);
1015 	/*
1016 	 * We don't bother to flush the dquot here even if it is dirty;
1017 	 * that would create stutters that we want to avoid.
1018 	 * Instead we do a delayed write when we try to reclaim
1019 	 * a dirty dquot. Also xfs_sync will take part of the burden...
1020 	 */
1021 	xfs_qm_dqput(dqp);
1022 }
1023 
1024 /*
1025  * This is the dquot flushing I/O completion routine.  It is called
1026  * from interrupt level when the buffer containing the dquot is
1027  * flushed to disk.  It is responsible for removing the dquot logitem
1028  * from the AIL if it has not been re-logged, and unlocking the dquot's
1029  * flush lock. This behavior is very similar to that of inodes.
1030  */
1031 STATIC void
1032 xfs_qm_dqflush_done(
1033 	struct xfs_buf		*bp,
1034 	struct xfs_log_item	*lip)
1035 {
1036 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
1037 	xfs_dquot_t		*dqp = qip->qli_dquot;
1038 	struct xfs_ail		*ailp = lip->li_ailp;
1039 
1040 	/*
1041 	 * We only want to pull the item from the AIL if its
1042 	 * location in the log has not changed since we started the flush.
1043 	 * Thus, we only bother if the dquot's lsn has
1044 	 * not changed. First we check the lsn outside the lock
1045 	 * since it's cheaper, and then we recheck while
1046 	 * holding the lock before removing the dquot from the AIL.
1047 	 */
1048 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1049 	    lip->li_lsn == qip->qli_flush_lsn) {
1050 
1051 		/* xfs_trans_ail_delete() drops the AIL lock. */
1052 		spin_lock(&ailp->xa_lock);
1053 		if (lip->li_lsn == qip->qli_flush_lsn)
1054 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1055 		else
1056 			spin_unlock(&ailp->xa_lock);
1057 	}
1058 
1059 	/*
1060 	 * Release the dq's flush lock since we're done with it.
1061 	 */
1062 	xfs_dqfunlock(dqp);
1063 }
1064 
1065 /*
1066  * Write a modified dquot to disk.
1067  * The dquot must be locked and the flush lock held by the caller as well.
1068  * The flush lock will not be unlocked until the dquot reaches the disk,
1069  * but the dquot is free to be unlocked and modified by the caller
1070  * in the interim. Dquot is still locked on return. This behavior is
1071  * identical to that of inodes.
1072  */
1073 int
1074 xfs_qm_dqflush(
1075 	struct xfs_dquot	*dqp,
1076 	struct xfs_buf		**bpp)
1077 {
1078 	struct xfs_mount	*mp = dqp->q_mount;
1079 	struct xfs_buf		*bp;
1080 	struct xfs_disk_dquot	*ddqp;
1081 	int			error;
1082 
1083 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1084 	ASSERT(!completion_done(&dqp->q_flush));
1085 
1086 	trace_xfs_dqflush(dqp);
1087 
1088 	*bpp = NULL;
1089 
1090 	xfs_qm_dqunpin_wait(dqp);
1091 
1092 	/*
1093 	 * This may have been unpinned because the filesystem is shutting
1094 	 * down forcibly. If that's the case we must not write this dquot
1095 	 * to disk, because the log record didn't make it to disk.
1096 	 *
1097 	 * We also have to remove the log item from the AIL in this case,
1098  * as we wait for an empty AIL as part of the unmount process.
1099 	 */
1100 	if (XFS_FORCED_SHUTDOWN(mp)) {
1101 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1102 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1103 
1104 		spin_lock(&mp->m_ail->xa_lock);
1105 		if (lip->li_flags & XFS_LI_IN_AIL)
1106 			xfs_trans_ail_delete(mp->m_ail, lip,
1107 					     SHUTDOWN_CORRUPT_INCORE);
1108 		else
1109 			spin_unlock(&mp->m_ail->xa_lock);
1110 		error = XFS_ERROR(EIO);
1111 		goto out_unlock;
1112 	}
1113 
1114 	/*
1115 	 * Get the buffer containing the on-disk dquot
1116 	 */
1117 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1118 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
1119 	if (error)
1120 		goto out_unlock;
1121 
1122 	/*
1123 	 * Calculate the location of the dquot inside the buffer.
1124 	 */
1125 	ddqp = bp->b_addr + dqp->q_bufoffset;
1126 
1127 	/*
1128 	 * A simple sanity check in case we got a corrupted dquot..
1129 	 * A simple sanity check in case we got a corrupted dquot.
1130 	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1131 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1132 	if (error) {
1133 		xfs_buf_relse(bp);
1134 		xfs_dqfunlock(dqp);
1135 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1136 		return XFS_ERROR(EIO);
1137 	}
1138 
1139 	/* This is the only portion of data that needs to persist */
1140 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1141 
1142 	/*
1143 	 * Clear the dirty field and remember the flush lsn for later use.
1144 	 */
1145 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1146 
1147 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1148 					&dqp->q_logitem.qli_item.li_lsn);
1149 
1150 	/*
1151 	 * copy the lsn into the on-disk dquot now while we have the in-memory
1152 	 * dquot here. This can't be done later in the write verifier as we
1153 	 * can't get access to the log item at that point in time.
1154 	 */
1155 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1156 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1157 
1158 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1159 	}
1160 
1161 	/*
1162 	 * Attach an iodone routine so that we can remove this dquot from the
1163 	 * AIL and release the flush lock once the dquot is synced to disk.
1164 	 */
1165 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1166 				  &dqp->q_logitem.qli_item);
1167 
1168 	/*
1169 	 * If the buffer is pinned then push on the log so we won't
1170 	 * get stuck waiting in the write for too long.
1171 	 */
1172 	if (xfs_buf_ispinned(bp)) {
1173 		trace_xfs_dqflush_force(dqp);
1174 		xfs_log_force(mp, 0);
1175 	}
1176 
1177 	trace_xfs_dqflush_done(dqp);
1178 	*bpp = bp;
1179 	return 0;
1180 
1181 out_unlock:
1182 	xfs_dqfunlock(dqp);
1183 	return XFS_ERROR(EIO);
1184 }
1185 
1186 /*
1187  * Lock two xfs_dquot structures.
1188  *
1189  * To avoid deadlocks we always lock the quota structure with
1190  * the lower id first.
1191  */
1192 void
1193 xfs_dqlock2(
1194 	xfs_dquot_t	*d1,
1195 	xfs_dquot_t	*d2)
1196 {
1197 	if (d1 && d2) {
1198 		ASSERT(d1 != d2);
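		/*
		 * mutex_lock_nested() below tells lockdep that the second
		 * dquot lock is a deliberate nested acquisition of the same
		 * lock class, so taking two q_qlocks does not trigger a
		 * false deadlock report.
		 */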
1199 		if (be32_to_cpu(d1->q_core.d_id) >
1200 		    be32_to_cpu(d2->q_core.d_id)) {
1201 			mutex_lock(&d2->q_qlock);
1202 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1203 		} else {
1204 			mutex_lock(&d1->q_qlock);
1205 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1206 		}
1207 	} else if (d1) {
1208 		mutex_lock(&d1->q_qlock);
1209 	} else if (d2) {
1210 		mutex_lock(&d2->q_qlock);
1211 	}
1212 }
1213 
1214 int __init
1215 xfs_qm_init(void)
1216 {
1217 	xfs_qm_dqzone =
1218 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1219 	if (!xfs_qm_dqzone)
1220 		goto out;
1221 
1222 	xfs_qm_dqtrxzone =
1223 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1224 	if (!xfs_qm_dqtrxzone)
1225 		goto out_free_dqzone;
1226 
1227 	return 0;
1228 
1229 out_free_dqzone:
1230 	kmem_zone_destroy(xfs_qm_dqzone);
1231 out:
1232 	return -ENOMEM;
1233 }
1234 
1235 void
1236 xfs_qm_exit(void)
1237 {
1238 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1239 	kmem_zone_destroy(xfs_qm_dqzone);
1240 }
1241