xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 84744377)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_shared.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_inode.h"
29 #include "xfs_bmap.h"
30 #include "xfs_bmap_util.h"
31 #include "xfs_alloc.h"
32 #include "xfs_quota.h"
33 #include "xfs_error.h"
34 #include "xfs_trans.h"
35 #include "xfs_buf_item.h"
36 #include "xfs_trans_space.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_qm.h"
39 #include "xfs_cksum.h"
40 #include "xfs_trace.h"
41 #include "xfs_log.h"
42 #include "xfs_bmap_btree.h"
43 
44 /*
45  * Lock order:
46  *
47  * ip->i_lock
48  *   qi->qi_tree_lock
49  *     dquot->q_qlock (xfs_dqlock() and friends)
50  *       dquot->q_flush (xfs_dqflock() and friends)
51  *       qi->qi_lru_lock
52  *
53  * If two dquots need to be locked the order is user before group/project;
54  * otherwise it is by the lowest id first.  See xfs_dqlock2.
55  */
56 
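/*
 * Debug-only error injection: when xfs_do_dqerror is set, xfs_qm_dqget()
 * fails one in every xfs_dqerror_mod requests against xfs_dqerror_target
 * with EIO (see the DEBUG block in xfs_qm_dqget below).
 */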
57 #ifdef DEBUG
58 xfs_buftarg_t *xfs_dqerror_target;
59 int xfs_do_dqerror;
60 int xfs_dqreq_num;
61 int xfs_dqerror_mod = 33;
62 #endif
63 
64 struct kmem_zone		*xfs_qm_dqtrxzone;
65 static struct kmem_zone		*xfs_qm_dqzone;
66 
67 static struct lock_class_key xfs_dquot_group_class;
68 static struct lock_class_key xfs_dquot_project_class;
69 
70 /*
71  * This is called to free all the memory associated with a dquot
72  */
73 void
74 xfs_qm_dqdestroy(
75 	xfs_dquot_t	*dqp)
76 {
77 	ASSERT(list_empty(&dqp->q_lru));
78 
79 	mutex_destroy(&dqp->q_qlock);
80 	kmem_zone_free(xfs_qm_dqzone, dqp);
81 
82 	XFS_STATS_DEC(xs_qm_dquot);
83 }
84 
85 /*
86  * If default limits are in force, push them into the dquot now.
87  * We overwrite the dquot limits only if they are zero and this
88  * is not the root dquot.
89  */
90 void
91 xfs_qm_adjust_dqlimits(
92 	struct xfs_mount	*mp,
93 	struct xfs_dquot	*dq)
94 {
95 	struct xfs_quotainfo	*q = mp->m_quotainfo;
96 	struct xfs_disk_dquot	*d = &dq->q_core;
97 	int			prealloc = 0;
98 
99 	ASSERT(d->d_id);
100 
101 	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
102 		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
103 		prealloc = 1;
104 	}
105 	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
106 		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
107 		prealloc = 1;
108 	}
109 	if (q->qi_isoftlimit && !d->d_ino_softlimit)
110 		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
111 	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
112 		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
113 	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
114 		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
115 	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
116 		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
117 
118 	if (prealloc)
119 		xfs_dquot_set_prealloc_limits(dq);
120 }
121 
122 /*
123  * Check the limits and timers of a dquot and start or reset timers
124  * if necessary.
125  * This gets called even when quota enforcement is OFF, which makes our
126  * life a little less complicated: we simply don't reject any quota
127  * reservations in that case.
128  * We also return 0 for the timer values in Q_GETQUOTA calls when
129  * enforcement is off.
130  * In contrast, warnings are a little different in that they don't
131  * 'automatically' get started when limits get exceeded.  They do
132  * get reset to zero, however, when we find the count to be under
133  * the soft limit (they are only ever set non-zero via userspace).
134  */
135 void
136 xfs_qm_adjust_dqtimers(
137 	xfs_mount_t		*mp,
138 	xfs_disk_dquot_t	*d)
139 {
140 	ASSERT(d->d_id);
141 
142 #ifdef DEBUG
143 	if (d->d_blk_hardlimit)
144 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
145 		       be64_to_cpu(d->d_blk_hardlimit));
146 	if (d->d_ino_hardlimit)
147 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
148 		       be64_to_cpu(d->d_ino_hardlimit));
149 	if (d->d_rtb_hardlimit)
150 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
151 		       be64_to_cpu(d->d_rtb_hardlimit));
152 #endif
153 
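	/*
	 * Block count timer: start it the first time usage exceeds the soft
	 * or hard block limit, and clear the warning count while we are
	 * still within the limits.  Once the timer is running, clear it
	 * again as soon as usage drops back under both limits.
	 */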
154 	if (!d->d_btimer) {
155 		if ((d->d_blk_softlimit &&
156 		     (be64_to_cpu(d->d_bcount) >
157 		      be64_to_cpu(d->d_blk_softlimit))) ||
158 		    (d->d_blk_hardlimit &&
159 		     (be64_to_cpu(d->d_bcount) >
160 		      be64_to_cpu(d->d_blk_hardlimit)))) {
161 			d->d_btimer = cpu_to_be32(get_seconds() +
162 					mp->m_quotainfo->qi_btimelimit);
163 		} else {
164 			d->d_bwarns = 0;
165 		}
166 	} else {
167 		if ((!d->d_blk_softlimit ||
168 		     (be64_to_cpu(d->d_bcount) <=
169 		      be64_to_cpu(d->d_blk_softlimit))) &&
170 		    (!d->d_blk_hardlimit ||
171 		    (be64_to_cpu(d->d_bcount) <=
172 		     be64_to_cpu(d->d_blk_hardlimit)))) {
173 			d->d_btimer = 0;
174 		}
175 	}
176 
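	/* Inode count timer: same logic as the block count timer above. */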
177 	if (!d->d_itimer) {
178 		if ((d->d_ino_softlimit &&
179 		     (be64_to_cpu(d->d_icount) >
180 		      be64_to_cpu(d->d_ino_softlimit))) ||
181 		    (d->d_ino_hardlimit &&
182 		     (be64_to_cpu(d->d_icount) >
183 		      be64_to_cpu(d->d_ino_hardlimit)))) {
184 			d->d_itimer = cpu_to_be32(get_seconds() +
185 					mp->m_quotainfo->qi_itimelimit);
186 		} else {
187 			d->d_iwarns = 0;
188 		}
189 	} else {
190 		if ((!d->d_ino_softlimit ||
191 		     (be64_to_cpu(d->d_icount) <=
192 		      be64_to_cpu(d->d_ino_softlimit)))  &&
193 		    (!d->d_ino_hardlimit ||
194 		     (be64_to_cpu(d->d_icount) <=
195 		      be64_to_cpu(d->d_ino_hardlimit)))) {
196 			d->d_itimer = 0;
197 		}
198 	}
199 
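	/* Realtime block count timer: same logic as the block count timer above. */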
200 	if (!d->d_rtbtimer) {
201 		if ((d->d_rtb_softlimit &&
202 		     (be64_to_cpu(d->d_rtbcount) >
203 		      be64_to_cpu(d->d_rtb_softlimit))) ||
204 		    (d->d_rtb_hardlimit &&
205 		     (be64_to_cpu(d->d_rtbcount) >
206 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
207 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
208 					mp->m_quotainfo->qi_rtbtimelimit);
209 		} else {
210 			d->d_rtbwarns = 0;
211 		}
212 	} else {
213 		if ((!d->d_rtb_softlimit ||
214 		     (be64_to_cpu(d->d_rtbcount) <=
215 		      be64_to_cpu(d->d_rtb_softlimit))) &&
216 		    (!d->d_rtb_hardlimit ||
217 		     (be64_to_cpu(d->d_rtbcount) <=
218 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
219 			d->d_rtbtimer = 0;
220 		}
221 	}
222 }
223 
224 /*
225  * initialize a buffer full of dquots and log the whole thing
226  */
227 STATIC void
228 xfs_qm_init_dquot_blk(
229 	xfs_trans_t	*tp,
230 	xfs_mount_t	*mp,
231 	xfs_dqid_t	id,
232 	uint		type,
233 	xfs_buf_t	*bp)
234 {
235 	struct xfs_quotainfo	*q = mp->m_quotainfo;
236 	xfs_dqblk_t	*d;
237 	int		curid, i;
238 
239 	ASSERT(tp);
240 	ASSERT(xfs_buf_islocked(bp));
241 
242 	d = bp->b_addr;
243 
244 	/*
245 	 * ID of the first dquot in the block - ids are zero based.
246 	 */
247 	curid = id - (id % q->qi_dqperchunk);
248 	ASSERT(curid >= 0);
249 	memset(d, 0, BBTOB(q->qi_dqchunklen));
250 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
251 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
252 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
253 		d->dd_diskdq.d_id = cpu_to_be32(curid);
254 		d->dd_diskdq.d_flags = type;
255 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
256 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
257 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
258 					 XFS_DQUOT_CRC_OFF);
259 		}
260 	}
261 
262 	xfs_trans_dquot_buf(tp, bp,
263 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
264 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
265 			     XFS_BLF_GDQUOT_BUF)));
266 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
267 }
268 
269 /*
270  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
271  * watermarks correspond to the soft and hard limits by default. If a soft limit
272  * is not specified, we use 95% of the hard limit.
273  */
274 void
275 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
276 {
277 	__uint64_t space;
278 
279 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
280 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
281 	if (!dqp->q_prealloc_lo_wmark) {
282 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
283 		do_div(dqp->q_prealloc_lo_wmark, 100);
284 		dqp->q_prealloc_lo_wmark *= 95;
285 	}
286 
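	/*
	 * Pre-compute 1%, 3% and 5% of the hard limit; these low-space
	 * thresholds are used to throttle speculative preallocation as the
	 * quota runs out of space.
	 */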
287 	space = dqp->q_prealloc_hi_wmark;
288 
289 	do_div(space, 100);
290 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
291 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
292 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
293 }
294 
295 /*
296  * Allocate a block and fill it with dquots.
297  * This is called when the bmapi finds a hole.
298  */
299 STATIC int
300 xfs_qm_dqalloc(
301 	xfs_trans_t	**tpp,
302 	xfs_mount_t	*mp,
303 	xfs_dquot_t	*dqp,
304 	xfs_inode_t	*quotip,
305 	xfs_fileoff_t	offset_fsb,
306 	xfs_buf_t	**O_bpp)
307 {
308 	xfs_fsblock_t	firstblock;
309 	xfs_bmap_free_t flist;
310 	xfs_bmbt_irec_t map;
311 	int		nmaps, error, committed;
312 	xfs_buf_t	*bp;
313 	xfs_trans_t	*tp = *tpp;
314 
315 	ASSERT(tp != NULL);
316 
317 	trace_xfs_dqalloc(dqp);
318 
319 	/*
320 	 * Initialize the bmap freelist prior to calling bmapi code.
321 	 */
322 	xfs_bmap_init(&flist, &firstblock);
323 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
324 	/*
325 	 * Return if this type of quota was turned off while we didn't
326 	 * have the inode lock.
327 	 */
328 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
329 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
330 		return (ESRCH);
331 	}
332 
333 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
334 	nmaps = 1;
335 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
336 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
337 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
338 				&map, &nmaps, &flist);
339 	if (error)
340 		goto error0;
341 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
342 	ASSERT(nmaps == 1);
343 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
344 	       (map.br_startblock != HOLESTARTBLOCK));
345 
346 	/*
347 	 * Keep track of the blkno to save a lookup later
348 	 */
349 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
350 
351 	/* now we can just get the buffer (there's nothing to read yet) */
352 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
353 			       dqp->q_blkno,
354 			       mp->m_quotainfo->qi_dqchunklen,
355 			       0);
356 
357 	error = xfs_buf_geterror(bp);
358 	if (error)
359 		goto error1;
360 	bp->b_ops = &xfs_dquot_buf_ops;
361 
362 	/*
363 	 * Make a chunk of dquots out of this buffer and log
364 	 * the entire thing.
365 	 */
366 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
367 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
368 
369 	/*
370 	 * xfs_bmap_finish() may commit the current transaction and
371 	 * start a second transaction if the freelist is not empty.
372 	 *
373 	 * Since we still want to modify this buffer, we need to
374 	 * ensure that the buffer is not released on commit of
375 	 * the first transaction and ensure the buffer is added to the
376 	 * second transaction.
377 	 *
378 	 * If there is only one transaction then don't stop the buffer
379 	 * from being released when it commits later on.
380 	 */
381 
382 	xfs_trans_bhold(tp, bp);
383 
384 	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
385 		goto error1;
386 	}
387 
388 	if (committed) {
389 		tp = *tpp;
390 		xfs_trans_bjoin(tp, bp);
391 	} else {
392 		xfs_trans_bhold_release(tp, bp);
393 	}
394 
395 	*O_bpp = bp;
396 	return 0;
397 
398 error1:
399 	xfs_bmap_cancel(&flist);
400 error0:
401 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
402 
403 	return (error);
404 }
405 
406 STATIC int
407 xfs_qm_dqrepair(
408 	struct xfs_mount	*mp,
409 	struct xfs_trans	*tp,
410 	struct xfs_dquot	*dqp,
411 	xfs_dqid_t		firstid,
412 	struct xfs_buf		**bpp)
413 {
414 	int			error;
415 	struct xfs_disk_dquot	*ddq;
416 	struct xfs_dqblk	*d;
417 	int			i;
418 
419 	/*
420 	 * Read the buffer without verification so we get the corrupted
421 	 * buffer returned to us. Make sure we verify it on write, though.
422 	 */
423 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
424 				   mp->m_quotainfo->qi_dqchunklen,
425 				   0, bpp, NULL);
426 
427 	if (error) {
428 		ASSERT(*bpp == NULL);
429 		return XFS_ERROR(error);
430 	}
431 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
432 
433 	ASSERT(xfs_buf_islocked(*bpp));
434 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
435 
436 	/* Do the actual repair of dquots in this buffer */
437 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
438 		ddq = &d[i].dd_diskdq;
439 		error = xfs_dqcheck(mp, ddq, firstid + i,
440 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
441 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
442 		if (error) {
443 			/* repair failed, we're screwed */
444 			xfs_trans_brelse(tp, *bpp);
445 			return XFS_ERROR(EIO);
446 		}
447 	}
448 
449 	return 0;
450 }
451 
452 /*
453  * Maps a dquot to the buffer containing its on-disk version.
454  * This returns a pointer to the buffer containing the on-disk dquot
455  * in the bpp param, and a pointer to the on-disk dquot within that buffer.
456  */
457 STATIC int
458 xfs_qm_dqtobp(
459 	xfs_trans_t		**tpp,
460 	xfs_dquot_t		*dqp,
461 	xfs_disk_dquot_t	**O_ddpp,
462 	xfs_buf_t		**O_bpp,
463 	uint			flags)
464 {
465 	struct xfs_bmbt_irec	map;
466 	int			nmaps = 1, error;
467 	struct xfs_buf		*bp;
468 	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
469 	struct xfs_mount	*mp = dqp->q_mount;
470 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
471 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
472 	uint			lock_mode;
473 
474 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
475 
476 	lock_mode = xfs_ilock_data_map_shared(quotip);
477 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
478 		/*
479 		 * Return if this type of quota was turned off while we
480 		 * didn't hold the quota inode lock.
481 		 */
482 		xfs_iunlock(quotip, lock_mode);
483 		return ESRCH;
484 	}
485 
486 	/*
487 	 * Find the block map; no allocations yet
488 	 */
489 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
490 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
491 
492 	xfs_iunlock(quotip, lock_mode);
493 	if (error)
494 		return error;
495 
496 	ASSERT(nmaps == 1);
497 	ASSERT(map.br_blockcount == 1);
498 
499 	/*
500 	 * Offset of dquot in the (fixed sized) dquot chunk.
501 	 */
502 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
503 		sizeof(xfs_dqblk_t);
504 
505 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
506 	if (map.br_startblock == HOLESTARTBLOCK) {
507 		/*
508 		 * We don't allocate unless we're asked to
509 		 */
510 		if (!(flags & XFS_QMOPT_DQALLOC))
511 			return ENOENT;
512 
513 		ASSERT(tp);
514 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
515 					dqp->q_fileoffset, &bp);
516 		if (error)
517 			return error;
518 		tp = *tpp;
519 	} else {
520 		trace_xfs_dqtobp_read(dqp);
521 
522 		/*
523 		 * store the blkno etc so that we don't have to do the
524 		 * mapping all the time
525 		 */
526 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
527 
528 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
529 					   dqp->q_blkno,
530 					   mp->m_quotainfo->qi_dqchunklen,
531 					   0, &bp, &xfs_dquot_buf_ops);
532 
533 		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
534 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
535 						mp->m_quotainfo->qi_dqperchunk;
536 			ASSERT(bp == NULL);
537 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
538 		}
539 
540 		if (error) {
541 			ASSERT(bp == NULL);
542 			return XFS_ERROR(error);
543 		}
544 	}
545 
546 	ASSERT(xfs_buf_islocked(bp));
547 	*O_bpp = bp;
548 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
549 
550 	return (0);
551 }
552 
553 
554 /*
555  * Read in the on-disk dquot using dqtobp(), then copy it to an incore version,
556  * and release the buffer immediately.
557  *
558  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
559  */
560 int
561 xfs_qm_dqread(
562 	struct xfs_mount	*mp,
563 	xfs_dqid_t		id,
564 	uint			type,
565 	uint			flags,
566 	struct xfs_dquot	**O_dqpp)
567 {
568 	struct xfs_dquot	*dqp;
569 	struct xfs_disk_dquot	*ddqp;
570 	struct xfs_buf		*bp;
571 	struct xfs_trans	*tp = NULL;
572 	int			error;
573 	int			cancelflags = 0;
574 
575 
576 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
577 
578 	dqp->dq_flags = type;
579 	dqp->q_core.d_id = cpu_to_be32(id);
580 	dqp->q_mount = mp;
581 	INIT_LIST_HEAD(&dqp->q_lru);
582 	mutex_init(&dqp->q_qlock);
583 	init_waitqueue_head(&dqp->q_pinwait);
584 
585 	/*
586 	 * Because we want to use a counting completion, complete
587 	 * the flush completion once now to allow a single access to
588 	 * it without blocking.
589 	 */
590 	init_completion(&dqp->q_flush);
591 	complete(&dqp->q_flush);
592 
593 	/*
594 	 * Make sure group quotas have a different lock class than user
595 	 * quotas.
596 	 */
597 	switch (type) {
598 	case XFS_DQ_USER:
599 		/* uses the default lock class */
600 		break;
601 	case XFS_DQ_GROUP:
602 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
603 		break;
604 	case XFS_DQ_PROJ:
605 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
606 		break;
607 	default:
608 		ASSERT(0);
609 		break;
610 	}
611 
612 	XFS_STATS_INC(xs_qm_dquot);
613 
614 	trace_xfs_dqread(dqp);
615 
616 	if (flags & XFS_QMOPT_DQALLOC) {
617 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
618 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
619 					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
620 		if (error)
621 			goto error1;
622 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
623 	}
624 
625 	/*
626 	 * Get a pointer to the on-disk dquot and the buffer containing it.
627 	 * dqp already knows its own type (GROUP/USER).
628 	 */
629 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
630 	if (error) {
631 		/*
632 		 * This can happen if quotas got turned off (ESRCH),
633 		 * or if the dquot didn't exist on disk and we ask to
634 		 * allocate (ENOENT).
635 		 */
636 		trace_xfs_dqread_fail(dqp);
637 		cancelflags |= XFS_TRANS_ABORT;
638 		goto error1;
639 	}
640 
641 	/* copy everything from disk dquot to the incore dquot */
642 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
643 	xfs_qm_dquot_logitem_init(dqp);
644 
645 	/*
646 	 * Reservation counters are defined as reservation plus current usage
647 	 * to avoid having to add every time.
648 	 */
649 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
650 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
651 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
652 
653 	/* initialize the dquot speculative prealloc thresholds */
654 	xfs_dquot_set_prealloc_limits(dqp);
655 
656 	/* Mark the buf so that this will stay incore a little longer */
657 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
658 
659 	/*
660 	 * We got the buffer with an xfs_trans_read_buf() call (in dqtobp()),
661 	 * so we need to release it with xfs_trans_brelse().
662 	 * The strategy here is identical to that of inodes; we lock
663 	 * the dquot in xfs_qm_dqget() before making it accessible to
664 	 * others. This is because dquots, like inodes, need a good level of
665 	 * concurrency, and we don't want to take locks on entire buffers
666 	 * for dquot accesses.
667 	 * Note also that the dquot buffer may even be dirty at this point, if
668 	 * this particular dquot was repaired. We still aren't afraid to
669 	 * brelse it because we have the changes incore.
670 	 */
671 	ASSERT(xfs_buf_islocked(bp));
672 	xfs_trans_brelse(tp, bp);
673 
674 	if (tp) {
675 		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
676 		if (error)
677 			goto error0;
678 	}
679 
680 	*O_dqpp = dqp;
681 	return error;
682 
683 error1:
684 	if (tp)
685 		xfs_trans_cancel(tp, cancelflags);
686 error0:
687 	xfs_qm_dqdestroy(dqp);
688 	*O_dqpp = NULL;
689 	return error;
690 }
691 
692 /*
693  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT/PDQUOT), return
694  * a locked dquot, doing an allocation (if requested) as needed.
695  * When both an inode and an id are given, the inode's id takes precedence.
696  * That is, if the id changes while we don't hold the ilock inside this
697  * function, the new dquot is returned, not necessarily the one requested
698  * in the id argument.
699  */
700 int
701 xfs_qm_dqget(
702 	xfs_mount_t	*mp,
703 	xfs_inode_t	*ip,	  /* locked inode (optional) */
704 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
705 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
706 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
707 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
708 {
709 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
710 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
711 	struct xfs_dquot	*dqp;
712 	int			error;
713 
714 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
715 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
716 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
717 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
718 		return (ESRCH);
719 	}
720 
721 #ifdef DEBUG
722 	if (xfs_do_dqerror) {
723 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
724 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
725 			xfs_debug(mp, "Returning error in dqget");
726 			return (EIO);
727 		}
728 	}
729 
730 	ASSERT(type == XFS_DQ_USER ||
731 	       type == XFS_DQ_PROJ ||
732 	       type == XFS_DQ_GROUP);
733 	if (ip) {
734 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
735 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
736 	}
737 #endif
738 
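	/*
	 * Look the id up in the per-type radix tree under qi_tree_lock.  If
	 * the dquot is found but is being freed, back off and retry;
	 * otherwise take a reference and return it locked.
	 */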
739 restart:
740 	mutex_lock(&qi->qi_tree_lock);
741 	dqp = radix_tree_lookup(tree, id);
742 	if (dqp) {
743 		xfs_dqlock(dqp);
744 		if (dqp->dq_flags & XFS_DQ_FREEING) {
745 			xfs_dqunlock(dqp);
746 			mutex_unlock(&qi->qi_tree_lock);
747 			trace_xfs_dqget_freeing(dqp);
748 			delay(1);
749 			goto restart;
750 		}
751 
752 		dqp->q_nrefs++;
753 		mutex_unlock(&qi->qi_tree_lock);
754 
755 		trace_xfs_dqget_hit(dqp);
756 		XFS_STATS_INC(xs_qm_dqcachehits);
757 		*O_dqpp = dqp;
758 		return 0;
759 	}
760 	mutex_unlock(&qi->qi_tree_lock);
761 	XFS_STATS_INC(xs_qm_dqcachemisses);
762 
763 	/*
764 	 * Dquot cache miss. We don't want to keep the inode lock across
765 	 * a (potential) disk read. Also we don't want to deal with the lock
766 	 * ordering between quotainode and this inode. OTOH, dropping the inode
767 	 * lock here means dealing with a chown that can happen before
768 	 * we re-acquire the lock.
769 	 */
770 	if (ip)
771 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
772 
773 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
774 
775 	if (ip)
776 		xfs_ilock(ip, XFS_ILOCK_EXCL);
777 
778 	if (error)
779 		return error;
780 
781 	if (ip) {
782 		/*
783 		 * A dquot could be attached to this inode by now, since
784 		 * we had dropped the ilock.
785 		 */
786 		if (xfs_this_quota_on(mp, type)) {
787 			struct xfs_dquot	*dqp1;
788 
789 			dqp1 = xfs_inode_dquot(ip, type);
790 			if (dqp1) {
791 				xfs_qm_dqdestroy(dqp);
792 				dqp = dqp1;
793 				xfs_dqlock(dqp);
794 				goto dqret;
795 			}
796 		} else {
797 			/* inode stays locked on return */
798 			xfs_qm_dqdestroy(dqp);
799 			return XFS_ERROR(ESRCH);
800 		}
801 	}
802 
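	/*
	 * Insert the freshly read dquot into the radix tree.  A racing
	 * xfs_qm_dqget() may have inserted one for the same id already, in
	 * which case we throw ours away and restart the lookup.
	 */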
803 	mutex_lock(&qi->qi_tree_lock);
804 	error = -radix_tree_insert(tree, id, dqp);
805 	if (unlikely(error)) {
806 		WARN_ON(error != EEXIST);
807 
808 		/*
809 		 * Duplicate found. Just throw away the new dquot and start
810 		 * over.
811 		 */
812 		mutex_unlock(&qi->qi_tree_lock);
813 		trace_xfs_dqget_dup(dqp);
814 		xfs_qm_dqdestroy(dqp);
815 		XFS_STATS_INC(xs_qm_dquot_dups);
816 		goto restart;
817 	}
818 
819 	/*
820 	 * We return a locked dquot to the caller, with a reference taken
821 	 */
822 	xfs_dqlock(dqp);
823 	dqp->q_nrefs = 1;
824 
825 	qi->qi_dquots++;
826 	mutex_unlock(&qi->qi_tree_lock);
827 
828  dqret:
829 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
830 	trace_xfs_dqget_miss(dqp);
831 	*O_dqpp = dqp;
832 	return (0);
833 }
834 
835 
836 STATIC void
837 xfs_qm_dqput_final(
838 	struct xfs_dquot	*dqp)
839 {
840 	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
841 	struct xfs_dquot	*gdqp;
842 	struct xfs_dquot	*pdqp;
843 
844 	trace_xfs_dqput_free(dqp);
845 
846 	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
847 		XFS_STATS_INC(xs_qm_dquot_unused);
848 
849 	/*
850 	 * If we just added a udquot to the freelist, then we want to release
851 	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
852 	 * keep the gdquot/pdquot from getting reclaimed.
853 	 */
854 	gdqp = dqp->q_gdquot;
855 	if (gdqp) {
856 		xfs_dqlock(gdqp);
857 		dqp->q_gdquot = NULL;
858 	}
859 
860 	pdqp = dqp->q_pdquot;
861 	if (pdqp) {
862 		xfs_dqlock(pdqp);
863 		dqp->q_pdquot = NULL;
864 	}
865 	xfs_dqunlock(dqp);
866 
867 	/*
868 	 * If we had a group/project quota hint, release it now.
869 	 */
870 	if (gdqp)
871 		xfs_qm_dqput(gdqp);
872 	if (pdqp)
873 		xfs_qm_dqput(pdqp);
874 }
875 
876 /*
877  * Release a reference to the dquot (decrement ref-count) and unlock it.
878  *
879  * If there is a group quota attached to this dquot, carefully release that
880  * If there is a group or project quota attached to this dquot, carefully
881  * release that too without tripping over deadlocks.
882 void
883 xfs_qm_dqput(
884 	struct xfs_dquot	*dqp)
885 {
886 	ASSERT(dqp->q_nrefs > 0);
887 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
888 
889 	trace_xfs_dqput(dqp);
890 
891 	if (--dqp->q_nrefs > 0)
892 		xfs_dqunlock(dqp);
893 	else
894 		xfs_qm_dqput_final(dqp);
895 }
896 
897 /*
898  * Release a dquot. Flush it if dirty, then dqput() it.
899  * dquot must not be locked.
900  */
901 void
902 xfs_qm_dqrele(
903 	xfs_dquot_t	*dqp)
904 {
905 	if (!dqp)
906 		return;
907 
908 	trace_xfs_dqrele(dqp);
909 
910 	xfs_dqlock(dqp);
911 	/*
912 	 * We don't flush the dquot here even if it is dirty; that would
913 	 * create stutters that we want to avoid.
914 	 * Instead we do a delayed write when we try to reclaim
915 	 * a dirty dquot. Also xfs_sync will take part of the burden...
916 	 */
917 	xfs_qm_dqput(dqp);
918 }
919 
920 /*
921  * This is the dquot flushing I/O completion routine.  It is called
922  * from interrupt level when the buffer containing the dquot is
923  * flushed to disk.  It is responsible for removing the dquot logitem
924  * from the AIL if it has not been re-logged, and unlocking the dquot's
925  * flush lock. This behavior is very similar to that of inodes.
926  */
927 STATIC void
928 xfs_qm_dqflush_done(
929 	struct xfs_buf		*bp,
930 	struct xfs_log_item	*lip)
931 {
932 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
933 	xfs_dquot_t		*dqp = qip->qli_dquot;
934 	struct xfs_ail		*ailp = lip->li_ailp;
935 
936 	/*
937 	 * We only want to pull the item from the AIL if its
938 	 * location in the log has not changed since we started the flush.
939 	 * Thus, we only bother if the dquot's lsn has
940 	 * not changed. First we check the lsn outside the lock
941 	 * since it's cheaper, and then we recheck while
942 	 * holding the lock before removing the dquot from the AIL.
943 	 */
944 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
945 	    lip->li_lsn == qip->qli_flush_lsn) {
946 
947 		/* xfs_trans_ail_delete() drops the AIL lock. */
948 		spin_lock(&ailp->xa_lock);
949 		if (lip->li_lsn == qip->qli_flush_lsn)
950 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
951 		else
952 			spin_unlock(&ailp->xa_lock);
953 	}
954 
955 	/*
956 	 * Release the dq's flush lock since we're done with it.
957 	 */
958 	xfs_dqfunlock(dqp);
959 }
960 
961 /*
962  * Write a modified dquot to disk.
963  * The dquot must be locked and the flush lock held by the caller.
964  * The flush lock will not be unlocked until the dquot reaches the disk,
965  * but the dquot is free to be unlocked and modified by the caller
966  * in the interim. Dquot is still locked on return. This behavior is
967  * identical to that of inodes.
968  */
969 int
970 xfs_qm_dqflush(
971 	struct xfs_dquot	*dqp,
972 	struct xfs_buf		**bpp)
973 {
974 	struct xfs_mount	*mp = dqp->q_mount;
975 	struct xfs_buf		*bp;
976 	struct xfs_disk_dquot	*ddqp;
977 	int			error;
978 
979 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
980 	ASSERT(!completion_done(&dqp->q_flush));
981 
982 	trace_xfs_dqflush(dqp);
983 
984 	*bpp = NULL;
985 
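	/*
	 * Wait for the dquot to be unpinned, i.e. for any log writes that
	 * reference it to complete, before trying to write it back.
	 */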
986 	xfs_qm_dqunpin_wait(dqp);
987 
988 	/*
989 	 * This may have been unpinned because the filesystem is shutting
990 	 * down forcibly. If that's the case we must not write this dquot
991 	 * to disk, because the log record didn't make it to disk.
992 	 *
993 	 * We also have to remove the log item from the AIL in this case,
994  * as we wait for an empty AIL as part of the unmount process.
995 	 */
996 	if (XFS_FORCED_SHUTDOWN(mp)) {
997 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
998 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
999 
1000 		spin_lock(&mp->m_ail->xa_lock);
1001 		if (lip->li_flags & XFS_LI_IN_AIL)
1002 			xfs_trans_ail_delete(mp->m_ail, lip,
1003 					     SHUTDOWN_CORRUPT_INCORE);
1004 		else
1005 			spin_unlock(&mp->m_ail->xa_lock);
1006 		error = XFS_ERROR(EIO);
1007 		goto out_unlock;
1008 	}
1009 
1010 	/*
1011 	 * Get the buffer containing the on-disk dquot
1012 	 */
1013 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1014 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
1015 	if (error)
1016 		goto out_unlock;
1017 
1018 	/*
1019 	 * Calculate the location of the dquot inside the buffer.
1020 	 */
1021 	ddqp = bp->b_addr + dqp->q_bufoffset;
1022 
1023 	/*
1024 	 * A simple sanity check in case we got a corrupted dquot.
1025 	 */
1026 	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1027 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1028 	if (error) {
1029 		xfs_buf_relse(bp);
1030 		xfs_dqfunlock(dqp);
1031 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1032 		return XFS_ERROR(EIO);
1033 	}
1034 
1035 	/* This is the only portion of data that needs to persist */
1036 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1037 
1038 	/*
1039 	 * Clear the dirty field and remember the flush lsn for later use.
1040 	 */
1041 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1042 
1043 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1044 					&dqp->q_logitem.qli_item.li_lsn);
1045 
1046 	/*
1047 	 * Copy the lsn into the on-disk dquot now while we have the in-memory
1048 	 * dquot here. This can't be done later in the write verifier as we
1049 	 * can't get access to the log item at that point in time.
1050 	 *
1051 	 * We also calculate the CRC here so that the on-disk dquot in the
1052 	 * buffer always has a valid CRC. This ensures there is no possibility
1053 	 * of a dquot without an up-to-date CRC getting to disk.
1054 	 */
1055 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1056 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1057 
1058 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1059 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1060 				 XFS_DQUOT_CRC_OFF);
1061 	}
1062 
1063 	/*
1064 	 * Attach an iodone routine so that we can remove this dquot from the
1065 	 * AIL and release the flush lock once the dquot is synced to disk.
1066 	 */
1067 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1068 				  &dqp->q_logitem.qli_item);
1069 
1070 	/*
1071 	 * If the buffer is pinned then push on the log so we won't
1072 	 * get stuck waiting in the write for too long.
1073 	 */
1074 	if (xfs_buf_ispinned(bp)) {
1075 		trace_xfs_dqflush_force(dqp);
1076 		xfs_log_force(mp, 0);
1077 	}
1078 
1079 	trace_xfs_dqflush_done(dqp);
1080 	*bpp = bp;
1081 	return 0;
1082 
1083 out_unlock:
1084 	xfs_dqfunlock(dqp);
1085 	return XFS_ERROR(EIO);
1086 }
1087 
1088 /*
1089  * Lock two xfs_dquot structures.
1090  *
1091  * To avoid deadlocks we always lock the quota structure with
1092  * the lower id first.
1093  */
1094 void
1095 xfs_dqlock2(
1096 	xfs_dquot_t	*d1,
1097 	xfs_dquot_t	*d2)
1098 {
1099 	if (d1 && d2) {
1100 		ASSERT(d1 != d2);
1101 		if (be32_to_cpu(d1->q_core.d_id) >
1102 		    be32_to_cpu(d2->q_core.d_id)) {
1103 			mutex_lock(&d2->q_qlock);
1104 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1105 		} else {
1106 			mutex_lock(&d1->q_qlock);
1107 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1108 		}
1109 	} else if (d1) {
1110 		mutex_lock(&d1->q_qlock);
1111 	} else if (d2) {
1112 		mutex_lock(&d2->q_qlock);
1113 	}
1114 }
1115 
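/*
 * Create the kmem zones (slab caches) used by the quota code: one for
 * incore dquots and one for per-transaction dquot accounting structures.
 */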
1116 int __init
1117 xfs_qm_init(void)
1118 {
1119 	xfs_qm_dqzone =
1120 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1121 	if (!xfs_qm_dqzone)
1122 		goto out;
1123 
1124 	xfs_qm_dqtrxzone =
1125 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1126 	if (!xfs_qm_dqtrxzone)
1127 		goto out_free_dqzone;
1128 
1129 	return 0;
1130 
1131 out_free_dqzone:
1132 	kmem_zone_destroy(xfs_qm_dqzone);
1133 out:
1134 	return -ENOMEM;
1135 }
1136 
1137 void
1138 xfs_qm_exit(void)
1139 {
1140 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1141 	kmem_zone_destroy(xfs_qm_dqzone);
1142 }
1143