xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 1c2dd16a)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_shared.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_inode.h"
28 #include "xfs_bmap.h"
29 #include "xfs_bmap_util.h"
30 #include "xfs_alloc.h"
31 #include "xfs_quota.h"
32 #include "xfs_error.h"
33 #include "xfs_trans.h"
34 #include "xfs_buf_item.h"
35 #include "xfs_trans_space.h"
36 #include "xfs_trans_priv.h"
37 #include "xfs_qm.h"
38 #include "xfs_cksum.h"
39 #include "xfs_trace.h"
40 #include "xfs_log.h"
41 #include "xfs_bmap_btree.h"
42 
43 /*
44  * Lock order:
45  *
46  * ip->i_lock
47  *   qi->qi_tree_lock
48  *     dquot->q_qlock (xfs_dqlock() and friends)
49  *       dquot->q_flush (xfs_dqflock() and friends)
50  *       qi->qi_lru_lock
51  *
52  * If two dquots need to be locked, the order is user before group/project;
53  * otherwise the one with the lower id is locked first. See xfs_dqlock2.
54  */
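
/*
 * Editorial illustration (not part of the original file): one nesting that
 * respects the ordering above, written with the usual locking helpers.  It
 * is a sketch only; real code paths rarely hold all of these at once.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);		// ip->i_lock
 *	mutex_lock(&qi->qi_tree_lock);		// qi->qi_tree_lock
 *	xfs_dqlock(dqp);			// dquot->q_qlock
 *	xfs_dqflock(dqp);			// dquot->q_flush
 *	...
 *	xfs_dqfunlock(dqp);
 *	xfs_dqunlock(dqp);
 *	mutex_unlock(&qi->qi_tree_lock);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */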
55 
56 #ifdef DEBUG
57 xfs_buftarg_t *xfs_dqerror_target;
58 int xfs_do_dqerror;
59 int xfs_dqreq_num;
60 int xfs_dqerror_mod = 33;
61 #endif
62 
63 struct kmem_zone		*xfs_qm_dqtrxzone;
64 static struct kmem_zone		*xfs_qm_dqzone;
65 
66 static struct lock_class_key xfs_dquot_group_class;
67 static struct lock_class_key xfs_dquot_project_class;
68 
69 /*
70  * This is called to free all the memory associated with a dquot
71  */
72 void
73 xfs_qm_dqdestroy(
74 	xfs_dquot_t	*dqp)
75 {
76 	ASSERT(list_empty(&dqp->q_lru));
77 
78 	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
79 	mutex_destroy(&dqp->q_qlock);
80 
81 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
82 	kmem_zone_free(xfs_qm_dqzone, dqp);
83 }
84 
85 /*
86  * If default limits are in force, push them into the dquot now.
87  * We overwrite the dquot limits only if they are zero and this
88  * is not the root dquot.
89  */
90 void
91 xfs_qm_adjust_dqlimits(
92 	struct xfs_mount	*mp,
93 	struct xfs_dquot	*dq)
94 {
95 	struct xfs_quotainfo	*q = mp->m_quotainfo;
96 	struct xfs_disk_dquot	*d = &dq->q_core;
97 	struct xfs_def_quota	*defq;
98 	int			prealloc = 0;
99 
100 	ASSERT(d->d_id);
101 	defq = xfs_get_defquota(dq, q);
102 
103 	if (defq->bsoftlimit && !d->d_blk_softlimit) {
104 		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
105 		prealloc = 1;
106 	}
107 	if (defq->bhardlimit && !d->d_blk_hardlimit) {
108 		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
109 		prealloc = 1;
110 	}
111 	if (defq->isoftlimit && !d->d_ino_softlimit)
112 		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
113 	if (defq->ihardlimit && !d->d_ino_hardlimit)
114 		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
115 	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
116 		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
117 	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
118 		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
119 
120 	if (prealloc)
121 		xfs_dquot_set_prealloc_limits(dq);
122 }
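
/*
 * Editorial worked example (not part of the original file), with
 * hypothetical numbers: if the default limits carry bsoftlimit = 1000 and
 * bhardlimit = 2000 blocks, and this (non-root) dquot still has
 * d_blk_softlimit and d_blk_hardlimit at zero, the function above copies
 * 1000/2000 into the dquot and, because a block limit changed, recomputes
 * the preallocation watermarks via xfs_dquot_set_prealloc_limits().  A
 * dquot that already has non-zero limits is left untouched.
 */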
123 
124 /*
125  * Check the limits and timers of a dquot and start or reset timers
126  * if necessary.
127  * This gets called even when quota enforcement is OFF, which makes our
128  * life a little less complicated: we simply don't reject any quota
129  * reservations in that case.
130  * We also return 0 as the timer values in Q_GETQUOTA calls when
131  * enforcement is off.
132  * In contrast, warnings are a little different in that they don't
133  * 'automatically' get started when limits get exceeded.  They do
134  * get reset to zero, however, when we find the count to be under
135  * the soft limit (they are only ever set non-zero via userspace).
136  */
137 void
138 xfs_qm_adjust_dqtimers(
139 	xfs_mount_t		*mp,
140 	xfs_disk_dquot_t	*d)
141 {
142 	ASSERT(d->d_id);
143 
144 #ifdef DEBUG
145 	if (d->d_blk_hardlimit)
146 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
147 		       be64_to_cpu(d->d_blk_hardlimit));
148 	if (d->d_ino_hardlimit)
149 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
150 		       be64_to_cpu(d->d_ino_hardlimit));
151 	if (d->d_rtb_hardlimit)
152 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
153 		       be64_to_cpu(d->d_rtb_hardlimit));
154 #endif
155 
156 	if (!d->d_btimer) {
157 		if ((d->d_blk_softlimit &&
158 		     (be64_to_cpu(d->d_bcount) >
159 		      be64_to_cpu(d->d_blk_softlimit))) ||
160 		    (d->d_blk_hardlimit &&
161 		     (be64_to_cpu(d->d_bcount) >
162 		      be64_to_cpu(d->d_blk_hardlimit)))) {
163 			d->d_btimer = cpu_to_be32(get_seconds() +
164 					mp->m_quotainfo->qi_btimelimit);
165 		} else {
166 			d->d_bwarns = 0;
167 		}
168 	} else {
169 		if ((!d->d_blk_softlimit ||
170 		     (be64_to_cpu(d->d_bcount) <=
171 		      be64_to_cpu(d->d_blk_softlimit))) &&
172 		    (!d->d_blk_hardlimit ||
173 		    (be64_to_cpu(d->d_bcount) <=
174 		     be64_to_cpu(d->d_blk_hardlimit)))) {
175 			d->d_btimer = 0;
176 		}
177 	}
178 
179 	if (!d->d_itimer) {
180 		if ((d->d_ino_softlimit &&
181 		     (be64_to_cpu(d->d_icount) >
182 		      be64_to_cpu(d->d_ino_softlimit))) ||
183 		    (d->d_ino_hardlimit &&
184 		     (be64_to_cpu(d->d_icount) >
185 		      be64_to_cpu(d->d_ino_hardlimit)))) {
186 			d->d_itimer = cpu_to_be32(get_seconds() +
187 					mp->m_quotainfo->qi_itimelimit);
188 		} else {
189 			d->d_iwarns = 0;
190 		}
191 	} else {
192 		if ((!d->d_ino_softlimit ||
193 		     (be64_to_cpu(d->d_icount) <=
194 		      be64_to_cpu(d->d_ino_softlimit)))  &&
195 		    (!d->d_ino_hardlimit ||
196 		     (be64_to_cpu(d->d_icount) <=
197 		      be64_to_cpu(d->d_ino_hardlimit)))) {
198 			d->d_itimer = 0;
199 		}
200 	}
201 
202 	if (!d->d_rtbtimer) {
203 		if ((d->d_rtb_softlimit &&
204 		     (be64_to_cpu(d->d_rtbcount) >
205 		      be64_to_cpu(d->d_rtb_softlimit))) ||
206 		    (d->d_rtb_hardlimit &&
207 		     (be64_to_cpu(d->d_rtbcount) >
208 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
209 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
210 					mp->m_quotainfo->qi_rtbtimelimit);
211 		} else {
212 			d->d_rtbwarns = 0;
213 		}
214 	} else {
215 		if ((!d->d_rtb_softlimit ||
216 		     (be64_to_cpu(d->d_rtbcount) <=
217 		      be64_to_cpu(d->d_rtb_softlimit))) &&
218 		    (!d->d_rtb_hardlimit ||
219 		     (be64_to_cpu(d->d_rtbcount) <=
220 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
221 			d->d_rtbtimer = 0;
222 		}
223 	}
224 }
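
/*
 * Editorial worked example (not part of the original file), with
 * hypothetical numbers: with d_blk_softlimit = 100 blocks and
 * d_bcount = 150, a dquot whose d_btimer is still zero gets
 * d_btimer = get_seconds() + qi_btimelimit (the grace period) from the
 * code above.  If usage later drops back to d_bcount <= 100, the next
 * call finds the counts under both limits and clears d_btimer again.
 * The warning counters are only ever cleared here, never started.
 */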
225 
226 /*
227  * initialize a buffer full of dquots and log the whole thing
228  */
229 STATIC void
230 xfs_qm_init_dquot_blk(
231 	xfs_trans_t	*tp,
232 	xfs_mount_t	*mp,
233 	xfs_dqid_t	id,
234 	uint		type,
235 	xfs_buf_t	*bp)
236 {
237 	struct xfs_quotainfo	*q = mp->m_quotainfo;
238 	xfs_dqblk_t	*d;
239 	xfs_dqid_t	curid;
240 	int		i;
241 
242 	ASSERT(tp);
243 	ASSERT(xfs_buf_islocked(bp));
244 
245 	d = bp->b_addr;
246 
247 	/*
248 	 * ID of the first dquot in the block - id's are zero based.
249 	 * ID of the first dquot in the block; ids are zero-based.
250 	curid = id - (id % q->qi_dqperchunk);
251 	memset(d, 0, BBTOB(q->qi_dqchunklen));
252 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
253 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
254 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
255 		d->dd_diskdq.d_id = cpu_to_be32(curid);
256 		d->dd_diskdq.d_flags = type;
257 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
258 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
259 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
260 					 XFS_DQUOT_CRC_OFF);
261 		}
262 	}
263 
264 	xfs_trans_dquot_buf(tp, bp,
265 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
266 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
267 			     XFS_BLF_GDQUOT_BUF)));
268 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
269 }
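
/*
 * Editorial worked example (not part of the original file), with a
 * hypothetical qi_dqperchunk of 30: initialising the block for id 65 gives
 * curid = 65 - (65 % 30) = 60, so the loop above stamps dquots 60..89 into
 * the buffer, each with the magic number, version, id and type, plus a
 * UUID and CRC on v5 (CRC-enabled) filesystems.
 */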
270 
271 /*
272  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
273  * watermarks correspond to the soft and hard limits by default. If a soft limit
274  * is not specified, we use 95% of the hard limit.
275  */
276 void
277 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
278 {
279 	__uint64_t space;
280 
281 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
282 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
283 	if (!dqp->q_prealloc_lo_wmark) {
284 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
285 		do_div(dqp->q_prealloc_lo_wmark, 100);
286 		dqp->q_prealloc_lo_wmark *= 95;
287 	}
288 
289 	space = dqp->q_prealloc_hi_wmark;
290 
291 	do_div(space, 100);
292 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
293 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
294 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
295 }
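
/*
 * Editorial worked example (not part of the original file), with
 * hypothetical limits: for d_blk_hardlimit = 10000 blocks and no soft
 * limit, the code above yields q_prealloc_hi_wmark = 10000,
 * q_prealloc_lo_wmark = 10000 / 100 * 95 = 9500, and
 * q_low_space[] = { 100, 300, 500 } for the 1%, 3% and 5% thresholds.
 */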
296 
297 /*
298  * Allocate a block and fill it with dquots.
299  * This is called when the bmapi finds a hole.
300  */
301 STATIC int
302 xfs_qm_dqalloc(
303 	xfs_trans_t	**tpp,
304 	xfs_mount_t	*mp,
305 	xfs_dquot_t	*dqp,
306 	xfs_inode_t	*quotip,
307 	xfs_fileoff_t	offset_fsb,
308 	xfs_buf_t	**O_bpp)
309 {
310 	xfs_fsblock_t	firstblock;
311 	struct xfs_defer_ops dfops;
312 	xfs_bmbt_irec_t map;
313 	int		nmaps, error;
314 	xfs_buf_t	*bp;
315 	xfs_trans_t	*tp = *tpp;
316 
317 	ASSERT(tp != NULL);
318 
319 	trace_xfs_dqalloc(dqp);
320 
321 	/*
322 	 * Initialize the bmap freelist prior to calling bmapi code.
323 	 */
324 	xfs_defer_init(&dfops, &firstblock);
325 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
326 	/*
327 	 * Return if this type of quota was turned off while we didn't
328 	 * hold the inode lock.
329 	 */
330 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
331 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
332 		return -ESRCH;
333 	}
334 
335 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
336 	nmaps = 1;
337 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
338 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
339 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
340 				&map, &nmaps, &dfops);
341 	if (error)
342 		goto error0;
343 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
344 	ASSERT(nmaps == 1);
345 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
346 	       (map.br_startblock != HOLESTARTBLOCK));
347 
348 	/*
349 	 * Keep track of the blkno to save a lookup later
350 	 */
351 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
352 
353 	/* now we can just get the buffer (there's nothing to read yet) */
354 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
355 			       dqp->q_blkno,
356 			       mp->m_quotainfo->qi_dqchunklen,
357 			       0);
358 	if (!bp) {
359 		error = -ENOMEM;
360 		goto error1;
361 	}
362 	bp->b_ops = &xfs_dquot_buf_ops;
363 
364 	/*
365 	 * Make a chunk of dquots out of this buffer and log
366 	 * the entire thing.
367 	 */
368 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
369 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
370 
371 	/*
372 	 * xfs_defer_finish() may commit the current transaction and
373 	 * start a second transaction if the freelist is not empty.
374 	 *
375 	 * Since we still want to modify this buffer, we need to
376 	 * ensure that the buffer is not released on commit of
377 	 * the first transaction and ensure the buffer is added to the
378 	 * second transaction.
379 	 *
380 	 * If there is only one transaction then don't stop the buffer
381 	 * from being released when it commits later on.
382 	 */
383 
384 	xfs_trans_bhold(tp, bp);
385 
386 	error = xfs_defer_finish(tpp, &dfops, NULL);
387 	if (error)
388 		goto error1;
389 
390 	/* Transaction was committed? */
391 	if (*tpp != tp) {
392 		tp = *tpp;
393 		xfs_trans_bjoin(tp, bp);
394 	} else {
395 		xfs_trans_bhold_release(tp, bp);
396 	}
397 
398 	*O_bpp = bp;
399 	return 0;
400 
401 error1:
402 	xfs_defer_cancel(&dfops);
403 error0:
404 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
405 
406 	return error;
407 }
408 
409 STATIC int
410 xfs_qm_dqrepair(
411 	struct xfs_mount	*mp,
412 	struct xfs_trans	*tp,
413 	struct xfs_dquot	*dqp,
414 	xfs_dqid_t		firstid,
415 	struct xfs_buf		**bpp)
416 {
417 	int			error;
418 	struct xfs_disk_dquot	*ddq;
419 	struct xfs_dqblk	*d;
420 	int			i;
421 
422 	/*
423 	 * Read the buffer without verification so we get the corrupted
424 	 * buffer returned to us. Make sure we verify it on write, though.
425 	 */
426 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
427 				   mp->m_quotainfo->qi_dqchunklen,
428 				   0, bpp, NULL);
429 
430 	if (error) {
431 		ASSERT(*bpp == NULL);
432 		return error;
433 	}
434 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
435 
436 	ASSERT(xfs_buf_islocked(*bpp));
437 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
438 
439 	/* Do the actual repair of dquots in this buffer */
440 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
441 		ddq = &d[i].dd_diskdq;
442 		error = xfs_dqcheck(mp, ddq, firstid + i,
443 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
444 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
445 		if (error) {
446 			/* repair failed, we're screwed */
447 			xfs_trans_brelse(tp, *bpp);
448 			return -EIO;
449 		}
450 	}
451 
452 	return 0;
453 }
454 
455 /*
456  * Maps a dquot to the buffer containing its on-disk version.
457  * This returns a ptr to the buffer containing the on-disk dquot
458  * in the bpp param, and a ptr to the on-disk dquot within that buffer.
459  */
460 STATIC int
461 xfs_qm_dqtobp(
462 	xfs_trans_t		**tpp,
463 	xfs_dquot_t		*dqp,
464 	xfs_disk_dquot_t	**O_ddpp,
465 	xfs_buf_t		**O_bpp,
466 	uint			flags)
467 {
468 	struct xfs_bmbt_irec	map;
469 	int			nmaps = 1, error;
470 	struct xfs_buf		*bp;
471 	struct xfs_inode	*quotip;
472 	struct xfs_mount	*mp = dqp->q_mount;
473 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
474 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
475 	uint			lock_mode;
476 
477 	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
478 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
479 
480 	lock_mode = xfs_ilock_data_map_shared(quotip);
481 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
482 		/*
483 		 * Return if this type of quota was turned off while we
484 		 * didn't hold the quota inode lock.
485 		 */
486 		xfs_iunlock(quotip, lock_mode);
487 		return -ESRCH;
488 	}
489 
490 	/*
491 	 * Find the block map; no allocations yet
492 	 */
493 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
494 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
495 
496 	xfs_iunlock(quotip, lock_mode);
497 	if (error)
498 		return error;
499 
500 	ASSERT(nmaps == 1);
501 	ASSERT(map.br_blockcount == 1);
502 
503 	/*
504 	 * Offset of dquot in the (fixed sized) dquot chunk.
505 	 */
506 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
507 		sizeof(xfs_dqblk_t);
508 
509 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
510 	if (map.br_startblock == HOLESTARTBLOCK) {
511 		/*
512 		 * We don't allocate unless we're asked to
513 		 */
514 		if (!(flags & XFS_QMOPT_DQALLOC))
515 			return -ENOENT;
516 
517 		ASSERT(tp);
518 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
519 					dqp->q_fileoffset, &bp);
520 		if (error)
521 			return error;
522 		tp = *tpp;
523 	} else {
524 		trace_xfs_dqtobp_read(dqp);
525 
526 		/*
527 		 * Store the blkno etc. so that we don't have to do the
528 		 * mapping all the time.
529 		 */
530 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
531 
532 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
533 					   dqp->q_blkno,
534 					   mp->m_quotainfo->qi_dqchunklen,
535 					   0, &bp, &xfs_dquot_buf_ops);
536 
537 		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
538 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
539 						mp->m_quotainfo->qi_dqperchunk;
540 			ASSERT(bp == NULL);
541 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
542 		}
543 
544 		if (error) {
545 			ASSERT(bp == NULL);
546 			return error;
547 		}
548 	}
549 
550 	ASSERT(xfs_buf_islocked(bp));
551 	*O_bpp = bp;
552 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
553 
554 	return 0;
555 }
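
/*
 * Editorial worked example (not part of the original file), with a
 * hypothetical qi_dqperchunk of 30: for id 100, the code above sets
 * q_fileoffset = 100 / 30 = 3 (the offset of its chunk in the quota file)
 * and q_bufoffset = (100 % 30) * sizeof(xfs_dqblk_t), i.e. the dquot is
 * the 11th entry in that chunk's buffer.
 */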
556 
557 
558 /*
559  * Read in the on-disk dquot using dqtobp(), then copy it to an incore version,
560  * and release the buffer immediately.
561  *
562  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if one is needed.
563  */
564 int
565 xfs_qm_dqread(
566 	struct xfs_mount	*mp,
567 	xfs_dqid_t		id,
568 	uint			type,
569 	uint			flags,
570 	struct xfs_dquot	**O_dqpp)
571 {
572 	struct xfs_dquot	*dqp;
573 	struct xfs_disk_dquot	*ddqp;
574 	struct xfs_buf		*bp;
575 	struct xfs_trans	*tp = NULL;
576 	int			error;
577 
578 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
579 
580 	dqp->dq_flags = type;
581 	dqp->q_core.d_id = cpu_to_be32(id);
582 	dqp->q_mount = mp;
583 	INIT_LIST_HEAD(&dqp->q_lru);
584 	mutex_init(&dqp->q_qlock);
585 	init_waitqueue_head(&dqp->q_pinwait);
586 
587 	/*
588 	 * Because we want to use a counting completion, complete
589 	 * the flush completion once so that a single access to it
590 	 * can proceed without blocking.
591 	 */
592 	init_completion(&dqp->q_flush);
593 	complete(&dqp->q_flush);
594 
595 	/*
596 	 * Make sure group quotas have a different lock class than user
597 	 * quotas.
598 	 */
599 	switch (type) {
600 	case XFS_DQ_USER:
601 		/* uses the default lock class */
602 		break;
603 	case XFS_DQ_GROUP:
604 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
605 		break;
606 	case XFS_DQ_PROJ:
607 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
608 		break;
609 	default:
610 		ASSERT(0);
611 		break;
612 	}
613 
614 	XFS_STATS_INC(mp, xs_qm_dquot);
615 
616 	trace_xfs_dqread(dqp);
617 
618 	if (flags & XFS_QMOPT_DQALLOC) {
619 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
620 				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
621 		if (error)
622 			goto error0;
623 	}
624 
625 	/*
626 	 * Get a pointer to the on-disk dquot and the buffer containing it.
627 	 * dqp already knows its own type (GROUP/USER).
628 	 */
629 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
630 	if (error) {
631 		/*
632 		 * This can happen if quotas got turned off (ESRCH),
633 		 * or if the dquot didn't exist on disk and we didn't
634 		 * ask to allocate one (ENOENT).
635 		 */
636 		trace_xfs_dqread_fail(dqp);
637 		goto error1;
638 	}
639 
640 	/* copy everything from disk dquot to the incore dquot */
641 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
642 	xfs_qm_dquot_logitem_init(dqp);
643 
644 	/*
645 	 * Reservation counters are defined as reservation plus current usage
646 	 * to avoid having to add every time.
647 	 */
648 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
649 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
650 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
651 
652 	/* initialize the dquot speculative prealloc thresholds */
653 	xfs_dquot_set_prealloc_limits(dqp);
654 
655 	/* Mark the buf so that this will stay incore a little longer */
656 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
657 
658 	/*
659 	 * We got the buffer via xfs_trans_read_buf() (in dqtobp()),
660 	 * so we need to release it with xfs_trans_brelse().
661 	 * The strategy here is identical to that of inodes; we lock
662 	 * the dquot in xfs_qm_dqget() before making it accessible to
663 	 * others. This is because dquots, like inodes, need a good level of
664 	 * concurrency, and we don't want to take locks on the entire buffer
665 	 * for dquot accesses.
666 	 * Note also that the dquot buffer may even be dirty at this point, if
667 	 * this particular dquot was repaired. We still aren't afraid to
668 	 * brelse it because we have the changes incore.
669 	 */
670 	ASSERT(xfs_buf_islocked(bp));
671 	xfs_trans_brelse(tp, bp);
672 
673 	if (tp) {
674 		error = xfs_trans_commit(tp);
675 		if (error)
676 			goto error0;
677 	}
678 
679 	*O_dqpp = dqp;
680 	return error;
681 
682 error1:
683 	if (tp)
684 		xfs_trans_cancel(tp);
685 error0:
686 	xfs_qm_dqdestroy(dqp);
687 	*O_dqpp = NULL;
688 	return error;
689 }
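
/*
 * Editorial note (not part of the original file) on the reservation
 * counters set up above, with hypothetical numbers: if the on-disk dquot
 * shows d_bcount = 100 blocks, q_res_bcount starts at 100; a later
 * transaction that reserves 20 more blocks bumps q_res_bcount to 120 while
 * d_bcount stays at 100 until the usage is actually applied, so
 * enforcement only ever has to compare a single counter against the
 * limits.
 */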
690 
691 /*
692  * Advance to the next id in the current chunk, or if at the
693  * end of the chunk, skip ahead to the first id in the next allocated chunk
694  * using the SEEK_DATA interface.
695  */
696 static int
697 xfs_dq_get_next_id(
698 	xfs_mount_t		*mp,
699 	uint			type,
700 	xfs_dqid_t		*id,
701 	loff_t			eof)
702 {
703 	struct xfs_inode	*quotip;
704 	xfs_fsblock_t		start;
705 	loff_t			offset;
706 	uint			lock;
707 	xfs_dqid_t		next_id;
708 	int			error = 0;
709 
710 	/* Simple advance */
711 	next_id = *id + 1;
712 
713 	/* If we'd wrap past the max ID, stop */
714 	if (next_id < *id)
715 		return -ENOENT;
716 
717 	/* If new ID is within the current chunk, advancing it sufficed */
718 	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
719 		*id = next_id;
720 		return 0;
721 	}
722 
723 	/* Nope, next_id is now past the current chunk, so find the next one */
724 	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
725 
726 	quotip = xfs_quota_inode(mp, type);
727 	lock = xfs_ilock_data_map_shared(quotip);
728 
729 	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
730 				      eof, SEEK_DATA);
731 	if (offset < 0)
732 		error = offset;
733 
734 	xfs_iunlock(quotip, lock);
735 
736 	/* -ENXIO is essentially "no more data" */
737 	if (error)
738 		return (error == -ENXIO ? -ENOENT: error);
739 
740 	/* Convert next data offset back to a quota id */
741 	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
742 	return 0;
743 }
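
/*
 * Editorial worked example (not part of the original file), with a
 * hypothetical qi_dqperchunk of 30: starting from *id = 29, next_id
 * becomes 30, which is the first id of the next chunk, so the code above
 * seeks for data from file offset XFS_FSB_TO_B(mp, 1).  If the next
 * allocated chunk happens to start at FSB 4, *id comes back as
 * 4 * 30 = 120.  Wrapping past the maximum 32-bit id, or running off the
 * end of the quota file (-ENXIO), both end the walk with -ENOENT.
 */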
744 
745 /*
746  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
747  * a locked dquot, doing an allocation (if requested) as needed.
748  * When both an inode and an id are given, the inode's id takes precedence.
749  * That is, if the id changes while we don't hold the ilock inside this
750  * function, the new dquot is returned, not necessarily the one requested
751  * in the id argument.
752  */
753 int
754 xfs_qm_dqget(
755 	xfs_mount_t	*mp,
756 	xfs_inode_t	*ip,	  /* locked inode (optional) */
757 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
758 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
759 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
760 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
761 {
762 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
763 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
764 	struct xfs_dquot	*dqp;
765 	loff_t			eof = 0;
766 	int			error;
767 
768 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
769 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
770 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
771 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
772 		return -ESRCH;
773 	}
774 
775 #ifdef DEBUG
776 	if (xfs_do_dqerror) {
777 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
778 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
779 			xfs_debug(mp, "Returning error in dqget");
780 			return -EIO;
781 		}
782 	}
783 
784 	ASSERT(type == XFS_DQ_USER ||
785 	       type == XFS_DQ_PROJ ||
786 	       type == XFS_DQ_GROUP);
787 	if (ip) {
788 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
789 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
790 	}
791 #endif
792 
793 	/* Get the end of the quota file if we need it */
794 	if (flags & XFS_QMOPT_DQNEXT) {
795 		struct xfs_inode	*quotip;
796 		xfs_fileoff_t		last;
797 		uint			lock_mode;
798 
799 		quotip = xfs_quota_inode(mp, type);
800 		lock_mode = xfs_ilock_data_map_shared(quotip);
801 		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
802 		xfs_iunlock(quotip, lock_mode);
803 		if (error)
804 			return error;
805 		eof = XFS_FSB_TO_B(mp, last);
806 	}
807 
808 restart:
809 	mutex_lock(&qi->qi_tree_lock);
810 	dqp = radix_tree_lookup(tree, id);
811 	if (dqp) {
812 		xfs_dqlock(dqp);
813 		if (dqp->dq_flags & XFS_DQ_FREEING) {
814 			xfs_dqunlock(dqp);
815 			mutex_unlock(&qi->qi_tree_lock);
816 			trace_xfs_dqget_freeing(dqp);
817 			delay(1);
818 			goto restart;
819 		}
820 
821 		/* uninit / unused quota found in radix tree, keep looking  */
822 		if (flags & XFS_QMOPT_DQNEXT) {
823 			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
824 				xfs_dqunlock(dqp);
825 				mutex_unlock(&qi->qi_tree_lock);
826 				error = xfs_dq_get_next_id(mp, type, &id, eof);
827 				if (error)
828 					return error;
829 				goto restart;
830 			}
831 		}
832 
833 		dqp->q_nrefs++;
834 		mutex_unlock(&qi->qi_tree_lock);
835 
836 		trace_xfs_dqget_hit(dqp);
837 		XFS_STATS_INC(mp, xs_qm_dqcachehits);
838 		*O_dqpp = dqp;
839 		return 0;
840 	}
841 	mutex_unlock(&qi->qi_tree_lock);
842 	XFS_STATS_INC(mp, xs_qm_dqcachemisses);
843 
844 	/*
845 	 * Dquot cache miss. We don't want to keep the inode lock across
846 	 * a (potential) disk read. Also we don't want to deal with the lock
847 	 * ordering between quotainode and this inode. OTOH, dropping the inode
848 	 * lock here means dealing with a chown that can happen before
849 	 * we re-acquire the lock.
850 	 */
851 	if (ip)
852 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
853 
854 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
855 
856 	if (ip)
857 		xfs_ilock(ip, XFS_ILOCK_EXCL);
858 
859 	/* If we are asked to find next active id, keep looking */
860 	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
861 		error = xfs_dq_get_next_id(mp, type, &id, eof);
862 		if (!error)
863 			goto restart;
864 	}
865 
866 	if (error)
867 		return error;
868 
869 	if (ip) {
870 		/*
871 		 * A dquot could be attached to this inode by now, since
872 		 * we had dropped the ilock.
873 		 */
874 		if (xfs_this_quota_on(mp, type)) {
875 			struct xfs_dquot	*dqp1;
876 
877 			dqp1 = xfs_inode_dquot(ip, type);
878 			if (dqp1) {
879 				xfs_qm_dqdestroy(dqp);
880 				dqp = dqp1;
881 				xfs_dqlock(dqp);
882 				goto dqret;
883 			}
884 		} else {
885 			/* inode stays locked on return */
886 			xfs_qm_dqdestroy(dqp);
887 			return -ESRCH;
888 		}
889 	}
890 
891 	mutex_lock(&qi->qi_tree_lock);
892 	error = radix_tree_insert(tree, id, dqp);
893 	if (unlikely(error)) {
894 		WARN_ON(error != -EEXIST);
895 
896 		/*
897 		 * Duplicate found. Just throw away the new dquot and start
898 		 * over.
899 		 */
900 		mutex_unlock(&qi->qi_tree_lock);
901 		trace_xfs_dqget_dup(dqp);
902 		xfs_qm_dqdestroy(dqp);
903 		XFS_STATS_INC(mp, xs_qm_dquot_dups);
904 		goto restart;
905 	}
906 
907 	/*
908 	 * We return a locked dquot to the caller, with a reference taken
909 	 */
910 	xfs_dqlock(dqp);
911 	dqp->q_nrefs = 1;
912 
913 	qi->qi_dquots++;
914 	mutex_unlock(&qi->qi_tree_lock);
915 
916 	/* If we are asked to find next active id, keep looking */
917 	if (flags & XFS_QMOPT_DQNEXT) {
918 		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
919 			xfs_qm_dqput(dqp);
920 			error = xfs_dq_get_next_id(mp, type, &id, eof);
921 			if (error)
922 				return error;
923 			goto restart;
924 		}
925 	}
926 
927  dqret:
928 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
929 	trace_xfs_dqget_miss(dqp);
930 	*O_dqpp = dqp;
931 	return 0;
932 }
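
/*
 * Editorial usage sketch (not part of the original file), with a
 * hypothetical caller-supplied id: a typical caller looks roughly like
 * this, and must drop both the lock and the reference itself.
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, ip, id, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &dqp);
 *	if (error)
 *		return error;
 *	// dqp is returned locked with one reference held
 *	xfs_dqunlock(dqp);
 *	// ... use dqp ...
 *	xfs_qm_dqrele(dqp);
 */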
933 
934 /*
935  * Release a reference to the dquot (decrement ref-count) and unlock it.
936  *
937  * If there is a group quota attached to this dquot, carefully release that
938  * too without tripping over deadlocks'n'stuff.
939  */
940 void
941 xfs_qm_dqput(
942 	struct xfs_dquot	*dqp)
943 {
944 	ASSERT(dqp->q_nrefs > 0);
945 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
946 
947 	trace_xfs_dqput(dqp);
948 
949 	if (--dqp->q_nrefs == 0) {
950 		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
951 		trace_xfs_dqput_free(dqp);
952 
953 		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
954 			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
955 	}
956 	xfs_dqunlock(dqp);
957 }
958 
959 /*
960  * Release a dquot. Flush it if dirty, then dqput() it.
961  * dquot must not be locked.
962  */
963 void
964 xfs_qm_dqrele(
965 	xfs_dquot_t	*dqp)
966 {
967 	if (!dqp)
968 		return;
969 
970 	trace_xfs_dqrele(dqp);
971 
972 	xfs_dqlock(dqp);
973 	/*
974 	 * We don't care to flush it if the dquot is dirty here.
975 	 * That will create stutters that we want to avoid.
976 	 * Instead we do a delayed write when we try to reclaim
977 	 * a dirty dquot. Also xfs_sync will take part of the burden...
978 	 */
979 	xfs_qm_dqput(dqp);
980 }
981 
982 /*
983  * This is the dquot flushing I/O completion routine.  It is called
984  * from interrupt level when the buffer containing the dquot is
985  * flushed to disk.  It is responsible for removing the dquot logitem
986  * from the AIL if it has not been re-logged, and unlocking the dquot's
987  * flush lock. This behavior is very similar to that of inodes.
988  */
989 STATIC void
990 xfs_qm_dqflush_done(
991 	struct xfs_buf		*bp,
992 	struct xfs_log_item	*lip)
993 {
994 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
995 	xfs_dquot_t		*dqp = qip->qli_dquot;
996 	struct xfs_ail		*ailp = lip->li_ailp;
997 
998 	/*
999 	 * We only want to pull the item from the AIL if its
1000 	 * location in the log has not changed since we started the flush.
1001 	 * Thus, we only bother if the dquot's lsn has
1002 	 * not changed. First we check the lsn outside the lock
1003 	 * since it's cheaper, and then we recheck while
1004 	 * holding the lock before removing the dquot from the AIL.
1005 	 */
1006 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1007 	    lip->li_lsn == qip->qli_flush_lsn) {
1008 
1009 		/* xfs_trans_ail_delete() drops the AIL lock. */
1010 		spin_lock(&ailp->xa_lock);
1011 		if (lip->li_lsn == qip->qli_flush_lsn)
1012 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1013 		else
1014 			spin_unlock(&ailp->xa_lock);
1015 	}
1016 
1017 	/*
1018 	 * Release the dq's flush lock since we're done with it.
1019 	 */
1020 	xfs_dqfunlock(dqp);
1021 }
1022 
1023 /*
1024  * Write a modified dquot to disk.
1025  * The dquot must be locked and the flush lock held by the caller.
1026  * The flush lock will not be unlocked until the dquot reaches the disk,
1027  * but the dquot is free to be unlocked and modified by the caller
1028  * in the interim. Dquot is still locked on return. This behavior is
1029  * identical to that of inodes.
1030  */
1031 int
1032 xfs_qm_dqflush(
1033 	struct xfs_dquot	*dqp,
1034 	struct xfs_buf		**bpp)
1035 {
1036 	struct xfs_mount	*mp = dqp->q_mount;
1037 	struct xfs_buf		*bp;
1038 	struct xfs_disk_dquot	*ddqp;
1039 	int			error;
1040 
1041 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1042 	ASSERT(!completion_done(&dqp->q_flush));
1043 
1044 	trace_xfs_dqflush(dqp);
1045 
1046 	*bpp = NULL;
1047 
1048 	xfs_qm_dqunpin_wait(dqp);
1049 
1050 	/*
1051 	 * This may have been unpinned because the filesystem is shutting
1052 	 * down forcibly. If that's the case we must not write this dquot
1053 	 * to disk, because the log record didn't make it to disk.
1054 	 *
1055 	 * We also have to remove the log item from the AIL in this case,
1056 	 * as we wait for an empty AIL as part of the unmount process.
1057 	 */
1058 	if (XFS_FORCED_SHUTDOWN(mp)) {
1059 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1060 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1061 
1062 		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1063 
1064 		error = -EIO;
1065 		goto out_unlock;
1066 	}
1067 
1068 	/*
1069 	 * Get the buffer containing the on-disk dquot
1070 	 */
1071 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1072 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1073 				   &xfs_dquot_buf_ops);
1074 	if (error)
1075 		goto out_unlock;
1076 
1077 	/*
1078 	 * Calculate the location of the dquot inside the buffer.
1079 	 */
1080 	ddqp = bp->b_addr + dqp->q_bufoffset;
1081 
1082 	/*
1083 	 * A simple sanity check in case we got a corrupted dquot.
1084 	 */
1085 	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1086 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1087 	if (error) {
1088 		xfs_buf_relse(bp);
1089 		xfs_dqfunlock(dqp);
1090 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1091 		return -EIO;
1092 	}
1093 
1094 	/* This is the only portion of data that needs to persist */
1095 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1096 
1097 	/*
1098 	 * Clear the dirty field and remember the flush lsn for later use.
1099 	 */
1100 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1101 
1102 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1103 					&dqp->q_logitem.qli_item.li_lsn);
1104 
1105 	/*
1106 	 * Copy the lsn into the on-disk dquot now while we have the in-memory
1107 	 * dquot here. This can't be done later in the write verifier as we
1108 	 * can't get access to the log item at that point in time.
1109 	 *
1110 	 * We also calculate the CRC here so that the on-disk dquot in the
1111 	 * buffer always has a valid CRC. This ensures there is no possibility
1112 	 * of a dquot without an up-to-date CRC getting to disk.
1113 	 */
1114 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1115 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1116 
1117 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1118 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1119 				 XFS_DQUOT_CRC_OFF);
1120 	}
1121 
1122 	/*
1123 	 * Attach an iodone routine so that we can remove this dquot from the
1124 	 * AIL and release the flush lock once the dquot is synced to disk.
1125 	 */
1126 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1127 				  &dqp->q_logitem.qli_item);
1128 
1129 	/*
1130 	 * If the buffer is pinned then push on the log so we won't
1131 	 * get stuck waiting in the write for too long.
1132 	 */
1133 	if (xfs_buf_ispinned(bp)) {
1134 		trace_xfs_dqflush_force(dqp);
1135 		xfs_log_force(mp, 0);
1136 	}
1137 
1138 	trace_xfs_dqflush_done(dqp);
1139 	*bpp = bp;
1140 	return 0;
1141 
1142 out_unlock:
1143 	xfs_dqfunlock(dqp);
1144 	return -EIO;
1145 }
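
/*
 * Editorial usage sketch (not part of the original file): a flush or
 * reclaim path is expected to hold the dquot lock and flush lock, then
 * queue the returned buffer for write-out, roughly:
 *
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *
 * xfs_qm_dqflush_done() then runs from the buffer I/O completion and
 * drops the flush lock.
 */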
1146 
1147 /*
1148  * Lock two xfs_dquot structures.
1149  *
1150  * To avoid deadlocks, we always lock the quota structure with
1151  * the lower id first.
1152  */
1153 void
1154 xfs_dqlock2(
1155 	xfs_dquot_t	*d1,
1156 	xfs_dquot_t	*d2)
1157 {
1158 	if (d1 && d2) {
1159 		ASSERT(d1 != d2);
1160 		if (be32_to_cpu(d1->q_core.d_id) >
1161 		    be32_to_cpu(d2->q_core.d_id)) {
1162 			mutex_lock(&d2->q_qlock);
1163 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1164 		} else {
1165 			mutex_lock(&d1->q_qlock);
1166 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1167 		}
1168 	} else if (d1) {
1169 		mutex_lock(&d1->q_qlock);
1170 	} else if (d2) {
1171 		mutex_lock(&d2->q_qlock);
1172 	}
1173 }
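
/*
 * Editorial worked example (not part of the original file): locking a
 * dquot with id 500 together with one with id 10 takes the id-10 dquot's
 * q_qlock first and then the id-500 one with the XFS_QLOCK_NESTED lockdep
 * subclass, so any two threads locking the same pair always do so in the
 * same order.
 */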
1174 
1175 int __init
1176 xfs_qm_init(void)
1177 {
1178 	xfs_qm_dqzone =
1179 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1180 	if (!xfs_qm_dqzone)
1181 		goto out;
1182 
1183 	xfs_qm_dqtrxzone =
1184 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1185 	if (!xfs_qm_dqtrxzone)
1186 		goto out_free_dqzone;
1187 
1188 	return 0;
1189 
1190 out_free_dqzone:
1191 	kmem_zone_destroy(xfs_qm_dqzone);
1192 out:
1193 	return -ENOMEM;
1194 }
1195 
1196 void
1197 xfs_qm_exit(void)
1198 {
1199 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1200 	kmem_zone_destroy(xfs_qm_dqzone);
1201 }
1202