/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

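/*
 * Debug-only error injection knobs: when xfs_do_dqerror is set, every
 * xfs_dqerror_mod-th dquot lookup against xfs_dqerror_target fails with
 * -EIO (see the DEBUG block in xfs_qm_dqget below).
 */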
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
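/*
 * Note that only the block limits feed the speculative preallocation
 * watermarks, so xfs_dquot_set_prealloc_limits() is recomputed below only
 * when a default block soft or hard limit is applied.
 */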
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we just don't reject any quota
 * reservations in that case.
 * We also return 0 as the values of the timers in Q_GETQUOTA calls when
 * enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
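/*
 * For example: if d_bcount has gone over d_blk_softlimit and no block
 * timer is running, d_btimer is armed to "now + qi_btimelimit"; once the
 * count drops back under both block limits the timer is cleared again.
 */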
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		    (be64_to_cpu(d->d_bcount) <=
		     be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit)))  &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - ids are zero based.
	 */
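	/* e.g. with 30 dquots per chunk, id 37 maps to a chunk whose first id is 30 */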
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
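/*
 * For example, a 1000 block hard limit with no soft limit gives a high
 * watermark of 1000, a low watermark of 950 (95%), and q_low_space
 * thresholds of 10, 30 and 50 blocks (1%, 3% and 5% of the hard limit).
 */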
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	__uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota is turned off while we didn't
	 * have an inode lock
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */

	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

      error1:
	xfs_bmap_cancel(&flist);
      error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}

STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return error;
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
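	/*
	 * xfs_dqcheck() with XFS_QMOPT_DQREPAIR resets a damaged dquot in
	 * place; if even that fails we give up on the whole buffer.
	 */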
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
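/*
 * The chunk holding a dquot is found by dividing its id by the number of
 * dquots per chunk; e.g. with 30 dquots per chunk, id 100 yields
 * q_fileoffset 3 and buffer slot 10 (id % 30) within that chunk.
 */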
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}


/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
	}

	/*
	 * Get a pointer to the on-disk dquot and the buffer containing it;
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
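/*
 * A typical caller that only has an id does something like:
 *
 *	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER, XFS_QMOPT_DQALLOC, &dqp);
 *	if (!error) {
 *		... examine or adjust the locked dquot ...
 *		xfs_qm_dqput(dqp);
 *	}
 *
 * The lookup itself is a radix tree hit under qi_tree_lock when the dquot
 * is already cached; otherwise the dquot is read in (dropping the ilock if
 * one was given) and then inserted, retrying if another thread raced us in.
 */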
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return -EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If this is the last reference, the dquot is added to the LRU so it can
 * be reclaimed later rather than being freed immediately.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock must be held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
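/*
 * The flush itself copies q_core into the backing buffer, stamps the LSN
 * and CRC for v5 filesystems, and attaches xfs_qm_dqflush_done() so the
 * flush lock is released once the buffer reaches the disk.
 */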
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);

		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
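/*
 * For example, locking ids 7 and 3 of the same quota type takes id 3's
 * q_qlock first and then id 7's with the XFS_QLOCK_NESTED lockdep class.
 */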
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}

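/*
 * Set up the kmem zones used by the quota code; called when the XFS
 * module is initialised, and torn down again in xfs_qm_exit().
 */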
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}