xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 77a87824)
1 /*
2  * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_shared.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_bit.h"
25 #include "xfs_mount.h"
26 #include "xfs_inode.h"
27 #include "xfs_bmap.h"
28 #include "xfs_bmap_util.h"
29 #include "xfs_alloc.h"
30 #include "xfs_quota.h"
31 #include "xfs_error.h"
32 #include "xfs_trans.h"
33 #include "xfs_buf_item.h"
34 #include "xfs_trans_space.h"
35 #include "xfs_trans_priv.h"
36 #include "xfs_qm.h"
37 #include "xfs_cksum.h"
38 #include "xfs_trace.h"
39 #include "xfs_log.h"
40 #include "xfs_bmap_btree.h"
41 
42 /*
43  * Lock order:
44  *
45  * ip->i_lock
46  *   qi->qi_tree_lock
47  *     dquot->q_qlock (xfs_dqlock() and friends)
48  *       dquot->q_flush (xfs_dqflock() and friends)
49  *       qi->qi_lru_lock
50  *
51  * If two dquots need to be locked the order is user before group/project,
52  * otherwise by the lowest id first, see xfs_dqlock2.
53  */
54 
55 #ifdef DEBUG
56 xfs_buftarg_t *xfs_dqerror_target;
57 int xfs_do_dqerror;
58 int xfs_dqreq_num;
59 int xfs_dqerror_mod = 33;
60 #endif
61 
62 struct kmem_zone		*xfs_qm_dqtrxzone;
63 static struct kmem_zone		*xfs_qm_dqzone;
64 
65 static struct lock_class_key xfs_dquot_group_class;
66 static struct lock_class_key xfs_dquot_project_class;
67 
68 /*
69  * This is called to free all the memory associated with a dquot
70  */
71 void
72 xfs_qm_dqdestroy(
73 	xfs_dquot_t	*dqp)
74 {
75 	ASSERT(list_empty(&dqp->q_lru));
76 
77 	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
78 	mutex_destroy(&dqp->q_qlock);
79 
80 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
81 	kmem_zone_free(xfs_qm_dqzone, dqp);
82 }
83 
84 /*
85  * If default limits are in force, push them into the dquot now.
86  * We overwrite the dquot limits only if they are zero and this
87  * is not the root dquot.
88  */
89 void
90 xfs_qm_adjust_dqlimits(
91 	struct xfs_mount	*mp,
92 	struct xfs_dquot	*dq)
93 {
94 	struct xfs_quotainfo	*q = mp->m_quotainfo;
95 	struct xfs_disk_dquot	*d = &dq->q_core;
96 	struct xfs_def_quota	*defq;
97 	int			prealloc = 0;
98 
99 	ASSERT(d->d_id);
100 	defq = xfs_get_defquota(dq, q);
101 
102 	if (defq->bsoftlimit && !d->d_blk_softlimit) {
103 		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
104 		prealloc = 1;
105 	}
106 	if (defq->bhardlimit && !d->d_blk_hardlimit) {
107 		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
108 		prealloc = 1;
109 	}
110 	if (defq->isoftlimit && !d->d_ino_softlimit)
111 		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
112 	if (defq->ihardlimit && !d->d_ino_hardlimit)
113 		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
114 	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
115 		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
116 	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
117 		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
118 
119 	if (prealloc)
120 		xfs_dquot_set_prealloc_limits(dq);
121 }
122 
123 /*
124  * Check the limits and timers of a dquot and start or reset timers
125  * if necessary.
126  * This gets called even when quota enforcement is OFF, which makes our
127  * life a little less complicated. (We just don't reject any quota
128  * reservations in that case, when enforcement is off).
129  * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
130  * enforcement's off.
131  * In contrast, warnings are a little different in that they don't
132  * 'automatically' get started when limits get exceeded.  They do
133  * get reset to zero, however, when we find the count to be under
134  * the soft limit (they are only ever set non-zero via userspace).
135  */
136 void
137 xfs_qm_adjust_dqtimers(
138 	xfs_mount_t		*mp,
139 	xfs_disk_dquot_t	*d)
140 {
141 	ASSERT(d->d_id);
142 
143 #ifdef DEBUG
144 	if (d->d_blk_hardlimit)
145 		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
146 		       be64_to_cpu(d->d_blk_hardlimit));
147 	if (d->d_ino_hardlimit)
148 		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
149 		       be64_to_cpu(d->d_ino_hardlimit));
150 	if (d->d_rtb_hardlimit)
151 		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
152 		       be64_to_cpu(d->d_rtb_hardlimit));
153 #endif
154 
155 	if (!d->d_btimer) {
156 		if ((d->d_blk_softlimit &&
157 		     (be64_to_cpu(d->d_bcount) >
158 		      be64_to_cpu(d->d_blk_softlimit))) ||
159 		    (d->d_blk_hardlimit &&
160 		     (be64_to_cpu(d->d_bcount) >
161 		      be64_to_cpu(d->d_blk_hardlimit)))) {
162 			d->d_btimer = cpu_to_be32(get_seconds() +
163 					mp->m_quotainfo->qi_btimelimit);
164 		} else {
165 			d->d_bwarns = 0;
166 		}
167 	} else {
168 		if ((!d->d_blk_softlimit ||
169 		     (be64_to_cpu(d->d_bcount) <=
170 		      be64_to_cpu(d->d_blk_softlimit))) &&
171 		    (!d->d_blk_hardlimit ||
172 		    (be64_to_cpu(d->d_bcount) <=
173 		     be64_to_cpu(d->d_blk_hardlimit)))) {
174 			d->d_btimer = 0;
175 		}
176 	}
177 
178 	if (!d->d_itimer) {
179 		if ((d->d_ino_softlimit &&
180 		     (be64_to_cpu(d->d_icount) >
181 		      be64_to_cpu(d->d_ino_softlimit))) ||
182 		    (d->d_ino_hardlimit &&
183 		     (be64_to_cpu(d->d_icount) >
184 		      be64_to_cpu(d->d_ino_hardlimit)))) {
185 			d->d_itimer = cpu_to_be32(get_seconds() +
186 					mp->m_quotainfo->qi_itimelimit);
187 		} else {
188 			d->d_iwarns = 0;
189 		}
190 	} else {
191 		if ((!d->d_ino_softlimit ||
192 		     (be64_to_cpu(d->d_icount) <=
193 		      be64_to_cpu(d->d_ino_softlimit)))  &&
194 		    (!d->d_ino_hardlimit ||
195 		     (be64_to_cpu(d->d_icount) <=
196 		      be64_to_cpu(d->d_ino_hardlimit)))) {
197 			d->d_itimer = 0;
198 		}
199 	}
200 
201 	if (!d->d_rtbtimer) {
202 		if ((d->d_rtb_softlimit &&
203 		     (be64_to_cpu(d->d_rtbcount) >
204 		      be64_to_cpu(d->d_rtb_softlimit))) ||
205 		    (d->d_rtb_hardlimit &&
206 		     (be64_to_cpu(d->d_rtbcount) >
207 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
208 			d->d_rtbtimer = cpu_to_be32(get_seconds() +
209 					mp->m_quotainfo->qi_rtbtimelimit);
210 		} else {
211 			d->d_rtbwarns = 0;
212 		}
213 	} else {
214 		if ((!d->d_rtb_softlimit ||
215 		     (be64_to_cpu(d->d_rtbcount) <=
216 		      be64_to_cpu(d->d_rtb_softlimit))) &&
217 		    (!d->d_rtb_hardlimit ||
218 		     (be64_to_cpu(d->d_rtbcount) <=
219 		      be64_to_cpu(d->d_rtb_hardlimit)))) {
220 			d->d_rtbtimer = 0;
221 		}
222 	}
223 }
224 
225 /*
226  * initialize a buffer full of dquots and log the whole thing
227  */
228 STATIC void
229 xfs_qm_init_dquot_blk(
230 	xfs_trans_t	*tp,
231 	xfs_mount_t	*mp,
232 	xfs_dqid_t	id,
233 	uint		type,
234 	xfs_buf_t	*bp)
235 {
236 	struct xfs_quotainfo	*q = mp->m_quotainfo;
237 	xfs_dqblk_t	*d;
238 	xfs_dqid_t	curid;
239 	int		i;
240 
241 	ASSERT(tp);
242 	ASSERT(xfs_buf_islocked(bp));
243 
244 	d = bp->b_addr;
245 
246 	/*
247 	 * ID of the first dquot in the block - id's are zero based.
248 	 */
249 	curid = id - (id % q->qi_dqperchunk);
250 	memset(d, 0, BBTOB(q->qi_dqchunklen));
251 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
252 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
253 		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
254 		d->dd_diskdq.d_id = cpu_to_be32(curid);
255 		d->dd_diskdq.d_flags = type;
256 		if (xfs_sb_version_hascrc(&mp->m_sb)) {
257 			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
258 			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
259 					 XFS_DQUOT_CRC_OFF);
260 		}
261 	}
262 
263 	xfs_trans_dquot_buf(tp, bp,
264 			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
265 			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
266 			     XFS_BLF_GDQUOT_BUF)));
267 	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
268 }
269 
270 /*
271  * Initialize the dynamic speculative preallocation thresholds. The lo/hi
272  * watermarks correspond to the soft and hard limits by default. If a soft limit
273  * is not specified, we use 95% of the hard limit.
274  */
275 void
276 xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
277 {
278 	__uint64_t space;
279 
280 	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
281 	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
282 	if (!dqp->q_prealloc_lo_wmark) {
283 		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
284 		do_div(dqp->q_prealloc_lo_wmark, 100);
285 		dqp->q_prealloc_lo_wmark *= 95;
286 	}
287 
288 	space = dqp->q_prealloc_hi_wmark;
289 
290 	do_div(space, 100);
291 	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
292 	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
293 	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
294 }
295 
296 /*
297  * Allocate a block and fill it with dquots.
298  * This is called when the bmapi finds a hole.
299  */
300 STATIC int
301 xfs_qm_dqalloc(
302 	xfs_trans_t	**tpp,
303 	xfs_mount_t	*mp,
304 	xfs_dquot_t	*dqp,
305 	xfs_inode_t	*quotip,
306 	xfs_fileoff_t	offset_fsb,
307 	xfs_buf_t	**O_bpp)
308 {
309 	xfs_fsblock_t	firstblock;
310 	xfs_bmap_free_t flist;
311 	xfs_bmbt_irec_t map;
312 	int		nmaps, error;
313 	xfs_buf_t	*bp;
314 	xfs_trans_t	*tp = *tpp;
315 
316 	ASSERT(tp != NULL);
317 
318 	trace_xfs_dqalloc(dqp);
319 
320 	/*
321 	 * Initialize the bmap freelist prior to calling bmapi code.
322 	 */
323 	xfs_bmap_init(&flist, &firstblock);
324 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
325 	/*
326 	 * Return if this type of quotas is turned off while we didn't
327 	 * have an inode lock
328 	 */
329 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
330 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
331 		return -ESRCH;
332 	}
333 
334 	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
335 	nmaps = 1;
336 	error = xfs_bmapi_write(tp, quotip, offset_fsb,
337 				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
338 				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
339 				&map, &nmaps, &flist);
340 	if (error)
341 		goto error0;
342 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
343 	ASSERT(nmaps == 1);
344 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
345 	       (map.br_startblock != HOLESTARTBLOCK));
346 
347 	/*
348 	 * Keep track of the blkno to save a lookup later
349 	 */
350 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
351 
352 	/* now we can just get the buffer (there's nothing to read yet) */
353 	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
354 			       dqp->q_blkno,
355 			       mp->m_quotainfo->qi_dqchunklen,
356 			       0);
357 	if (!bp) {
358 		error = -ENOMEM;
359 		goto error1;
360 	}
361 	bp->b_ops = &xfs_dquot_buf_ops;
362 
363 	/*
364 	 * Make a chunk of dquots out of this buffer and log
365 	 * the entire thing.
366 	 */
367 	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
368 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
369 
370 	/*
371 	 * xfs_bmap_finish() may commit the current transaction and
372 	 * start a second transaction if the freelist is not empty.
373 	 *
374 	 * Since we still want to modify this buffer, we need to
375 	 * ensure that the buffer is not released on commit of
376 	 * the first transaction and ensure the buffer is added to the
377 	 * second transaction.
378 	 *
379 	 * If there is only one transaction then don't stop the buffer
380 	 * from being released when it commits later on.
381 	 */
382 
383 	xfs_trans_bhold(tp, bp);
384 
385 	error = xfs_bmap_finish(tpp, &flist, NULL);
386 	if (error)
387 		goto error1;
388 
389 	/* Transaction was committed? */
390 	if (*tpp != tp) {
391 		tp = *tpp;
392 		xfs_trans_bjoin(tp, bp);
393 	} else {
394 		xfs_trans_bhold_release(tp, bp);
395 	}
396 
397 	*O_bpp = bp;
398 	return 0;
399 
400 error1:
401 	xfs_bmap_cancel(&flist);
402 error0:
403 	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
404 
405 	return error;
406 }
407 
408 STATIC int
409 xfs_qm_dqrepair(
410 	struct xfs_mount	*mp,
411 	struct xfs_trans	*tp,
412 	struct xfs_dquot	*dqp,
413 	xfs_dqid_t		firstid,
414 	struct xfs_buf		**bpp)
415 {
416 	int			error;
417 	struct xfs_disk_dquot	*ddq;
418 	struct xfs_dqblk	*d;
419 	int			i;
420 
421 	/*
422 	 * Read the buffer without verification so we get the corrupted
423 	 * buffer returned to us. make sure we verify it on write, though.
424 	 */
425 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
426 				   mp->m_quotainfo->qi_dqchunklen,
427 				   0, bpp, NULL);
428 
429 	if (error) {
430 		ASSERT(*bpp == NULL);
431 		return error;
432 	}
433 	(*bpp)->b_ops = &xfs_dquot_buf_ops;
434 
435 	ASSERT(xfs_buf_islocked(*bpp));
436 	d = (struct xfs_dqblk *)(*bpp)->b_addr;
437 
438 	/* Do the actual repair of dquots in this buffer */
439 	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
440 		ddq = &d[i].dd_diskdq;
441 		error = xfs_dqcheck(mp, ddq, firstid + i,
442 				       dqp->dq_flags & XFS_DQ_ALLTYPES,
443 				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
444 		if (error) {
445 			/* repair failed, we're screwed */
446 			xfs_trans_brelse(tp, *bpp);
447 			return -EIO;
448 		}
449 	}
450 
451 	return 0;
452 }
453 
454 /*
455  * Maps a dquot to the buffer containing its on-disk version.
456  * This returns a ptr to the buffer containing the on-disk dquot
457  * in the bpp param, and a ptr to the on-disk dquot within that buffer
458  */
459 STATIC int
460 xfs_qm_dqtobp(
461 	xfs_trans_t		**tpp,
462 	xfs_dquot_t		*dqp,
463 	xfs_disk_dquot_t	**O_ddpp,
464 	xfs_buf_t		**O_bpp,
465 	uint			flags)
466 {
467 	struct xfs_bmbt_irec	map;
468 	int			nmaps = 1, error;
469 	struct xfs_buf		*bp;
470 	struct xfs_inode	*quotip;
471 	struct xfs_mount	*mp = dqp->q_mount;
472 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
473 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
474 	uint			lock_mode;
475 
476 	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
477 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
478 
479 	lock_mode = xfs_ilock_data_map_shared(quotip);
480 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
481 		/*
482 		 * Return if this type of quotas is turned off while we
483 		 * didn't have the quota inode lock.
484 		 */
485 		xfs_iunlock(quotip, lock_mode);
486 		return -ESRCH;
487 	}
488 
489 	/*
490 	 * Find the block map; no allocations yet
491 	 */
492 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
493 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
494 
495 	xfs_iunlock(quotip, lock_mode);
496 	if (error)
497 		return error;
498 
499 	ASSERT(nmaps == 1);
500 	ASSERT(map.br_blockcount == 1);
501 
502 	/*
503 	 * Offset of dquot in the (fixed sized) dquot chunk.
504 	 */
505 	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
506 		sizeof(xfs_dqblk_t);
507 
508 	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
509 	if (map.br_startblock == HOLESTARTBLOCK) {
510 		/*
511 		 * We don't allocate unless we're asked to
512 		 */
513 		if (!(flags & XFS_QMOPT_DQALLOC))
514 			return -ENOENT;
515 
516 		ASSERT(tp);
517 		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
518 					dqp->q_fileoffset, &bp);
519 		if (error)
520 			return error;
521 		tp = *tpp;
522 	} else {
523 		trace_xfs_dqtobp_read(dqp);
524 
525 		/*
526 		 * store the blkno etc so that we don't have to do the
527 		 * mapping all the time
528 		 */
529 		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
530 
531 		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
532 					   dqp->q_blkno,
533 					   mp->m_quotainfo->qi_dqchunklen,
534 					   0, &bp, &xfs_dquot_buf_ops);
535 
536 		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
537 			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
538 						mp->m_quotainfo->qi_dqperchunk;
539 			ASSERT(bp == NULL);
540 			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
541 		}
542 
543 		if (error) {
544 			ASSERT(bp == NULL);
545 			return error;
546 		}
547 	}
548 
549 	ASSERT(xfs_buf_islocked(bp));
550 	*O_bpp = bp;
551 	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
552 
553 	return 0;
554 }
555 
556 
557 /*
558  * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
559  * and release the buffer immediately.
560  *
561  * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed.
562  */
563 int
564 xfs_qm_dqread(
565 	struct xfs_mount	*mp,
566 	xfs_dqid_t		id,
567 	uint			type,
568 	uint			flags,
569 	struct xfs_dquot	**O_dqpp)
570 {
571 	struct xfs_dquot	*dqp;
572 	struct xfs_disk_dquot	*ddqp;
573 	struct xfs_buf		*bp;
574 	struct xfs_trans	*tp = NULL;
575 	int			error;
576 
577 	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
578 
579 	dqp->dq_flags = type;
580 	dqp->q_core.d_id = cpu_to_be32(id);
581 	dqp->q_mount = mp;
582 	INIT_LIST_HEAD(&dqp->q_lru);
583 	mutex_init(&dqp->q_qlock);
584 	init_waitqueue_head(&dqp->q_pinwait);
585 
586 	/*
587 	 * Because we want to use a counting completion, complete
588 	 * the flush completion once to allow a single access to
589 	 * the flush completion without blocking.
590 	 */
591 	init_completion(&dqp->q_flush);
592 	complete(&dqp->q_flush);
593 
594 	/*
595 	 * Make sure group quotas have a different lock class than user
596 	 * quotas.
597 	 */
598 	switch (type) {
599 	case XFS_DQ_USER:
600 		/* uses the default lock class */
601 		break;
602 	case XFS_DQ_GROUP:
603 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
604 		break;
605 	case XFS_DQ_PROJ:
606 		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
607 		break;
608 	default:
609 		ASSERT(0);
610 		break;
611 	}
612 
613 	XFS_STATS_INC(mp, xs_qm_dquot);
614 
615 	trace_xfs_dqread(dqp);
616 
617 	if (flags & XFS_QMOPT_DQALLOC) {
618 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
619 				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
620 		if (error)
621 			goto error0;
622 	}
623 
624 	/*
625 	 * get a pointer to the on-disk dquot and the buffer containing it
626 	 * dqp already knows its own type (GROUP/USER).
627 	 */
628 	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
629 	if (error) {
630 		/*
631 		 * This can happen if quotas got turned off (ESRCH),
632 		 * or if the dquot didn't exist on disk and we ask to
633 		 * allocate (ENOENT).
634 		 */
635 		trace_xfs_dqread_fail(dqp);
636 		goto error1;
637 	}
638 
639 	/* copy everything from disk dquot to the incore dquot */
640 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
641 	xfs_qm_dquot_logitem_init(dqp);
642 
643 	/*
644 	 * Reservation counters are defined as reservation plus current usage
645 	 * to avoid having to add every time.
646 	 */
647 	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
648 	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
649 	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
650 
651 	/* initialize the dquot speculative prealloc thresholds */
652 	xfs_dquot_set_prealloc_limits(dqp);
653 
654 	/* Mark the buf so that this will stay incore a little longer */
655 	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
656 
657 	/*
658 	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
659 	 * So we need to release with xfs_trans_brelse().
660 	 * The strategy here is identical to that of inodes; we lock
661 	 * the dquot in xfs_qm_dqget() before making it accessible to
662 	 * others. This is because dquots, like inodes, need a good level of
663 	 * concurrency, and we don't want to take locks on the entire buffers
664 	 * for dquot accesses.
665 	 * Note also that the dquot buffer may even be dirty at this point, if
666 	 * this particular dquot was repaired. We still aren't afraid to
667 	 * brelse it because we have the changes incore.
668 	 */
669 	ASSERT(xfs_buf_islocked(bp));
670 	xfs_trans_brelse(tp, bp);
671 
672 	if (tp) {
673 		error = xfs_trans_commit(tp);
674 		if (error)
675 			goto error0;
676 	}
677 
678 	*O_dqpp = dqp;
679 	return error;
680 
681 error1:
682 	if (tp)
683 		xfs_trans_cancel(tp);
684 error0:
685 	xfs_qm_dqdestroy(dqp);
686 	*O_dqpp = NULL;
687 	return error;
688 }
689 
690 /*
691  * Advance to the next id in the current chunk, or if at the
692  * end of the chunk, skip ahead to first id in next allocated chunk
693  * using the SEEK_DATA interface.
694  */
695 static int
696 xfs_dq_get_next_id(
697 	xfs_mount_t		*mp,
698 	uint			type,
699 	xfs_dqid_t		*id,
700 	loff_t			eof)
701 {
702 	struct xfs_inode	*quotip;
703 	xfs_fsblock_t		start;
704 	loff_t			offset;
705 	uint			lock;
706 	xfs_dqid_t		next_id;
707 	int			error = 0;
708 
709 	/* Simple advance */
710 	next_id = *id + 1;
711 
712 	/* If new ID is within the current chunk, advancing it sufficed */
713 	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
714 		*id = next_id;
715 		return 0;
716 	}
717 
718 	/* Nope, next_id is now past the current chunk, so find the next one */
719 	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
720 
721 	quotip = xfs_quota_inode(mp, type);
722 	lock = xfs_ilock_data_map_shared(quotip);
723 
724 	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
725 				      eof, SEEK_DATA);
726 	if (offset < 0)
727 		error = offset;
728 
729 	xfs_iunlock(quotip, lock);
730 
731 	/* -ENXIO is essentially "no more data" */
732 	if (error)
733 		return (error == -ENXIO ? -ENOENT: error);
734 
735 	/* Convert next data offset back to a quota id */
736 	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
737 	return 0;
738 }
739 
740 /*
741  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
742  * a locked dquot, doing an allocation (if requested) as needed.
743  * When both an inode and an id are given, the inode's id takes precedence.
744  * That is, if the id changes while we don't hold the ilock inside this
745  * function, the new dquot is returned, not necessarily the one requested
746  * in the id argument.
747  */
748 int
749 xfs_qm_dqget(
750 	xfs_mount_t	*mp,
751 	xfs_inode_t	*ip,	  /* locked inode (optional) */
752 	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
753 	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
754 	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
755 	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
756 {
757 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
758 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
759 	struct xfs_dquot	*dqp;
760 	loff_t			eof = 0;
761 	int			error;
762 
763 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
764 	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
765 	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
766 	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
767 		return -ESRCH;
768 	}
769 
770 #ifdef DEBUG
771 	if (xfs_do_dqerror) {
772 		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
773 		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
774 			xfs_debug(mp, "Returning error in dqget");
775 			return -EIO;
776 		}
777 	}
778 
779 	ASSERT(type == XFS_DQ_USER ||
780 	       type == XFS_DQ_PROJ ||
781 	       type == XFS_DQ_GROUP);
782 	if (ip) {
783 		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
784 		ASSERT(xfs_inode_dquot(ip, type) == NULL);
785 	}
786 #endif
787 
788 	/* Get the end of the quota file if we need it */
789 	if (flags & XFS_QMOPT_DQNEXT) {
790 		struct xfs_inode	*quotip;
791 		xfs_fileoff_t		last;
792 		uint			lock_mode;
793 
794 		quotip = xfs_quota_inode(mp, type);
795 		lock_mode = xfs_ilock_data_map_shared(quotip);
796 		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
797 		xfs_iunlock(quotip, lock_mode);
798 		if (error)
799 			return error;
800 		eof = XFS_FSB_TO_B(mp, last);
801 	}
802 
803 restart:
804 	mutex_lock(&qi->qi_tree_lock);
805 	dqp = radix_tree_lookup(tree, id);
806 	if (dqp) {
807 		xfs_dqlock(dqp);
808 		if (dqp->dq_flags & XFS_DQ_FREEING) {
809 			xfs_dqunlock(dqp);
810 			mutex_unlock(&qi->qi_tree_lock);
811 			trace_xfs_dqget_freeing(dqp);
812 			delay(1);
813 			goto restart;
814 		}
815 
816 		/* uninit / unused quota found in radix tree, keep looking  */
817 		if (flags & XFS_QMOPT_DQNEXT) {
818 			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
819 				xfs_dqunlock(dqp);
820 				mutex_unlock(&qi->qi_tree_lock);
821 				error = xfs_dq_get_next_id(mp, type, &id, eof);
822 				if (error)
823 					return error;
824 				goto restart;
825 			}
826 		}
827 
828 		dqp->q_nrefs++;
829 		mutex_unlock(&qi->qi_tree_lock);
830 
831 		trace_xfs_dqget_hit(dqp);
832 		XFS_STATS_INC(mp, xs_qm_dqcachehits);
833 		*O_dqpp = dqp;
834 		return 0;
835 	}
836 	mutex_unlock(&qi->qi_tree_lock);
837 	XFS_STATS_INC(mp, xs_qm_dqcachemisses);
838 
839 	/*
840 	 * Dquot cache miss. We don't want to keep the inode lock across
841 	 * a (potential) disk read. Also we don't want to deal with the lock
842 	 * ordering between quotainode and this inode. OTOH, dropping the inode
843 	 * lock here means dealing with a chown that can happen before
844 	 * we re-acquire the lock.
845 	 */
846 	if (ip)
847 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
848 
849 	error = xfs_qm_dqread(mp, id, type, flags, &dqp);
850 
851 	if (ip)
852 		xfs_ilock(ip, XFS_ILOCK_EXCL);
853 
854 	/* If we are asked to find next active id, keep looking */
855 	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
856 		error = xfs_dq_get_next_id(mp, type, &id, eof);
857 		if (!error)
858 			goto restart;
859 	}
860 
861 	if (error)
862 		return error;
863 
864 	if (ip) {
865 		/*
866 		 * A dquot could be attached to this inode by now, since
867 		 * we had dropped the ilock.
868 		 */
869 		if (xfs_this_quota_on(mp, type)) {
870 			struct xfs_dquot	*dqp1;
871 
872 			dqp1 = xfs_inode_dquot(ip, type);
873 			if (dqp1) {
874 				xfs_qm_dqdestroy(dqp);
875 				dqp = dqp1;
876 				xfs_dqlock(dqp);
877 				goto dqret;
878 			}
879 		} else {
880 			/* inode stays locked on return */
881 			xfs_qm_dqdestroy(dqp);
882 			return -ESRCH;
883 		}
884 	}
885 
886 	mutex_lock(&qi->qi_tree_lock);
887 	error = radix_tree_insert(tree, id, dqp);
888 	if (unlikely(error)) {
889 		WARN_ON(error != -EEXIST);
890 
891 		/*
892 		 * Duplicate found. Just throw away the new dquot and start
893 		 * over.
894 		 */
895 		mutex_unlock(&qi->qi_tree_lock);
896 		trace_xfs_dqget_dup(dqp);
897 		xfs_qm_dqdestroy(dqp);
898 		XFS_STATS_INC(mp, xs_qm_dquot_dups);
899 		goto restart;
900 	}
901 
902 	/*
903 	 * We return a locked dquot to the caller, with a reference taken
904 	 */
905 	xfs_dqlock(dqp);
906 	dqp->q_nrefs = 1;
907 
908 	qi->qi_dquots++;
909 	mutex_unlock(&qi->qi_tree_lock);
910 
911 	/* If we are asked to find next active id, keep looking */
912 	if (flags & XFS_QMOPT_DQNEXT) {
913 		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
914 			xfs_qm_dqput(dqp);
915 			error = xfs_dq_get_next_id(mp, type, &id, eof);
916 			if (error)
917 				return error;
918 			goto restart;
919 		}
920 	}
921 
922  dqret:
923 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
924 	trace_xfs_dqget_miss(dqp);
925 	*O_dqpp = dqp;
926 	return 0;
927 }
928 
929 /*
930  * Release a reference to the dquot (decrement ref-count) and unlock it.
931  *
932  * If there is a group quota attached to this dquot, carefully release that
933  * too without tripping over deadlocks'n'stuff.
934  */
935 void
936 xfs_qm_dqput(
937 	struct xfs_dquot	*dqp)
938 {
939 	ASSERT(dqp->q_nrefs > 0);
940 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
941 
942 	trace_xfs_dqput(dqp);
943 
944 	if (--dqp->q_nrefs == 0) {
945 		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
946 		trace_xfs_dqput_free(dqp);
947 
948 		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
949 			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
950 	}
951 	xfs_dqunlock(dqp);
952 }
953 
954 /*
955  * Release a dquot. Flush it if dirty, then dqput() it.
956  * dquot must not be locked.
957  */
958 void
959 xfs_qm_dqrele(
960 	xfs_dquot_t	*dqp)
961 {
962 	if (!dqp)
963 		return;
964 
965 	trace_xfs_dqrele(dqp);
966 
967 	xfs_dqlock(dqp);
968 	/*
969 	 * We don't care to flush it if the dquot is dirty here.
970 	 * That will create stutters that we want to avoid.
971 	 * Instead we do a delayed write when we try to reclaim
972 	 * a dirty dquot. Also xfs_sync will take part of the burden...
973 	 */
974 	xfs_qm_dqput(dqp);
975 }
976 
977 /*
978  * This is the dquot flushing I/O completion routine.  It is called
979  * from interrupt level when the buffer containing the dquot is
980  * flushed to disk.  It is responsible for removing the dquot logitem
981  * from the AIL if it has not been re-logged, and unlocking the dquot's
982  * flush lock. This behavior is very similar to that of inodes..
983  */
984 STATIC void
985 xfs_qm_dqflush_done(
986 	struct xfs_buf		*bp,
987 	struct xfs_log_item	*lip)
988 {
989 	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
990 	xfs_dquot_t		*dqp = qip->qli_dquot;
991 	struct xfs_ail		*ailp = lip->li_ailp;
992 
993 	/*
994 	 * We only want to pull the item from the AIL if its
995 	 * location in the log has not changed since we started the flush.
996 	 * Thus, we only bother if the dquot's lsn has
997 	 * not changed. First we check the lsn outside the lock
998 	 * since it's cheaper, and then we recheck while
999 	 * holding the lock before removing the dquot from the AIL.
1000 	 */
1001 	if ((lip->li_flags & XFS_LI_IN_AIL) &&
1002 	    lip->li_lsn == qip->qli_flush_lsn) {
1003 
1004 		/* xfs_trans_ail_delete() drops the AIL lock. */
1005 		spin_lock(&ailp->xa_lock);
1006 		if (lip->li_lsn == qip->qli_flush_lsn)
1007 			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1008 		else
1009 			spin_unlock(&ailp->xa_lock);
1010 	}
1011 
1012 	/*
1013 	 * Release the dq's flush lock since we're done with it.
1014 	 */
1015 	xfs_dqfunlock(dqp);
1016 }
1017 
1018 /*
1019  * Write a modified dquot to disk.
1020  * The dquot must be locked and the flush lock too taken by caller.
1021  * The flush lock will not be unlocked until the dquot reaches the disk,
1022  * but the dquot is free to be unlocked and modified by the caller
1023  * in the interim. Dquot is still locked on return. This behavior is
1024  * identical to that of inodes.
1025  */
1026 int
1027 xfs_qm_dqflush(
1028 	struct xfs_dquot	*dqp,
1029 	struct xfs_buf		**bpp)
1030 {
1031 	struct xfs_mount	*mp = dqp->q_mount;
1032 	struct xfs_buf		*bp;
1033 	struct xfs_disk_dquot	*ddqp;
1034 	int			error;
1035 
1036 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
1037 	ASSERT(!completion_done(&dqp->q_flush));
1038 
1039 	trace_xfs_dqflush(dqp);
1040 
1041 	*bpp = NULL;
1042 
1043 	xfs_qm_dqunpin_wait(dqp);
1044 
1045 	/*
1046 	 * This may have been unpinned because the filesystem is shutting
1047 	 * down forcibly. If that's the case we must not write this dquot
1048 	 * to disk, because the log record didn't make it to disk.
1049 	 *
1050 	 * We also have to remove the log item from the AIL in this case,
1051 	 * as we wait for an emptry AIL as part of the unmount process.
1052 	 */
1053 	if (XFS_FORCED_SHUTDOWN(mp)) {
1054 		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
1055 		dqp->dq_flags &= ~XFS_DQ_DIRTY;
1056 
1057 		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1058 
1059 		error = -EIO;
1060 		goto out_unlock;
1061 	}
1062 
1063 	/*
1064 	 * Get the buffer containing the on-disk dquot
1065 	 */
1066 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1067 				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1068 				   &xfs_dquot_buf_ops);
1069 	if (error)
1070 		goto out_unlock;
1071 
1072 	/*
1073 	 * Calculate the location of the dquot inside the buffer.
1074 	 */
1075 	ddqp = bp->b_addr + dqp->q_bufoffset;
1076 
1077 	/*
1078 	 * A simple sanity check in case we got a corrupted dquot..
1079 	 */
1080 	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
1081 			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
1082 	if (error) {
1083 		xfs_buf_relse(bp);
1084 		xfs_dqfunlock(dqp);
1085 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1086 		return -EIO;
1087 	}
1088 
1089 	/* This is the only portion of data that needs to persist */
1090 	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1091 
1092 	/*
1093 	 * Clear the dirty field and remember the flush lsn for later use.
1094 	 */
1095 	dqp->dq_flags &= ~XFS_DQ_DIRTY;
1096 
1097 	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1098 					&dqp->q_logitem.qli_item.li_lsn);
1099 
1100 	/*
1101 	 * copy the lsn into the on-disk dquot now while we have the in memory
1102 	 * dquot here. This can't be done later in the write verifier as we
1103 	 * can't get access to the log item at that point in time.
1104 	 *
1105 	 * We also calculate the CRC here so that the on-disk dquot in the
1106 	 * buffer always has a valid CRC. This ensures there is no possibility
1107 	 * of a dquot without an up-to-date CRC getting to disk.
1108 	 */
1109 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
1110 		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
1111 
1112 		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1113 		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1114 				 XFS_DQUOT_CRC_OFF);
1115 	}
1116 
1117 	/*
1118 	 * Attach an iodone routine so that we can remove this dquot from the
1119 	 * AIL and release the flush lock once the dquot is synced to disk.
1120 	 */
1121 	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1122 				  &dqp->q_logitem.qli_item);
1123 
1124 	/*
1125 	 * If the buffer is pinned then push on the log so we won't
1126 	 * get stuck waiting in the write for too long.
1127 	 */
1128 	if (xfs_buf_ispinned(bp)) {
1129 		trace_xfs_dqflush_force(dqp);
1130 		xfs_log_force(mp, 0);
1131 	}
1132 
1133 	trace_xfs_dqflush_done(dqp);
1134 	*bpp = bp;
1135 	return 0;
1136 
1137 out_unlock:
1138 	xfs_dqfunlock(dqp);
1139 	return -EIO;
1140 }
1141 
1142 /*
1143  * Lock two xfs_dquot structures.
1144  *
1145  * To avoid deadlocks we always lock the quota structure with
1146  * the lowerd id first.
1147  */
1148 void
1149 xfs_dqlock2(
1150 	xfs_dquot_t	*d1,
1151 	xfs_dquot_t	*d2)
1152 {
1153 	if (d1 && d2) {
1154 		ASSERT(d1 != d2);
1155 		if (be32_to_cpu(d1->q_core.d_id) >
1156 		    be32_to_cpu(d2->q_core.d_id)) {
1157 			mutex_lock(&d2->q_qlock);
1158 			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1159 		} else {
1160 			mutex_lock(&d1->q_qlock);
1161 			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1162 		}
1163 	} else if (d1) {
1164 		mutex_lock(&d1->q_qlock);
1165 	} else if (d2) {
1166 		mutex_lock(&d2->q_qlock);
1167 	}
1168 }
1169 
1170 int __init
1171 xfs_qm_init(void)
1172 {
1173 	xfs_qm_dqzone =
1174 		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1175 	if (!xfs_qm_dqzone)
1176 		goto out;
1177 
1178 	xfs_qm_dqtrxzone =
1179 		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1180 	if (!xfs_qm_dqtrxzone)
1181 		goto out_free_dqzone;
1182 
1183 	return 0;
1184 
1185 out_free_dqzone:
1186 	kmem_zone_destroy(xfs_qm_dqzone);
1187 out:
1188 	return -ENOMEM;
1189 }
1190 
1191 void
1192 xfs_qm_exit(void)
1193 {
1194 	kmem_zone_destroy(xfs_qm_dqtrxzone);
1195 	kmem_zone_destroy(xfs_qm_dqzone);
1196 }
1197