xref: /openbmc/linux/fs/xfs/xfs_dquot.c (revision 5927145e)
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked, the order is user before group/project;
 * otherwise lock the one with the lower id first.  See xfs_dqlock2.
 */
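
/*
 * A minimal sketch (not from the original source) of how the hierarchy
 * above plays out in practice: a cache lookup takes qi_tree_lock before
 * any dquot lock, and the flush lock is only ever taken while the dquot
 * lock is already held:
 *
 *	mutex_lock(&qi->qi_tree_lock);
 *	dqp = radix_tree_lookup(tree, id);
 *	xfs_dqlock(dqp);		(q_qlock nests inside qi_tree_lock)
 *	mutex_unlock(&qi->qi_tree_lock);
 *	if (!xfs_dqflock_nowait(dqp))	(q_flush nests inside q_qlock)
 *		...
 */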

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_zone_free(xfs_qm_dqzone, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(d->d_id);
	defq = xfs_get_defquota(dq, q);

	if (defq->bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
		prealloc = 1;
	}
	if (defq->bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
		prealloc = 1;
	}
	if (defq->isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
	if (defq->ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}
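
/*
 * Example (hypothetical values, not from the source): if the filesystem
 * default is bsoftlimit = 1000 blocks and a user dquot still has
 * d_blk_softlimit == 0, the default is copied in and the preallocation
 * watermarks are recomputed; a dquot that already carries an explicit
 * non-zero limit is left untouched.
 */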

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we simply don't reject any quota
 * reservations in that case, and we report the timers as 0 in
 * Q_GETQUOTA calls.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
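
/*
 * Worked example (hypothetical numbers): with a block soft limit of 100,
 * d_bcount = 120 and d_btimer == 0, the timer is started and set to
 * "now + qi_btimelimit" (the grace period, 7 days by default).  If usage
 * later drops back to 90 blocks, the next call finds the count under
 * both limits and clears d_btimer again.
 */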

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	xfs_dqid_t	curid;
	int		i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block; IDs are zero-based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
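
/*
 * Example of the rounding above (hypothetical geometry): with
 * qi_dqperchunk = 30, a request for id 37 initializes the chunk that
 * starts at curid = 37 - (37 % 30) = 30, i.e. dquot IDs 30..59 all live
 * in the same on-disk block.
 */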

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
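
/*
 * Worked example (hypothetical limits): with a hard limit of 1000 blocks
 * and no soft limit, lo_wmark = 1000 / 100 * 95 = 950 blocks, and
 * q_low_space[] ends up holding 10, 30 and 50 blocks, i.e. 1%, 3% and 5%
 * of the hard limit, respectively.
 */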

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmap lookup finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	struct xfs_defer_ops dfops;
	xfs_bmbt_irec_t map;
	int		nmaps, error;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_defer_init(&dfops, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota was turned off while we didn't
	 * hold the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &dfops);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_defer_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	error = xfs_defer_finish(tpp, &dfops);
	if (error)
		goto error1;

	/* Transaction was committed? */
	if (*tpp != tp) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_defer_cancel(&dfops);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip;
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't hold the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of the dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to.
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * Store the blkno etc. so that we don't have to do the
		 * mapping all the time.
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);
		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}
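
/*
 * Example of the mapping math above (hypothetical geometry): with
 * qi_dqperchunk = 30, dquot id 37 lives at file offset 37 / 30 = 1 of
 * the quota inode, and at byte offset (37 % 30) * sizeof(xfs_dqblk_t)
 * within that chunk's buffer, i.e. the 8th dquot slot.
 */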

/*
 * Read in the on-disk dquot using dqtobp() then copy it to an incore
 * version, and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(mp, xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
		if (error)
			goto error0;
	}

	/*
	 * Get a pointer to the on-disk dquot and the buffer containing it;
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with an xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffer
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
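
/*
 * A minimal usage sketch (illustrative only, not from the source):
 * reading the user dquot for uid 1000 and allocating it on disk if it
 * does not exist yet:
 *
 *	struct xfs_dquot *dqp;
 *	int error;
 *
 *	error = xfs_qm_dqread(mp, 1000, XFS_DQ_USER, XFS_QMOPT_DQALLOC,
 *			      &dqp);
 *	if (!error) {
 *		... use dqp ...
 *		xfs_qm_dqdestroy(dqp);	(or insert it into the cache, as
 *					 xfs_qm_dqget() below does)
 *	}
 */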

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to the first id in the next allocated
 * chunk by searching the quota file's extent map.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	uint			type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If the new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
		if (error) {
			/* don't leak the ilock on the error path */
			xfs_iunlock(quotip, lock_flags);
			return error;
		}
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
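
/*
 * Example of the stepping above (hypothetical geometry, qi_dqperchunk =
 * 30): starting from id 28, the simple advance yields 29, which is still
 * inside chunk 0.  Starting from id 29 the advance yields 30, a chunk
 * boundary, so we look up the next data extent at or beyond file offset
 * 1 and resume at the first id of that chunk.
 */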

/*
 * Given the file system, an inode OR an id, and the type (UDQUOT/GDQUOT),
 * return a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		/* uninit / unused quota found in radix tree, keep looking */
		if (flags & XFS_QMOPT_DQNEXT) {
			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
				xfs_dqunlock(dqp);
				mutex_unlock(&qi->qi_tree_lock);
				error = xfs_dq_get_next_id(mp, type, &id);
				if (error)
					return error;
				goto restart;
			}
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(mp, xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(mp, xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	/* If we are asked to find next active id, keep looking */
	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
		error = xfs_dq_get_next_id(mp, type, &id);
		if (!error)
			goto restart;
	}

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken.
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	/* If we are asked to find next active id, keep looking */
	if (flags & XFS_QMOPT_DQNEXT) {
		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			xfs_qm_dqput(dqp);
			error = xfs_dq_get_next_id(mp, type, &id);
			if (error)
				return error;
			goto restart;
		}
	}

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
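
/*
 * Typical caller pattern (illustrative sketch only): look up the group
 * dquot for gid 100 without an inode, use it, then drop the reference:
 *
 *	struct xfs_dquot *dqp;
 *
 *	if (!xfs_qm_dqget(mp, NULL, 100, XFS_DQ_GROUP, 0, &dqp)) {
 *		... dqp is locked with one reference held ...
 *		xfs_qm_dqput(dqp);	(unlocks and releases the ref)
 *	}
 */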

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * The dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That would create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its location in the
	 * log has not changed since we started the flush, i.e. if the dquot's
	 * lsn is unchanged.  First we check the lsn outside the lock since
	 * it's cheaper, and then we recheck while holding the lock before
	 * removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     (lip->li_flags & XFS_LI_FAILED))) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		} else {
			/*
			 * Clear the failed state since we are about to drop
			 * the flush lock.
			 */
			if (lip->li_flags & XFS_LI_FAILED)
				xfs_clear_li_failed(lip);
			spin_unlock(&ailp->xa_lock);
		}
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. The dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;

		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);

		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, 0);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				be32_to_cpu(ddqp->d_id), fa);
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
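
/*
 * Example (illustrative only): when both the old and the new owner's
 * dquots must be held at once, a caller does
 *
 *	xfs_dqlock2(dqp_old, dqp_new);
 *
 * and the ids decide the locking order: with ids 500 and 1000, the dquot
 * with id 500 is always locked first regardless of which argument it was
 * passed as, so two racing callers cannot deadlock against each other.
 */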

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}