// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_log_priv.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

kmem_zone_t	*xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
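	/*
	 * M_RES(mp) points at the mount's struct xfs_trans_resv, which is
	 * laid out as a sequence of struct xfs_trans_res members, so the
	 * casts above let the loop below walk them as a flat array.
	 */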
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

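/*
 * Note: xfs_trans_roll() below is the canonical caller of xfs_trans_dup();
 * it duplicates the transaction and then commits the original.
 */
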
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only flag recognized in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and set up
	 * the GFP_NOFS allocation context, so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_sb_version_haslazysbcount(&mp->m_sb));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_free_space(mp, NULL);
		if (error)
			return error;

		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
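
/*
 * Illustrative caller pattern (a sketch, not part of this file): allocate
 * a transaction against a precomputed reservation, join the objects to be
 * modified, then commit (or cancel) exactly once.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);	// commit also unlocks the inode
 */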

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount		*mp,
	struct xfs_trans		**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
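
/*
 * Minimal usage sketch (illustrative, not from this file): empty
 * transactions back read-only walks and must exit via cancel since
 * nothing may be dirtied under them.
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	// ... read metadata buffers under tp ...
 *	xfs_trans_cancel(tp);
 */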

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
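
/*
 * Example (a sketch, not from this file): charging an allocation of 'len'
 * blocks against the transaction's block reservation; with lazy superblock
 * counting the superblock buffer itself stays clean.
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 */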

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing; the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 * apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those deltas have already been
 * applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta) {
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);
		if (idelta < 0)
			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
							XFS_ICOUNT_BATCH) >= 0);
	}

	if (ifreedelta) {
		percpu_counter_add(&mp->m_ifree, ifreedelta);
		if (ifreedelta < 0)
			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents += rtxdelta;
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
	return;
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {
			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically, after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - i.e. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
			xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
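
/*
 * Sketch of a rolling sequence (illustrative, not from this file): a long
 * running operation logs one chunk at a time and rolls so that its permanent
 * log reservation never pins the tail of the log.
 *
 *	while (work remains) {
 *		// ... join objects, make one chunk of changes ...
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			goto out_cancel;	// tp is the new duplicate
 *	}
 *	error = xfs_trans_commit(tp);
 */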

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			rblocks / mp->m_sb.sb_rextsize,
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
1219