xref: /openbmc/linux/fs/xfs/xfs_trans.c (revision 2c363576)
1 /*
2  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3  * Copyright (C) 2010 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_inode.h"
27 #include "xfs_extent_busy.h"
28 #include "xfs_quota.h"
29 #include "xfs_trans.h"
30 #include "xfs_trans_priv.h"
31 #include "xfs_log.h"
32 #include "xfs_trace.h"
33 #include "xfs_error.h"
34 
35 kmem_zone_t	*xfs_trans_zone;
36 kmem_zone_t	*xfs_log_item_desc_zone;
37 
38 #if defined(CONFIG_TRACEPOINTS)
39 static void
40 xfs_trans_trace_reservations(
41 	struct xfs_mount	*mp)
42 {
43 	struct xfs_trans_res	resv;
44 	struct xfs_trans_res	*res;
45 	struct xfs_trans_res	*end_res;
46 	int			i;
47 
48 	res = (struct xfs_trans_res *)M_RES(mp);
49 	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
50 	for (i = 0; res < end_res; i++, res++)
51 		trace_xfs_trans_resv_calc(mp, i, res);
52 	xfs_log_get_max_trans_res(mp, &resv);
53 	trace_xfs_trans_resv_calc(mp, -1, &resv);
54 }
55 #else
56 # define xfs_trans_trace_reservations(mp)
57 #endif
58 
59 /*
60  * Initialize the precomputed transaction reservation values
61  * in the mount structure.
62  */
63 void
64 xfs_trans_init(
65 	struct xfs_mount	*mp)
66 {
67 	xfs_trans_resv_calc(mp, M_RES(mp));
68 	xfs_trans_trace_reservations(mp);
69 }
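/*
 * A usage sketch, not part of the original file: the mount path is
 * assumed to call xfs_trans_init() once, before any transactions are
 * allocated, so that M_RES(mp) holds valid reservations for later
 * callers (tr_write is used purely as an example):
 *
 *	xfs_trans_init(mp);
 *	...
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 */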
70 
71 /*
72  * Free the transaction structure.  If there is more clean up
73  * to do when the structure is freed, add it here.
74  */
75 STATIC void
76 xfs_trans_free(
77 	struct xfs_trans	*tp)
78 {
79 	xfs_extent_busy_sort(&tp->t_busy);
80 	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
81 
82 	atomic_dec(&tp->t_mountp->m_active_trans);
83 	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
84 		sb_end_intwrite(tp->t_mountp->m_super);
85 	xfs_trans_free_dqinfo(tp);
86 	kmem_zone_free(xfs_trans_zone, tp);
87 }
88 
89 /*
90  * This is called to create a new transaction which will share the
91  * permanent log reservation of the given transaction.  The remaining
92  * unused block and rt extent reservations are also inherited.  This
93  * implies that the original transaction is no longer allowed to allocate
94  * blocks.  Locks and log items, however, are not inherited.  They must
95  * be added to the new transaction explicitly.
96  */
97 STATIC xfs_trans_t *
98 xfs_trans_dup(
99 	xfs_trans_t	*tp)
100 {
101 	xfs_trans_t	*ntp;
102 
103 	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
104 
105 	/*
106 	 * Initialize the new transaction structure.
107 	 */
108 	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
109 	ntp->t_mountp = tp->t_mountp;
110 	INIT_LIST_HEAD(&ntp->t_items);
111 	INIT_LIST_HEAD(&ntp->t_busy);
112 
113 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
114 	ASSERT(tp->t_ticket != NULL);
115 
116 	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
117 		       (tp->t_flags & XFS_TRANS_RESERVE) |
118 		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
119 	/* We gave our writer reference to the new transaction */
120 	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
121 	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
122 
123 	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
124 	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
125 	tp->t_blk_res = tp->t_blk_res_used;
126 
127 	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
128 	tp->t_rtx_res = tp->t_rtx_res_used;
129 	ntp->t_pflags = tp->t_pflags;
130 
131 	xfs_trans_dup_dqinfo(tp, ntp);
132 
133 	atomic_inc(&tp->t_mountp->m_active_trans);
134 	return ntp;
135 }
136 
137 /*
138  * This is called to reserve free disk blocks and log space for the
139  * given transaction.  This must be done before allocating any resources
140  * within the transaction.
141  *
142  * This will return ENOSPC if there are not enough blocks available.
143  * It will sleep waiting for available log space.
144  * A permanent log reservation is requested by setting XFS_TRANS_PERM_LOG_RES
145  * in resp->tr_logflags; it is used by long running transactions.  If any one
146  * of the reservations fails then they will all be backed out.
147  *
148  * This does not do quota reservations. That typically is done by the
149  * caller afterwards.
150  */
151 static int
152 xfs_trans_reserve(
153 	struct xfs_trans	*tp,
154 	struct xfs_trans_res	*resp,
155 	uint			blocks,
156 	uint			rtextents)
157 {
158 	int		error = 0;
159 	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
160 
161 	/* Mark this thread as being in a transaction */
162 	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
163 
164 	/*
165 	 * Attempt to reserve the needed disk blocks by decrementing
166 	 * the number needed from the number available.  This will
167 	 * fail if the count would go below zero.
168 	 */
169 	if (blocks > 0) {
170 		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
171 		if (error != 0) {
172 			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
173 			return -ENOSPC;
174 		}
175 		tp->t_blk_res += blocks;
176 	}
177 
178 	/*
179 	 * Reserve the log space needed for this transaction.
180 	 */
181 	if (resp->tr_logres > 0) {
182 		bool	permanent = false;
183 
184 		ASSERT(tp->t_log_res == 0 ||
185 		       tp->t_log_res == resp->tr_logres);
186 		ASSERT(tp->t_log_count == 0 ||
187 		       tp->t_log_count == resp->tr_logcount);
188 
189 		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
190 			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
191 			permanent = true;
192 		} else {
193 			ASSERT(tp->t_ticket == NULL);
194 			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
195 		}
196 
197 		if (tp->t_ticket != NULL) {
198 			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
199 			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
200 		} else {
201 			error = xfs_log_reserve(tp->t_mountp,
202 						resp->tr_logres,
203 						resp->tr_logcount,
204 						&tp->t_ticket, XFS_TRANSACTION,
205 						permanent);
206 		}
207 
208 		if (error)
209 			goto undo_blocks;
210 
211 		tp->t_log_res = resp->tr_logres;
212 		tp->t_log_count = resp->tr_logcount;
213 	}
214 
215 	/*
216 	 * Attempt to reserve the needed realtime extents by decrementing
217 	 * the number needed from the number available.  This will
218 	 * fail if the count would go below zero.
219 	 */
220 	if (rtextents > 0) {
221 		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
222 		if (error) {
223 			error = -ENOSPC;
224 			goto undo_log;
225 		}
226 		tp->t_rtx_res += rtextents;
227 	}
228 
229 	return 0;
230 
231 	/*
232 	 * Error cases jump to one of these labels to undo any
233 	 * reservations which have already been performed.
234 	 */
235 undo_log:
236 	if (resp->tr_logres > 0) {
237 		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
238 		tp->t_ticket = NULL;
239 		tp->t_log_res = 0;
240 		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
241 	}
242 
243 undo_blocks:
244 	if (blocks > 0) {
245 		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
246 		tp->t_blk_res = 0;
247 	}
248 
249 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
250 
251 	return error;
252 }
253 
254 int
255 xfs_trans_alloc(
256 	struct xfs_mount	*mp,
257 	struct xfs_trans_res	*resp,
258 	uint			blocks,
259 	uint			rtextents,
260 	uint			flags,
261 	struct xfs_trans	**tpp)
262 {
263 	struct xfs_trans	*tp;
264 	int			error;
265 
266 	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
267 		sb_start_intwrite(mp->m_super);
268 
269 	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
270 	atomic_inc(&mp->m_active_trans);
271 
272 	tp = kmem_zone_zalloc(xfs_trans_zone,
273 		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
274 	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
275 	tp->t_flags = flags;
276 	tp->t_mountp = mp;
277 	INIT_LIST_HEAD(&tp->t_items);
278 	INIT_LIST_HEAD(&tp->t_busy);
279 
280 	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
281 	if (error) {
282 		xfs_trans_cancel(tp);
283 		return error;
284 	}
285 
286 	*tpp = tp;
287 	return 0;
288 }
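/*
 * A minimal usage sketch of the allocation path above, not part of the
 * original file.  The reservation chosen (tr_ichange), the inode "ip"
 * and the helpers xfs_ilock()/xfs_trans_ijoin()/xfs_trans_log_inode()
 * live elsewhere in XFS and are assumptions for illustration only:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 *	return xfs_trans_commit(tp);
 *
 * Because the lock flags were passed to xfs_trans_ijoin(), the commit
 * (or a cancel) also drops XFS_ILOCK_EXCL for the caller.
 */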
289 
290 /*
291  * Create an empty transaction with no reservation.  This is a defensive
292  * mechanism for routines that query metadata without actually modifying
293  * them -- if the metadata being queried is somehow cross-linked (think a
294  * btree block pointer that points higher in the tree), we risk deadlock.
295  * However, blocks grabbed as part of a transaction can be re-grabbed.
296  * The verifiers will notice the corrupt block and the operation will fail
297  * back to userspace without deadlocking.
298  *
299  * Note the zero-length reservation; this transaction MUST be cancelled
300  * without any dirty data.
301  */
302 int
303 xfs_trans_alloc_empty(
304 	struct xfs_mount		*mp,
305 	struct xfs_trans		**tpp)
306 {
307 	struct xfs_trans_res		resv = {0};
308 
309 	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
310 }
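/*
 * A minimal usage sketch for empty transactions, not part of the
 * original file.  The query helper named below is hypothetical; the
 * point is that nothing is dirtied and the transaction is always
 * cancelled, never committed:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	error = xfs_example_readonly_query(tp, ...);	(hypothetical)
 *	xfs_trans_cancel(tp);
 *	return error;
 */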
311 
312 /*
313  * Record the indicated change to the given field for application
314  * to the file system's superblock when the transaction commits.
315  * For now, just store the change in the transaction structure.
316  *
317  * Mark the transaction structure to indicate that the superblock
318  * needs to be updated before committing.
319  *
320  * Because we may not be keeping track of allocated/free inodes and
321  * used filesystem blocks in the superblock, we do not mark the
322  * superblock dirty in this transaction if we modify these fields.
323  * We still need to update the transaction deltas so that they get
324  * applied to the incore superblock, but we don't want them to
325  * cause the superblock to get locked and logged if these are the
326  * only fields in the superblock that the transaction modifies.
327  */
328 void
329 xfs_trans_mod_sb(
330 	xfs_trans_t	*tp,
331 	uint		field,
332 	int64_t		delta)
333 {
334 	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
335 	xfs_mount_t	*mp = tp->t_mountp;
336 
337 	switch (field) {
338 	case XFS_TRANS_SB_ICOUNT:
339 		tp->t_icount_delta += delta;
340 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
341 			flags &= ~XFS_TRANS_SB_DIRTY;
342 		break;
343 	case XFS_TRANS_SB_IFREE:
344 		tp->t_ifree_delta += delta;
345 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
346 			flags &= ~XFS_TRANS_SB_DIRTY;
347 		break;
348 	case XFS_TRANS_SB_FDBLOCKS:
349 		/*
350 		 * Track the number of blocks allocated in the transaction.
351 		 * Make sure it does not exceed the number reserved. If so,
352 		 * shutdown as this can lead to accounting inconsistency.
353 		 */
354 		if (delta < 0) {
355 			tp->t_blk_res_used += (uint)-delta;
356 			if (tp->t_blk_res_used > tp->t_blk_res)
357 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
358 		}
359 		tp->t_fdblocks_delta += delta;
360 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
361 			flags &= ~XFS_TRANS_SB_DIRTY;
362 		break;
363 	case XFS_TRANS_SB_RES_FDBLOCKS:
364 		/*
365 		 * The allocation has already been applied to the
366 		 * in-core superblock's counter.  This should only
367 		 * be applied to the on-disk superblock.
368 		 */
369 		tp->t_res_fdblocks_delta += delta;
370 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
371 			flags &= ~XFS_TRANS_SB_DIRTY;
372 		break;
373 	case XFS_TRANS_SB_FREXTENTS:
374 		/*
375 		 * Track the number of blocks allocated in the
376 		 * Track the number of realtime extents allocated in the
377 		 * number reserved.
378 		 */
379 		if (delta < 0) {
380 			tp->t_rtx_res_used += (uint)-delta;
381 			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
382 		}
383 		tp->t_frextents_delta += delta;
384 		break;
385 	case XFS_TRANS_SB_RES_FREXTENTS:
386 		/*
387 		 * The allocation has already been applied to the
388 		 * in-core superblock's counter.  This should only
389 		 * be applied to the on-disk superblock.
390 		 */
391 		ASSERT(delta < 0);
392 		tp->t_res_frextents_delta += delta;
393 		break;
394 	case XFS_TRANS_SB_DBLOCKS:
395 		ASSERT(delta > 0);
396 		tp->t_dblocks_delta += delta;
397 		break;
398 	case XFS_TRANS_SB_AGCOUNT:
399 		ASSERT(delta > 0);
400 		tp->t_agcount_delta += delta;
401 		break;
402 	case XFS_TRANS_SB_IMAXPCT:
403 		tp->t_imaxpct_delta += delta;
404 		break;
405 	case XFS_TRANS_SB_REXTSIZE:
406 		tp->t_rextsize_delta += delta;
407 		break;
408 	case XFS_TRANS_SB_RBMBLOCKS:
409 		tp->t_rbmblocks_delta += delta;
410 		break;
411 	case XFS_TRANS_SB_RBLOCKS:
412 		tp->t_rblocks_delta += delta;
413 		break;
414 	case XFS_TRANS_SB_REXTENTS:
415 		tp->t_rextents_delta += delta;
416 		break;
417 	case XFS_TRANS_SB_REXTSLOG:
418 		tp->t_rextslog_delta += delta;
419 		break;
420 	default:
421 		ASSERT(0);
422 		return;
423 	}
424 
425 	tp->t_flags |= flags;
426 }
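/*
 * A usage sketch, not part of the original file: a caller that consumed
 * "len" blocks out of its block reservation and allocated one inode
 * would record the changes as deltas.  Nothing is written here; the
 * deltas are folded into the superblocks at commit time by
 * xfs_trans_apply_sb_deltas() and xfs_trans_unreserve_and_mod_sb():
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, 1);
 */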
427 
428 /*
429  * xfs_trans_apply_sb_deltas() is called from the commit code
430  * to bring the superblock buffer into the current transaction
431  * and modify it as requested by earlier calls to xfs_trans_mod_sb().
432  *
433  * For now we just look at each field allowed to change and change
434  * it if necessary.
435  */
436 STATIC void
437 xfs_trans_apply_sb_deltas(
438 	xfs_trans_t	*tp)
439 {
440 	xfs_dsb_t	*sbp;
441 	xfs_buf_t	*bp;
442 	int		whole = 0;
443 
444 	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
445 	sbp = XFS_BUF_TO_SBP(bp);
446 
447 	/*
448 	 * Check that superblock mods match the mods made to AGF counters.
449 	 */
450 	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
451 	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
452 		tp->t_ag_btree_delta));
453 
454 	/*
455 	 * Only update the superblock counters if we are logging them
456 	 */
457 	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
458 		if (tp->t_icount_delta)
459 			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
460 		if (tp->t_ifree_delta)
461 			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
462 		if (tp->t_fdblocks_delta)
463 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
464 		if (tp->t_res_fdblocks_delta)
465 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
466 	}
467 
468 	if (tp->t_frextents_delta)
469 		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
470 	if (tp->t_res_frextents_delta)
471 		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
472 
473 	if (tp->t_dblocks_delta) {
474 		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
475 		whole = 1;
476 	}
477 	if (tp->t_agcount_delta) {
478 		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
479 		whole = 1;
480 	}
481 	if (tp->t_imaxpct_delta) {
482 		sbp->sb_imax_pct += tp->t_imaxpct_delta;
483 		whole = 1;
484 	}
485 	if (tp->t_rextsize_delta) {
486 		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
487 		whole = 1;
488 	}
489 	if (tp->t_rbmblocks_delta) {
490 		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
491 		whole = 1;
492 	}
493 	if (tp->t_rblocks_delta) {
494 		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
495 		whole = 1;
496 	}
497 	if (tp->t_rextents_delta) {
498 		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
499 		whole = 1;
500 	}
501 	if (tp->t_rextslog_delta) {
502 		sbp->sb_rextslog += tp->t_rextslog_delta;
503 		whole = 1;
504 	}
505 
506 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
507 	if (whole)
508 		/*
509 		 * Log the whole thing, the fields are noncontiguous.
510 		 */
511 		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
512 	else
513 		/*
514 		 * Since all the modifiable fields are contiguous, we
515 		 * can get away with this.
516 		 */
517 		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
518 				  offsetof(xfs_dsb_t, sb_frextents) +
519 				  sizeof(sbp->sb_frextents) - 1);
520 }
521 
522 STATIC int
523 xfs_sb_mod8(
524 	uint8_t			*field,
525 	int8_t			delta)
526 {
527 	int8_t			counter = *field;
528 
529 	counter += delta;
530 	if (counter < 0) {
531 		ASSERT(0);
532 		return -EINVAL;
533 	}
534 	*field = counter;
535 	return 0;
536 }
537 
538 STATIC int
539 xfs_sb_mod32(
540 	uint32_t		*field,
541 	int32_t			delta)
542 {
543 	int32_t			counter = *field;
544 
545 	counter += delta;
546 	if (counter < 0) {
547 		ASSERT(0);
548 		return -EINVAL;
549 	}
550 	*field = counter;
551 	return 0;
552 }
553 
554 STATIC int
555 xfs_sb_mod64(
556 	uint64_t		*field,
557 	int64_t			delta)
558 {
559 	int64_t			counter = *field;
560 
561 	counter += delta;
562 	if (counter < 0) {
563 		ASSERT(0);
564 		return -EINVAL;
565 	}
566 	*field = counter;
567 	return 0;
568 }
569 
570 /*
571  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
572  * and apply superblock counter changes to the in-core superblock.  The
573  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
574  * applied to the in-core superblock.  The idea is that that has already been
575  * done.
576  *
577  * If we are not logging superblock counters, then the inode allocated/free and
578  * used block counts are not updated in the on disk superblock. In this case,
579  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
580  * still need to update the incore superblock with the changes.
581  */
582 void
583 xfs_trans_unreserve_and_mod_sb(
584 	struct xfs_trans	*tp)
585 {
586 	struct xfs_mount	*mp = tp->t_mountp;
587 	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
588 	int64_t			blkdelta = 0;
589 	int64_t			rtxdelta = 0;
590 	int64_t			idelta = 0;
591 	int64_t			ifreedelta = 0;
592 	int			error;
593 
594 	/* calculate deltas */
595 	if (tp->t_blk_res > 0)
596 		blkdelta = tp->t_blk_res;
597 	if ((tp->t_fdblocks_delta != 0) &&
598 	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
599 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
600 		blkdelta += tp->t_fdblocks_delta;
601 
602 	if (tp->t_rtx_res > 0)
603 		rtxdelta = tp->t_rtx_res;
604 	if ((tp->t_frextents_delta != 0) &&
605 	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
606 		rtxdelta += tp->t_frextents_delta;
607 
608 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
609 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
610 		idelta = tp->t_icount_delta;
611 		ifreedelta = tp->t_ifree_delta;
612 	}
613 
614 	/* apply the per-cpu counters */
615 	if (blkdelta) {
616 		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
617 		if (error)
618 			goto out;
619 	}
620 
621 	if (idelta) {
622 		error = xfs_mod_icount(mp, idelta);
623 		if (error)
624 			goto out_undo_fdblocks;
625 	}
626 
627 	if (ifreedelta) {
628 		error = xfs_mod_ifree(mp, ifreedelta);
629 		if (error)
630 			goto out_undo_icount;
631 	}
632 
633 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
634 		return;
635 
636 	/* apply remaining deltas */
637 	spin_lock(&mp->m_sb_lock);
638 	if (rtxdelta) {
639 		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
640 		if (error)
641 			goto out_undo_ifree;
642 	}
643 
644 	if (tp->t_dblocks_delta != 0) {
645 		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
646 		if (error)
647 			goto out_undo_frextents;
648 	}
649 	if (tp->t_agcount_delta != 0) {
650 		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
651 		if (error)
652 			goto out_undo_dblocks;
653 	}
654 	if (tp->t_imaxpct_delta != 0) {
655 		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
656 		if (error)
657 			goto out_undo_agcount;
658 	}
659 	if (tp->t_rextsize_delta != 0) {
660 		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
661 				     tp->t_rextsize_delta);
662 		if (error)
663 			goto out_undo_imaxpct;
664 	}
665 	if (tp->t_rbmblocks_delta != 0) {
666 		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
667 				     tp->t_rbmblocks_delta);
668 		if (error)
669 			goto out_undo_rextsize;
670 	}
671 	if (tp->t_rblocks_delta != 0) {
672 		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
673 		if (error)
674 			goto out_undo_rbmblocks;
675 	}
676 	if (tp->t_rextents_delta != 0) {
677 		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
678 				     tp->t_rextents_delta);
679 		if (error)
680 			goto out_undo_rblocks;
681 	}
682 	if (tp->t_rextslog_delta != 0) {
683 		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
684 				     tp->t_rextslog_delta);
685 		if (error)
686 			goto out_undo_rextents;
687 	}
688 	spin_unlock(&mp->m_sb_lock);
689 	return;
690 
691 out_undo_rextents:
692 	if (tp->t_rextents_delta)
693 		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
694 out_undo_rblocks:
695 	if (tp->t_rblocks_delta)
696 		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
697 out_undo_rbmblocks:
698 	if (tp->t_rbmblocks_delta)
699 		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
700 out_undo_rextsize:
701 	if (tp->t_rextsize_delta)
702 		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
703 out_undo_imaxpct:
704 	if (tp->t_imaxpct_delta)
705 		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
706 out_undo_agcount:
707 	if (tp->t_agcount_delta)
708 		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
709 out_undo_dblocks:
710 	if (tp->t_dblocks_delta)
711 		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
712 out_undo_frextents:
713 	if (rtxdelta)
714 		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
715 out_undo_ifree:
716 	spin_unlock(&mp->m_sb_lock);
717 	if (ifreedelta)
718 		xfs_mod_ifree(mp, -ifreedelta);
719 out_undo_icount:
720 	if (idelta)
721 		xfs_mod_icount(mp, -idelta);
722 out_undo_fdblocks:
723 	if (blkdelta)
724 		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
725 out:
726 	ASSERT(error == 0);
727 	return;
728 }
729 
730 /*
731  * Add the given log item to the transaction's list of log items.
732  *
733  * The log item will now point to its new descriptor with its li_desc field.
734  */
735 void
736 xfs_trans_add_item(
737 	struct xfs_trans	*tp,
738 	struct xfs_log_item	*lip)
739 {
740 	struct xfs_log_item_desc *lidp;
741 
742 	ASSERT(lip->li_mountp == tp->t_mountp);
743 	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
744 
745 	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
746 
747 	lidp->lid_item = lip;
748 	lidp->lid_flags = 0;
749 	list_add_tail(&lidp->lid_trans, &tp->t_items);
750 
751 	lip->li_desc = lidp;
752 }
753 
754 STATIC void
755 xfs_trans_free_item_desc(
756 	struct xfs_log_item_desc *lidp)
757 {
758 	list_del_init(&lidp->lid_trans);
759 	kmem_zone_free(xfs_log_item_desc_zone, lidp);
760 }
761 
762 /*
763  * Unlink and free the given descriptor.
764  */
765 void
766 xfs_trans_del_item(
767 	struct xfs_log_item	*lip)
768 {
769 	xfs_trans_free_item_desc(lip->li_desc);
770 	lip->li_desc = NULL;
771 }
772 
773 /*
774  * Unlock all of the items of a transaction and free all the descriptors
775  * of that transaction.
776  */
777 void
778 xfs_trans_free_items(
779 	struct xfs_trans	*tp,
780 	xfs_lsn_t		commit_lsn,
781 	bool			abort)
782 {
783 	struct xfs_log_item_desc *lidp, *next;
784 
785 	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
786 		struct xfs_log_item	*lip = lidp->lid_item;
787 
788 		lip->li_desc = NULL;
789 
790 		if (commit_lsn != NULLCOMMITLSN)
791 			lip->li_ops->iop_committing(lip, commit_lsn);
792 		if (abort)
793 			lip->li_flags |= XFS_LI_ABORTED;
794 		lip->li_ops->iop_unlock(lip);
795 
796 		xfs_trans_free_item_desc(lidp);
797 	}
798 }
799 
800 static inline void
801 xfs_log_item_batch_insert(
802 	struct xfs_ail		*ailp,
803 	struct xfs_ail_cursor	*cur,
804 	struct xfs_log_item	**log_items,
805 	int			nr_items,
806 	xfs_lsn_t		commit_lsn)
807 {
808 	int	i;
809 
810 	spin_lock(&ailp->ail_lock);
811 	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
812 	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
813 
814 	for (i = 0; i < nr_items; i++) {
815 		struct xfs_log_item *lip = log_items[i];
816 
817 		lip->li_ops->iop_unpin(lip, 0);
818 	}
819 }
820 
821 /*
822  * Bulk operation version of xfs_trans_committed that takes a log vector of
823  * items to insert into the AIL. This uses bulk AIL insertion techniques to
824  * minimise lock traffic.
825  *
826  * If we are called with the aborted flag set, it is because a log write during
827  * a CIL checkpoint commit has failed. In this case, all the items in the
828  * checkpoint have already gone through iop_committed and iop_unlock, which
829  * means that checkpoint commit abort handling is treated exactly the same
830  * as an iclog write error even though we haven't started any IO yet. Hence in
831  * this case all we need to do is iop_committed processing, followed by an
832  * iop_unpin(aborted) call.
833  *
834  * The AIL cursor is used to optimise the insert process. If commit_lsn is not
835  * at the end of the AIL, the insert cursor avoids the need to walk
836  * the AIL to find the insertion point on every xfs_log_item_batch_insert()
837  * call. This saves a lot of needless list walking and is a net win, even
838  * though it slightly increases the amount of AIL lock traffic to set it up
839  * and tear it down.
840  */
841 void
842 xfs_trans_committed_bulk(
843 	struct xfs_ail		*ailp,
844 	struct xfs_log_vec	*log_vector,
845 	xfs_lsn_t		commit_lsn,
846 	int			aborted)
847 {
848 #define LOG_ITEM_BATCH_SIZE	32
849 	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
850 	struct xfs_log_vec	*lv;
851 	struct xfs_ail_cursor	cur;
852 	int			i = 0;
853 
854 	spin_lock(&ailp->ail_lock);
855 	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
856 	spin_unlock(&ailp->ail_lock);
857 
858 	/* unpin all the log items */
859 	for (lv = log_vector; lv; lv = lv->lv_next ) {
860 		struct xfs_log_item	*lip = lv->lv_item;
861 		xfs_lsn_t		item_lsn;
862 
863 		if (aborted)
864 			lip->li_flags |= XFS_LI_ABORTED;
865 		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
866 
867 		/* item_lsn of -1 means the item needs no further processing */
868 		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
869 			continue;
870 
871 		/*
872 		 * if we are aborting the operation, no point in inserting the
873 		 * object into the AIL as we are in a shutdown situation.
874 		 */
875 		if (aborted) {
876 			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
877 			lip->li_ops->iop_unpin(lip, 1);
878 			continue;
879 		}
880 
881 		if (item_lsn != commit_lsn) {
882 
883 			/*
884 			 * Not a bulk update candidate due to unusual item_lsn.
885 			 * Push into AIL immediately, rechecking the lsn once
886 			 * we have the ail lock. Then unpin the item. This does
887 			 * not affect the AIL cursor the bulk insert path is
888 			 * using.
889 			 */
890 			spin_lock(&ailp->ail_lock);
891 			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
892 				xfs_trans_ail_update(ailp, lip, item_lsn);
893 			else
894 				spin_unlock(&ailp->ail_lock);
895 			lip->li_ops->iop_unpin(lip, 0);
896 			continue;
897 		}
898 
899 		/* Item is a candidate for bulk AIL insert.  */
900 		log_items[i++] = lv->lv_item;
901 		if (i >= LOG_ITEM_BATCH_SIZE) {
902 			xfs_log_item_batch_insert(ailp, &cur, log_items,
903 					LOG_ITEM_BATCH_SIZE, commit_lsn);
904 			i = 0;
905 		}
906 	}
907 
908 	/* make sure we insert the remainder! */
909 	if (i)
910 		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
911 
912 	spin_lock(&ailp->ail_lock);
913 	xfs_trans_ail_cursor_done(&cur);
914 	spin_unlock(&ailp->ail_lock);
915 }
916 
917 /*
918  * Commit the given transaction to the log.
919  *
920  * The XFS disk error handling mechanism is not based on a typical
921  * transaction abort mechanism. Logically after the filesystem
922  * gets marked 'SHUTDOWN', we can't let any new transactions
923  * be durable - ie. committed to disk - because some metadata might
924  * be inconsistent. In such cases, this returns an error, and the
925  * caller may assume that all locked objects joined to the transaction
926  * have already been unlocked as if the commit had succeeded.
927  * Do not reference the transaction structure after this call.
928  */
929 static int
930 __xfs_trans_commit(
931 	struct xfs_trans	*tp,
932 	bool			regrant)
933 {
934 	struct xfs_mount	*mp = tp->t_mountp;
935 	xfs_lsn_t		commit_lsn = -1;
936 	int			error = 0;
937 	int			sync = tp->t_flags & XFS_TRANS_SYNC;
938 
939 	/*
940 	 * If there is nothing to be logged by the transaction,
941 	 * then unlock all of the items associated with the
942 	 * transaction and free the transaction structure.
943 	 * Also make sure to return any reserved blocks to
944 	 * the free pool.
945 	 */
946 	if (!(tp->t_flags & XFS_TRANS_DIRTY))
947 		goto out_unreserve;
948 
949 	if (XFS_FORCED_SHUTDOWN(mp)) {
950 		error = -EIO;
951 		goto out_unreserve;
952 	}
953 
954 	ASSERT(tp->t_ticket != NULL);
955 
956 	/*
957 	 * If we need to update the superblock, then do it now.
958 	 */
959 	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
960 		xfs_trans_apply_sb_deltas(tp);
961 	xfs_trans_apply_dquot_deltas(tp);
962 
963 	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
964 
965 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
966 	xfs_trans_free(tp);
967 
968 	/*
969 	 * If the transaction needs to be synchronous, then force the
970 	 * log out now and wait for it.
971 	 */
972 	if (sync) {
973 		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
974 		XFS_STATS_INC(mp, xs_trans_sync);
975 	} else {
976 		XFS_STATS_INC(mp, xs_trans_async);
977 	}
978 
979 	return error;
980 
981 out_unreserve:
982 	xfs_trans_unreserve_and_mod_sb(tp);
983 
984 	/*
985 	 * It is indeed possible for the transaction to be not dirty but
986 	 * the dqinfo portion to be.  All that means is that we have some
987 	 * (non-persistent) quota reservations that need to be unreserved.
988 	 */
989 	xfs_trans_unreserve_and_mod_dquots(tp);
990 	if (tp->t_ticket) {
991 		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
992 		if (commit_lsn == -1 && !error)
993 			error = -EIO;
994 	}
995 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
996 	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
997 	xfs_trans_free(tp);
998 
999 	XFS_STATS_INC(mp, xs_trans_empty);
1000 	return error;
1001 }
1002 
1003 int
1004 xfs_trans_commit(
1005 	struct xfs_trans	*tp)
1006 {
1007 	return __xfs_trans_commit(tp, false);
1008 }
1009 
1010 /*
1011  * Unlock all of the transaction's items and free the transaction.
1012  * The transaction must not have modified any of its items, because
1013  * there is no way to restore them to their previous state.
1014  *
1015  * If the transaction has made a log reservation, make sure to release
1016  * it as well.
1017  */
1018 void
1019 xfs_trans_cancel(
1020 	struct xfs_trans	*tp)
1021 {
1022 	struct xfs_mount	*mp = tp->t_mountp;
1023 	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1024 
1025 	/*
1026 	 * See if the caller is relying on us to shut down the
1027 	 * filesystem.  This happens in paths where we detect
1028 	 * corruption and decide to give up.
1029 	 */
1030 	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1031 		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1032 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1033 	}
1034 #ifdef DEBUG
1035 	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1036 		struct xfs_log_item_desc *lidp;
1037 
1038 		list_for_each_entry(lidp, &tp->t_items, lid_trans)
1039 			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1040 	}
1041 #endif
1042 	xfs_trans_unreserve_and_mod_sb(tp);
1043 	xfs_trans_unreserve_and_mod_dquots(tp);
1044 
1045 	if (tp->t_ticket)
1046 		xfs_log_done(mp, tp->t_ticket, NULL, false);
1047 
1048 	/* mark this thread as no longer being in a transaction */
1049 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1050 
1051 	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1052 	xfs_trans_free(tp);
1053 }
1054 
1055 /*
1056  * Roll from one trans in the sequence of PERMANENT transactions to
1057  * the next: permanent transactions are only flushed out when
1058  * committed with xfs_trans_commit(), but we still want as soon
1059  * committed with xfs_trans_commit(), but we still want to let chunks
1060  * of it go to the log as soon as possible.  So we commit the chunk
1061  * we've been working on and get a new transaction to continue.
1062 int
1063 xfs_trans_roll(
1064 	struct xfs_trans	**tpp)
1065 {
1066 	struct xfs_trans	*trans = *tpp;
1067 	struct xfs_trans_res	tres;
1068 	int			error;
1069 
1070 	/*
1071 	 * Copy the critical parameters from one trans to the next.
1072 	 */
1073 	tres.tr_logres = trans->t_log_res;
1074 	tres.tr_logcount = trans->t_log_count;
1075 
1076 	*tpp = xfs_trans_dup(trans);
1077 
1078 	/*
1079 	 * Commit the current transaction.
1080 	 * If this commit failed, then it'd just unlock those items that
1081 	 * are not marked ihold. That also means that a filesystem shutdown
1082 	 * is in progress. The caller takes the responsibility to cancel
1083 	 * the duplicate transaction that gets returned.
1084 	 */
1085 	error = __xfs_trans_commit(trans, true);
1086 	if (error)
1087 		return error;
1088 
1089 	/*
1090 	 * Reserve space in the log for the next transaction.
1091 	 * This also pushes items in the "AIL", the list of logged items,
1092 	 * out to disk if they are taking up space at the tail of the log
1093 	 * that we want to use.  This requires that either nothing be locked
1094 	 * across this call, or that anything that is locked be logged in
1095 	 * the prior and the next transactions.
1096 	 */
1097 	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1098 	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1099 }
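/*
 * A usage sketch of the rolling pattern, not part of the original file.
 * The loop, the inode "ip" and the work step are hypothetical; the key
 * point is that log items are not carried across the roll (see
 * xfs_trans_dup() above), so anything still needed must be re-joined to
 * the new transaction:
 *
 *	while (more_work) {
 *		...dirty items joined to tp...
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			break;
 *		xfs_trans_ijoin(tp, ip, 0);
 *	}
 */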
1100