xref: /openbmc/linux/fs/xfs/xfs_trans.c (revision 160b8e75)
1 /*
2  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3  * Copyright (C) 2010 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_inode.h"
27 #include "xfs_extent_busy.h"
28 #include "xfs_quota.h"
29 #include "xfs_trans.h"
30 #include "xfs_trans_priv.h"
31 #include "xfs_log.h"
32 #include "xfs_trace.h"
33 #include "xfs_error.h"
34 
35 kmem_zone_t	*xfs_trans_zone;
36 kmem_zone_t	*xfs_log_item_desc_zone;
37 
38 #if defined(CONFIG_TRACEPOINTS)
39 static void
40 xfs_trans_trace_reservations(
41 	struct xfs_mount	*mp)
42 {
43 	struct xfs_trans_res	resv;
44 	struct xfs_trans_res	*res;
45 	struct xfs_trans_res	*end_res;
46 	int			i;
47 
48 	res = (struct xfs_trans_res *)M_RES(mp);
49 	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
50 	for (i = 0; res < end_res; i++, res++)
51 		trace_xfs_trans_resv_calc(mp, i, res);
52 	xfs_log_get_max_trans_res(mp, &resv);
53 	trace_xfs_trans_resv_calc(mp, -1, &resv);
54 }
55 #else
56 # define xfs_trans_trace_reservations(mp)
57 #endif
58 
59 /*
60  * Initialize the precomputed transaction reservation values
61  * in the mount structure.
62  */
63 void
64 xfs_trans_init(
65 	struct xfs_mount	*mp)
66 {
67 	xfs_trans_resv_calc(mp, M_RES(mp));
68 	xfs_trans_trace_reservations(mp);
69 }
70 
71 /*
72  * Free the transaction structure.  If there is more clean up
73  * to do when the structure is freed, add it here.
74  */
75 STATIC void
76 xfs_trans_free(
77 	struct xfs_trans	*tp)
78 {
79 	xfs_extent_busy_sort(&tp->t_busy);
80 	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
81 
82 	atomic_dec(&tp->t_mountp->m_active_trans);
83 	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
84 		sb_end_intwrite(tp->t_mountp->m_super);
85 	xfs_trans_free_dqinfo(tp);
86 	kmem_zone_free(xfs_trans_zone, tp);
87 }
88 
89 /*
90  * This is called to create a new transaction which will share the
91  * permanent log reservation of the given transaction.  The remaining
92  * unused block and rt extent reservations are also inherited.  This
93  * implies that the original transaction is no longer allowed to allocate
94  * blocks.  Locks and log items, however, are not inherited.  They must
95  * be added to the new transaction explicitly.
96  */
97 STATIC xfs_trans_t *
98 xfs_trans_dup(
99 	xfs_trans_t	*tp)
100 {
101 	xfs_trans_t	*ntp;
102 
103 	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
104 
105 	/*
106 	 * Initialize the new transaction structure.
107 	 */
108 	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
109 	ntp->t_mountp = tp->t_mountp;
110 	INIT_LIST_HEAD(&ntp->t_items);
111 	INIT_LIST_HEAD(&ntp->t_busy);
112 
113 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
114 	ASSERT(tp->t_ticket != NULL);
115 
116 	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
117 		       (tp->t_flags & XFS_TRANS_RESERVE) |
118 		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
119 	/* We gave our writer reference to the new transaction */
120 	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
121 	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
122 	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
123 	tp->t_blk_res = tp->t_blk_res_used;
124 	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
125 	tp->t_rtx_res = tp->t_rtx_res_used;
126 	ntp->t_pflags = tp->t_pflags;
127 
128 	xfs_trans_dup_dqinfo(tp, ntp);
129 
130 	atomic_inc(&tp->t_mountp->m_active_trans);
131 	return ntp;
132 }
133 
134 /*
135  * This is called to reserve free disk blocks and log space for the
136  * given transaction.  This must be done before allocating any resources
137  * within the transaction.
138  *
139  * This will return -ENOSPC if there are not enough blocks available.
140  * It will sleep waiting for available log space.
141  * A permanent log reservation, as used by long running transactions, is
142  * requested by setting XFS_TRANS_PERM_LOG_RES in resp->tr_logflags.  If
143  * any one of the reservations fails then they will all be backed out.
144  *
145  * This does not do quota reservations. That typically is done by the
146  * caller afterwards.
147  */
148 static int
149 xfs_trans_reserve(
150 	struct xfs_trans	*tp,
151 	struct xfs_trans_res	*resp,
152 	uint			blocks,
153 	uint			rtextents)
154 {
155 	int		error = 0;
156 	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
157 
158 	/* Mark this thread as being in a transaction */
159 	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
160 
161 	/*
162 	 * Attempt to reserve the needed disk blocks by decrementing
163 	 * the number needed from the number available.  This will
164 	 * fail if the count would go below zero.
165 	 */
166 	if (blocks > 0) {
167 		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
168 		if (error != 0) {
169 			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
170 			return -ENOSPC;
171 		}
172 		tp->t_blk_res += blocks;
173 	}
174 
175 	/*
176 	 * Reserve the log space needed for this transaction.
177 	 */
178 	if (resp->tr_logres > 0) {
179 		bool	permanent = false;
180 
181 		ASSERT(tp->t_log_res == 0 ||
182 		       tp->t_log_res == resp->tr_logres);
183 		ASSERT(tp->t_log_count == 0 ||
184 		       tp->t_log_count == resp->tr_logcount);
185 
186 		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
187 			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
188 			permanent = true;
189 		} else {
190 			ASSERT(tp->t_ticket == NULL);
191 			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
192 		}
193 
194 		if (tp->t_ticket != NULL) {
195 			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
196 			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
197 		} else {
198 			error = xfs_log_reserve(tp->t_mountp,
199 						resp->tr_logres,
200 						resp->tr_logcount,
201 						&tp->t_ticket, XFS_TRANSACTION,
202 						permanent);
203 		}
204 
205 		if (error)
206 			goto undo_blocks;
207 
208 		tp->t_log_res = resp->tr_logres;
209 		tp->t_log_count = resp->tr_logcount;
210 	}
211 
212 	/*
213 	 * Attempt to reserve the needed realtime extents by decrementing
214 	 * the number needed from the number available.  This will
215 	 * fail if the count would go below zero.
216 	 */
217 	if (rtextents > 0) {
218 		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
219 		if (error) {
220 			error = -ENOSPC;
221 			goto undo_log;
222 		}
223 		tp->t_rtx_res += rtextents;
224 	}
225 
226 	return 0;
227 
228 	/*
229 	 * Error cases jump to one of these labels to undo any
230 	 * reservations which have already been performed.
231 	 */
232 undo_log:
233 	if (resp->tr_logres > 0) {
234 		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
235 		tp->t_ticket = NULL;
236 		tp->t_log_res = 0;
237 		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
238 	}
239 
240 undo_blocks:
241 	if (blocks > 0) {
242 		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
243 		tp->t_blk_res = 0;
244 	}
245 
246 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
247 
248 	return error;
249 }
250 
251 int
252 xfs_trans_alloc(
253 	struct xfs_mount	*mp,
254 	struct xfs_trans_res	*resp,
255 	uint			blocks,
256 	uint			rtextents,
257 	uint			flags,
258 	struct xfs_trans	**tpp)
259 {
260 	struct xfs_trans	*tp;
261 	int			error;
262 
263 	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
264 		sb_start_intwrite(mp->m_super);
265 
266 	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
267 	atomic_inc(&mp->m_active_trans);
268 
269 	tp = kmem_zone_zalloc(xfs_trans_zone,
270 		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
271 	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
272 	tp->t_flags = flags;
273 	tp->t_mountp = mp;
274 	INIT_LIST_HEAD(&tp->t_items);
275 	INIT_LIST_HEAD(&tp->t_busy);
276 
277 	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
278 	if (error) {
279 		xfs_trans_cancel(tp);
280 		return error;
281 	}
282 
283 	*tpp = tp;
284 	return 0;
285 }
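
/*
 * Illustrative sketch of a typical caller (assumed, not taken from this
 * file): allocate a transaction against a precomputed reservation, join a
 * locked inode so that commit unlocks it, log the change, and commit.  The
 * tr_ichange reservation and the locking flags are assumptions about the
 * caller, loosely modelled on timestamp-update style callers elsewhere in
 * XFS.
 */
static int
example_log_inode_change(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}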
286 
287 /*
288  * Create an empty transaction with no reservation.  This is a defensive
289  * mechanism for routines that query metadata without actually modifying
290  * them -- if the metadata being queried is somehow cross-linked (think a
291  * btree block pointer that points higher in the tree), we risk deadlock.
292  * However, blocks grabbed as part of a transaction can be re-grabbed.
293  * The verifiers will notice the corrupt block and the operation will fail
294  * back to userspace without deadlocking.
295  *
296  * Note the zero-length reservation; this transaction MUST be cancelled
297  * without any dirty data.
298  */
299 int
300 xfs_trans_alloc_empty(
301 	struct xfs_mount		*mp,
302 	struct xfs_trans		**tpp)
303 {
304 	struct xfs_trans_res		resv = {0};
305 
306 	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
307 }
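
/*
 * Illustrative sketch (assumed, not taken from this file): a read-only
 * metadata query wraps its buffer reads in an empty transaction so that
 * cross-linked blocks are re-grabbed instead of deadlocking, then cancels
 * it.  example_walk_metadata() is hypothetical; the transaction must never
 * be dirtied.
 */
static int
example_query_metadata(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = example_walk_metadata(mp, tp);	/* hypothetical read-only walk */

	xfs_trans_cancel(tp);			/* always cancel, never commit */
	return error;
}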
308 
309 /*
310  * Record the indicated change to the given field for application
311  * to the file system's superblock when the transaction commits.
312  * For now, just store the change in the transaction structure.
313  *
314  * Mark the transaction structure to indicate that the superblock
315  * needs to be updated before committing.
316  *
317  * Because we may not be keeping track of allocated/free inodes and
318  * used filesystem blocks in the superblock, we do not mark the
319  * superblock dirty in this transaction if we modify these fields.
320  * We still need to update the transaction deltas so that they get
321  * applied to the incore superblock, but we don't want them to
322  * cause the superblock to get locked and logged if these are the
323  * only fields in the superblock that the transaction modifies.
324  */
325 void
326 xfs_trans_mod_sb(
327 	xfs_trans_t	*tp,
328 	uint		field,
329 	int64_t		delta)
330 {
331 	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
332 	xfs_mount_t	*mp = tp->t_mountp;
333 
334 	switch (field) {
335 	case XFS_TRANS_SB_ICOUNT:
336 		tp->t_icount_delta += delta;
337 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
338 			flags &= ~XFS_TRANS_SB_DIRTY;
339 		break;
340 	case XFS_TRANS_SB_IFREE:
341 		tp->t_ifree_delta += delta;
342 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
343 			flags &= ~XFS_TRANS_SB_DIRTY;
344 		break;
345 	case XFS_TRANS_SB_FDBLOCKS:
346 		/*
347 		 * Track the number of blocks allocated in the
348 		 * transaction.  Make sure it does not exceed the
349 		 * number reserved.
350 		 */
351 		if (delta < 0) {
352 			tp->t_blk_res_used += (uint)-delta;
353 			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
354 		}
355 		tp->t_fdblocks_delta += delta;
356 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
357 			flags &= ~XFS_TRANS_SB_DIRTY;
358 		break;
359 	case XFS_TRANS_SB_RES_FDBLOCKS:
360 		/*
361 		 * The allocation has already been applied to the
362 		 * in-core superblock's counter.  This should only
363 		 * be applied to the on-disk superblock.
364 		 */
365 		tp->t_res_fdblocks_delta += delta;
366 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
367 			flags &= ~XFS_TRANS_SB_DIRTY;
368 		break;
369 	case XFS_TRANS_SB_FREXTENTS:
370 		/*
371 		 * Track the number of realtime extents allocated in
372 		 * the transaction.  Make sure it does not exceed the
373 		 * number reserved.
374 		 */
375 		if (delta < 0) {
376 			tp->t_rtx_res_used += (uint)-delta;
377 			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
378 		}
379 		tp->t_frextents_delta += delta;
380 		break;
381 	case XFS_TRANS_SB_RES_FREXTENTS:
382 		/*
383 		 * The allocation has already been applied to the
384 		 * in-core superblock's counter.  This should only
385 		 * be applied to the on-disk superblock.
386 		 */
387 		ASSERT(delta < 0);
388 		tp->t_res_frextents_delta += delta;
389 		break;
390 	case XFS_TRANS_SB_DBLOCKS:
391 		ASSERT(delta > 0);
392 		tp->t_dblocks_delta += delta;
393 		break;
394 	case XFS_TRANS_SB_AGCOUNT:
395 		ASSERT(delta > 0);
396 		tp->t_agcount_delta += delta;
397 		break;
398 	case XFS_TRANS_SB_IMAXPCT:
399 		tp->t_imaxpct_delta += delta;
400 		break;
401 	case XFS_TRANS_SB_REXTSIZE:
402 		tp->t_rextsize_delta += delta;
403 		break;
404 	case XFS_TRANS_SB_RBMBLOCKS:
405 		tp->t_rbmblocks_delta += delta;
406 		break;
407 	case XFS_TRANS_SB_RBLOCKS:
408 		tp->t_rblocks_delta += delta;
409 		break;
410 	case XFS_TRANS_SB_REXTENTS:
411 		tp->t_rextents_delta += delta;
412 		break;
413 	case XFS_TRANS_SB_REXTSLOG:
414 		tp->t_rextslog_delta += delta;
415 		break;
416 	default:
417 		ASSERT(0);
418 		return;
419 	}
420 
421 	tp->t_flags |= flags;
422 }
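
/*
 * Illustrative sketch (assumed, not taken from this file): a caller that
 * allocated 'len' data blocks within the transaction records a negative
 * free-block delta.  The delta is charged against the block reservation
 * here and folded into the superblock counters at commit time.
 */
static void
example_account_block_alloc(
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)len));
}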
423 
424 /*
425  * xfs_trans_apply_sb_deltas() is called from the commit code
426  * to bring the superblock buffer into the current transaction
427  * and modify it as requested by earlier calls to xfs_trans_mod_sb().
428  *
429  * For now we just look at each field allowed to change and change
430  * it if necessary.
431  */
432 STATIC void
433 xfs_trans_apply_sb_deltas(
434 	xfs_trans_t	*tp)
435 {
436 	xfs_dsb_t	*sbp;
437 	xfs_buf_t	*bp;
438 	int		whole = 0;
439 
440 	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
441 	sbp = XFS_BUF_TO_SBP(bp);
442 
443 	/*
444 	 * Check that superblock mods match the mods made to AGF counters.
445 	 */
446 	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
447 	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
448 		tp->t_ag_btree_delta));
449 
450 	/*
451 	 * Only update the superblock counters if we are logging them
452 	 */
453 	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
454 		if (tp->t_icount_delta)
455 			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
456 		if (tp->t_ifree_delta)
457 			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
458 		if (tp->t_fdblocks_delta)
459 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
460 		if (tp->t_res_fdblocks_delta)
461 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
462 	}
463 
464 	if (tp->t_frextents_delta)
465 		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
466 	if (tp->t_res_frextents_delta)
467 		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
468 
469 	if (tp->t_dblocks_delta) {
470 		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
471 		whole = 1;
472 	}
473 	if (tp->t_agcount_delta) {
474 		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
475 		whole = 1;
476 	}
477 	if (tp->t_imaxpct_delta) {
478 		sbp->sb_imax_pct += tp->t_imaxpct_delta;
479 		whole = 1;
480 	}
481 	if (tp->t_rextsize_delta) {
482 		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
483 		whole = 1;
484 	}
485 	if (tp->t_rbmblocks_delta) {
486 		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
487 		whole = 1;
488 	}
489 	if (tp->t_rblocks_delta) {
490 		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
491 		whole = 1;
492 	}
493 	if (tp->t_rextents_delta) {
494 		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
495 		whole = 1;
496 	}
497 	if (tp->t_rextslog_delta) {
498 		sbp->sb_rextslog += tp->t_rextslog_delta;
499 		whole = 1;
500 	}
501 
502 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
503 	if (whole)
504 		/*
505 		 * Log the whole thing, the fields are noncontiguous.
506 		 */
507 		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
508 	else
509 		/*
510 		 * Since all the modifiable fields are contiguous, we
511 		 * can get away with this.
512 		 */
513 		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
514 				  offsetof(xfs_dsb_t, sb_frextents) +
515 				  sizeof(sbp->sb_frextents) - 1);
516 }
517 
518 STATIC int
519 xfs_sb_mod8(
520 	uint8_t			*field,
521 	int8_t			delta)
522 {
523 	int8_t			counter = *field;
524 
525 	counter += delta;
526 	if (counter < 0) {
527 		ASSERT(0);
528 		return -EINVAL;
529 	}
530 	*field = counter;
531 	return 0;
532 }
533 
534 STATIC int
535 xfs_sb_mod32(
536 	uint32_t		*field,
537 	int32_t			delta)
538 {
539 	int32_t			counter = *field;
540 
541 	counter += delta;
542 	if (counter < 0) {
543 		ASSERT(0);
544 		return -EINVAL;
545 	}
546 	*field = counter;
547 	return 0;
548 }
549 
550 STATIC int
551 xfs_sb_mod64(
552 	uint64_t		*field,
553 	int64_t			delta)
554 {
555 	int64_t			counter = *field;
556 
557 	counter += delta;
558 	if (counter < 0) {
559 		ASSERT(0);
560 		return -EINVAL;
561 	}
562 	*field = counter;
563 	return 0;
564 }
565 
566 /*
567  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
568  * and apply superblock counter changes to the in-core superblock.  The
569  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
570  * applied to the in-core superblock.  The idea is that those updates have
571  * already been made.
572  *
573  * If we are not logging superblock counters, then the inode allocated/free and
574  * used block counts are not updated in the on disk superblock. In this case,
575  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
576  * still need to update the incore superblock with the changes.
577  */
578 void
579 xfs_trans_unreserve_and_mod_sb(
580 	struct xfs_trans	*tp)
581 {
582 	struct xfs_mount	*mp = tp->t_mountp;
583 	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
584 	int64_t			blkdelta = 0;
585 	int64_t			rtxdelta = 0;
586 	int64_t			idelta = 0;
587 	int64_t			ifreedelta = 0;
588 	int			error;
589 
590 	/* calculate deltas */
591 	if (tp->t_blk_res > 0)
592 		blkdelta = tp->t_blk_res;
593 	if ((tp->t_fdblocks_delta != 0) &&
594 	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
595 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
596 	        blkdelta += tp->t_fdblocks_delta;
597 
598 	if (tp->t_rtx_res > 0)
599 		rtxdelta = tp->t_rtx_res;
600 	if ((tp->t_frextents_delta != 0) &&
601 	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
602 		rtxdelta += tp->t_frextents_delta;
603 
604 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
605 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
606 		idelta = tp->t_icount_delta;
607 		ifreedelta = tp->t_ifree_delta;
608 	}
609 
610 	/* apply the per-cpu counters */
611 	if (blkdelta) {
612 		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
613 		if (error)
614 			goto out;
615 	}
616 
617 	if (idelta) {
618 		error = xfs_mod_icount(mp, idelta);
619 		if (error)
620 			goto out_undo_fdblocks;
621 	}
622 
623 	if (ifreedelta) {
624 		error = xfs_mod_ifree(mp, ifreedelta);
625 		if (error)
626 			goto out_undo_icount;
627 	}
628 
629 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
630 		return;
631 
632 	/* apply remaining deltas */
633 	spin_lock(&mp->m_sb_lock);
634 	if (rtxdelta) {
635 		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
636 		if (error)
637 			goto out_undo_ifree;
638 	}
639 
640 	if (tp->t_dblocks_delta != 0) {
641 		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
642 		if (error)
643 			goto out_undo_frextents;
644 	}
645 	if (tp->t_agcount_delta != 0) {
646 		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
647 		if (error)
648 			goto out_undo_dblocks;
649 	}
650 	if (tp->t_imaxpct_delta != 0) {
651 		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
652 		if (error)
653 			goto out_undo_agcount;
654 	}
655 	if (tp->t_rextsize_delta != 0) {
656 		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
657 				     tp->t_rextsize_delta);
658 		if (error)
659 			goto out_undo_imaxpct;
660 	}
661 	if (tp->t_rbmblocks_delta != 0) {
662 		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
663 				     tp->t_rbmblocks_delta);
664 		if (error)
665 			goto out_undo_rextsize;
666 	}
667 	if (tp->t_rblocks_delta != 0) {
668 		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
669 		if (error)
670 			goto out_undo_rbmblocks;
671 	}
672 	if (tp->t_rextents_delta != 0) {
673 		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
674 				     tp->t_rextents_delta);
675 		if (error)
676 			goto out_undo_rblocks;
677 	}
678 	if (tp->t_rextslog_delta != 0) {
679 		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
680 				     tp->t_rextslog_delta);
681 		if (error)
682 			goto out_undo_rextents;
683 	}
684 	spin_unlock(&mp->m_sb_lock);
685 	return;
686 
687 out_undo_rextents:
688 	if (tp->t_rextents_delta)
689 		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
690 out_undo_rblocks:
691 	if (tp->t_rblocks_delta)
692 		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
693 out_undo_rbmblocks:
694 	if (tp->t_rbmblocks_delta)
695 		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
696 out_undo_rextsize:
697 	if (tp->t_rextsize_delta)
698 		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
699 out_undo_imaxpct:
700 	if (tp->t_imaxpct_delta)
701 		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
702 out_undo_agcount:
703 	if (tp->t_agcount_delta)
704 		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
705 out_undo_dblocks:
706 	if (tp->t_dblocks_delta)
707 		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
708 out_undo_frextents:
709 	if (rtxdelta)
710 		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
711 out_undo_ifree:
712 	spin_unlock(&mp->m_sb_lock);
713 	if (ifreedelta)
714 		xfs_mod_ifree(mp, -ifreedelta);
715 out_undo_icount:
716 	if (idelta)
717 		xfs_mod_icount(mp, -idelta);
718 out_undo_fdblocks:
719 	if (blkdelta)
720 		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
721 out:
722 	ASSERT(error == 0);
723 	return;
724 }
725 
726 /*
727  * Add the given log item to the transaction's list of log items.
728  *
729  * The log item will now point to its new descriptor with its li_desc field.
730  */
731 void
732 xfs_trans_add_item(
733 	struct xfs_trans	*tp,
734 	struct xfs_log_item	*lip)
735 {
736 	struct xfs_log_item_desc *lidp;
737 
738 	ASSERT(lip->li_mountp == tp->t_mountp);
739 	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
740 
741 	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
742 
743 	lidp->lid_item = lip;
744 	lidp->lid_flags = 0;
745 	list_add_tail(&lidp->lid_trans, &tp->t_items);
746 
747 	lip->li_desc = lidp;
748 }
749 
750 STATIC void
751 xfs_trans_free_item_desc(
752 	struct xfs_log_item_desc *lidp)
753 {
754 	list_del_init(&lidp->lid_trans);
755 	kmem_zone_free(xfs_log_item_desc_zone, lidp);
756 }
757 
758 /*
759  * Unlink and free the given descriptor.
760  */
761 void
762 xfs_trans_del_item(
763 	struct xfs_log_item	*lip)
764 {
765 	xfs_trans_free_item_desc(lip->li_desc);
766 	lip->li_desc = NULL;
767 }
768 
769 /*
770  * Unlock all of the items of a transaction and free all the descriptors
771  * of that transaction.
772  */
773 void
774 xfs_trans_free_items(
775 	struct xfs_trans	*tp,
776 	xfs_lsn_t		commit_lsn,
777 	bool			abort)
778 {
779 	struct xfs_log_item_desc *lidp, *next;
780 
781 	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
782 		struct xfs_log_item	*lip = lidp->lid_item;
783 
784 		lip->li_desc = NULL;
785 
786 		if (commit_lsn != NULLCOMMITLSN)
787 			lip->li_ops->iop_committing(lip, commit_lsn);
788 		if (abort)
789 			lip->li_flags |= XFS_LI_ABORTED;
790 		lip->li_ops->iop_unlock(lip);
791 
792 		xfs_trans_free_item_desc(lidp);
793 	}
794 }
795 
796 static inline void
797 xfs_log_item_batch_insert(
798 	struct xfs_ail		*ailp,
799 	struct xfs_ail_cursor	*cur,
800 	struct xfs_log_item	**log_items,
801 	int			nr_items,
802 	xfs_lsn_t		commit_lsn)
803 {
804 	int	i;
805 
806 	spin_lock(&ailp->xa_lock);
807 	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
808 	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
809 
810 	for (i = 0; i < nr_items; i++) {
811 		struct xfs_log_item *lip = log_items[i];
812 
813 		lip->li_ops->iop_unpin(lip, 0);
814 	}
815 }
816 
817 /*
818  * Bulk operation version of xfs_trans_committed that takes a log vector of
819  * items to insert into the AIL. This uses bulk AIL insertion techniques to
820  * minimise lock traffic.
821  *
822  * If we are called with the aborted flag set, it is because a log write during
823  * a CIL checkpoint commit has failed. In this case, all the items in the
824  * checkpoint have already gone through iop_committed and iop_unlock, which
825  * means that checkpoint commit abort handling is treated exactly the same
826  * as an iclog write error even though we haven't started any IO yet. Hence in
827  * this case all we need to do is iop_committed processing, followed by an
828  * iop_unpin(aborted) call.
829  *
830  * The AIL cursor is used to optimise the insert process. If commit_lsn is not
831  * at the end of the AIL, the insert cursor avoids the need to walk
832  * the AIL to find the insertion point on every xfs_log_item_batch_insert()
833  * call. This saves a lot of needless list walking and is a net win, even
834  * though it slightly increases the amount of AIL lock traffic to set it up
835  * and tear it down.
836  */
837 void
838 xfs_trans_committed_bulk(
839 	struct xfs_ail		*ailp,
840 	struct xfs_log_vec	*log_vector,
841 	xfs_lsn_t		commit_lsn,
842 	int			aborted)
843 {
844 #define LOG_ITEM_BATCH_SIZE	32
845 	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
846 	struct xfs_log_vec	*lv;
847 	struct xfs_ail_cursor	cur;
848 	int			i = 0;
849 
850 	spin_lock(&ailp->xa_lock);
851 	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
852 	spin_unlock(&ailp->xa_lock);
853 
854 	/* unpin all the log items */
855 	for (lv = log_vector; lv; lv = lv->lv_next) {
856 		struct xfs_log_item	*lip = lv->lv_item;
857 		xfs_lsn_t		item_lsn;
858 
859 		if (aborted)
860 			lip->li_flags |= XFS_LI_ABORTED;
861 		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
862 
863 		/* item_lsn of -1 means the item needs no further processing */
864 		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
865 			continue;
866 
867 		/*
868 		 * if we are aborting the operation, no point in inserting the
869 		 * object into the AIL as we are in a shutdown situation.
870 		 */
871 		if (aborted) {
872 			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
873 			lip->li_ops->iop_unpin(lip, 1);
874 			continue;
875 		}
876 
877 		if (item_lsn != commit_lsn) {
878 
879 			/*
880 			 * Not a bulk update candidate due to an unusual item_lsn.
881 			 * Push into AIL immediately, rechecking the lsn once
882 			 * we have the ail lock. Then unpin the item. This does
883 			 * not affect the AIL cursor the bulk insert path is
884 			 * using.
885 			 */
886 			spin_lock(&ailp->xa_lock);
887 			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
888 				xfs_trans_ail_update(ailp, lip, item_lsn);
889 			else
890 				spin_unlock(&ailp->xa_lock);
891 			lip->li_ops->iop_unpin(lip, 0);
892 			continue;
893 		}
894 
895 		/* Item is a candidate for bulk AIL insert.  */
896 		log_items[i++] = lv->lv_item;
897 		if (i >= LOG_ITEM_BATCH_SIZE) {
898 			xfs_log_item_batch_insert(ailp, &cur, log_items,
899 					LOG_ITEM_BATCH_SIZE, commit_lsn);
900 			i = 0;
901 		}
902 	}
903 
904 	/* make sure we insert the remainder! */
905 	if (i)
906 		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
907 
908 	spin_lock(&ailp->xa_lock);
909 	xfs_trans_ail_cursor_done(&cur);
910 	spin_unlock(&ailp->xa_lock);
911 }
912 
913 /*
914  * Commit the given transaction to the log.
915  *
916  * The XFS disk error handling mechanism is not based on a typical
917  * transaction abort mechanism.  Logically, after the filesystem
918  * gets marked 'SHUTDOWN', we can't let any new transactions
919  * be durable - i.e. committed to disk - because some metadata might
920  * be inconsistent. In such cases, this returns an error, and the
921  * caller may assume that all locked objects joined to the transaction
922  * have already been unlocked as if the commit had succeeded.
923  * Do not reference the transaction structure after this call.
924  */
925 static int
926 __xfs_trans_commit(
927 	struct xfs_trans	*tp,
928 	bool			regrant)
929 {
930 	struct xfs_mount	*mp = tp->t_mountp;
931 	xfs_lsn_t		commit_lsn = -1;
932 	int			error = 0;
933 	int			sync = tp->t_flags & XFS_TRANS_SYNC;
934 
935 	/*
936 	 * If there is nothing to be logged by the transaction,
937 	 * then unlock all of the items associated with the
938 	 * transaction and free the transaction structure.
939 	 * Also make sure to return any reserved blocks to
940 	 * the free pool.
941 	 */
942 	if (!(tp->t_flags & XFS_TRANS_DIRTY))
943 		goto out_unreserve;
944 
945 	if (XFS_FORCED_SHUTDOWN(mp)) {
946 		error = -EIO;
947 		goto out_unreserve;
948 	}
949 
950 	ASSERT(tp->t_ticket != NULL);
951 
952 	/*
953 	 * If we need to update the superblock, then do it now.
954 	 */
955 	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
956 		xfs_trans_apply_sb_deltas(tp);
957 	xfs_trans_apply_dquot_deltas(tp);
958 
959 	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
960 
961 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
962 	xfs_trans_free(tp);
963 
964 	/*
965 	 * If the transaction needs to be synchronous, then force the
966 	 * log out now and wait for it.
967 	 */
968 	if (sync) {
969 		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
970 		XFS_STATS_INC(mp, xs_trans_sync);
971 	} else {
972 		XFS_STATS_INC(mp, xs_trans_async);
973 	}
974 
975 	return error;
976 
977 out_unreserve:
978 	xfs_trans_unreserve_and_mod_sb(tp);
979 
980 	/*
981 	 * It is indeed possible for the transaction to be not dirty but
982 	 * the dqinfo portion to be.  All that means is that we have some
983 	 * (non-persistent) quota reservations that need to be unreserved.
984 	 */
985 	xfs_trans_unreserve_and_mod_dquots(tp);
986 	if (tp->t_ticket) {
987 		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
988 		if (commit_lsn == -1 && !error)
989 			error = -EIO;
990 	}
991 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
992 	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
993 	xfs_trans_free(tp);
994 
995 	XFS_STATS_INC(mp, xs_trans_empty);
996 	return error;
997 }
998 
999 int
1000 xfs_trans_commit(
1001 	struct xfs_trans	*tp)
1002 {
1003 	return __xfs_trans_commit(tp, false);
1004 }
1005 
1006 /*
1007  * Unlock all of the transaction's items and free the transaction.
1008  * The transaction must not have modified any of its items, because
1009  * there is no way to restore them to their previous state.
1010  *
1011  * If the transaction has made a log reservation, make sure to release
1012  * it as well.
1013  */
1014 void
1015 xfs_trans_cancel(
1016 	struct xfs_trans	*tp)
1017 {
1018 	struct xfs_mount	*mp = tp->t_mountp;
1019 	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1020 
1021 	/*
1022 	 * See if the caller is relying on us to shut down the
1023 	 * filesystem.  This happens in paths where we detect
1024 	 * corruption and decide to give up.
1025 	 */
1026 	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1027 		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1028 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1029 	}
1030 #ifdef DEBUG
1031 	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1032 		struct xfs_log_item_desc *lidp;
1033 
1034 		list_for_each_entry(lidp, &tp->t_items, lid_trans)
1035 			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1036 	}
1037 #endif
1038 	xfs_trans_unreserve_and_mod_sb(tp);
1039 	xfs_trans_unreserve_and_mod_dquots(tp);
1040 
1041 	if (tp->t_ticket)
1042 		xfs_log_done(mp, tp->t_ticket, NULL, false);
1043 
1044 	/* mark this thread as no longer being in a transaction */
1045 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1046 
1047 	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1048 	xfs_trans_free(tp);
1049 }
1050 
1051 /*
1052  * Roll from one trans in the sequence of PERMANENT transactions to
1053  * the next: permanent transactions are only flushed out when
1054  * committed with xfs_trans_commit(), but we still want to let chunks
1055  * of it go to the log as soon as possible. So we commit the
1056  * chunk we've been working on and get a new transaction to continue.
1057  */
1058 int
1059 xfs_trans_roll(
1060 	struct xfs_trans	**tpp)
1061 {
1062 	struct xfs_trans	*trans = *tpp;
1063 	struct xfs_trans_res	tres;
1064 	int			error;
1065 
1066 	/*
1067 	 * Copy the critical parameters from one trans to the next.
1068 	 */
1069 	tres.tr_logres = trans->t_log_res;
1070 	tres.tr_logcount = trans->t_log_count;
1071 
1072 	*tpp = xfs_trans_dup(trans);
1073 
1074 	/*
1075 	 * Commit the current transaction.
1076 	 * If this commit failed, then it'd just unlock those items that
1077 	 * are not marked ihold. That also means that a filesystem shutdown
1078 	 * is in progress. The caller takes the responsibility to cancel
1079 	 * the duplicate transaction that gets returned.
1080 	 */
1081 	error = __xfs_trans_commit(trans, true);
1082 	if (error)
1083 		return error;
1084 
1085 	/*
1086 	 * Reserve space in the log for the next transaction.
1087 	 * This also pushes items in the "AIL", the list of logged items,
1088 	 * out to disk if they are taking up space at the tail of the log
1089 	 * that we want to use.  This requires that either nothing be locked
1090 	 * across this call, or that anything that is locked be logged in
1091 	 * the prior and the next transactions.
1092 	 */
1093 	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1094 	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1095 }
1096
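
/*
 * Illustrative rolling-transaction loop (assumed, not taken from this
 * file): log the inode, roll to a fresh transaction, and re-join the
 * inode, since log items do not carry across the roll.  This mirrors the
 * xfs_trans_roll_inode() pattern; example_more_work() is hypothetical.
 */
static int
example_roll_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	int			error = 0;

	while (example_more_work(ip)) {		/* hypothetical progress check */
		xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
		error = xfs_trans_roll(tpp);
		if (error)
			break;
		xfs_trans_ijoin(*tpp, ip, 0);
	}
	return error;
}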