/* fs/xfs/xfs_trans.c (revision 0edbfea5) */
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
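
/*
 * Illustrative worked example (not part of the original source): how the
 * block reservation is split by xfs_trans_dup().  Suppose the old
 * transaction reserved 100 blocks and has used 40 of them at dup time:
 *
 *	before:	tp->t_blk_res = 100, tp->t_blk_res_used = 40
 *	after:	ntp->t_blk_res = 60	(the unused remainder)
 *		tp->t_blk_res = 40	(exactly what it consumed)
 *
 * The old transaction can still commit what it has done, but any further
 * allocation must come out of the new transaction's reservation.  The rt
 * extent reservation is split the same way.
 */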

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid flag in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		/* return the blocks taken above: the delta must be positive */
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
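
/*
 * Illustrative note (not part of the original source): the undo labels
 * above unwind in the reverse of the reservation order.  A failed rt
 * extent reservation releases the log ticket (undo_log) and then the
 * block reservation (undo_blocks), so callers only ever see either a
 * fully reserved transaction or one holding no reservations at all.
 */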

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
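
/*
 * Illustrative usage sketch (not part of the original source): the
 * canonical allocate/modify/commit pattern seen in callers.  The
 * reservation chosen (tr_itruncate) and the inode modification are
 * placeholders for whatever the real caller needs.
 */
static int
example_trans_usage(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	/* reserves log space and, if requested, blocks/rt extents */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;	/* tp already cancelled internally on failure */

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* hand the lock to the transaction; commit/cancel will unlock it */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/* ... modify the inode core here ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}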

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in
		 * the transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
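
/*
 * Illustrative sketch (not part of the original source): an allocation
 * path that has just taken "len" blocks out of the free pool accounts
 * for them against its reservation like this; the delta is negative
 * because free space shrinks.
 */
static void
example_account_allocation(
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
}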

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
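
/*
 * Illustrative worked example (not part of the original source): the
 * xfs_sb_mod* helpers refuse any delta that would drive a counter
 * negative, e.g. with a hypothetical counter:
 *
 *	uint32_t agcount = 4;
 *	xfs_sb_mod32(&agcount, -5);	returns -EINVAL, agcount still 4
 *	xfs_sb_mod32(&agcount, 2);	returns 0, agcount now 6
 */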

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that those deltas have
 * already been applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				     tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
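
/*
 * Illustrative note (not part of the original source): after
 * xfs_trans_add_item(tp, lip) the linkage is bidirectional:
 *
 *	lip->li_desc->lid_item == lip
 *	lip->li_desc->lid_trans is linked on tp->t_items
 *
 * xfs_trans_del_item() below undoes exactly this pairing.
 */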

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
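
/*
 * Illustrative sketch (not part of the original source): typical error
 * handling around commit versus cancel.  Once xfs_trans_commit() has
 * been called the transaction is freed either way; xfs_trans_cancel()
 * is only for bailing out before the commit.
 */
static int
example_commit_or_cancel(
	struct xfs_trans	*tp,
	int			error)
{
	if (error) {
		xfs_trans_cancel(tp);	/* backs out reservations, unlocks items */
		return error;
	}
	return xfs_trans_commit(tp);	/* frees tp on success or failure */
}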

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let
 * chunks of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
__xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	int			*committed)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	*committed = 0;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	if (dp)
		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	*committed = 1;
	trans = *tpp;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	if (dp)
		xfs_trans_ijoin(trans, dp, 0);
	return 0;
}

int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	int			committed;
	return __xfs_trans_roll(tpp, dp, &committed);
}
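
/*
 * Illustrative sketch (not part of the original source): a long-running
 * operation rolls its permanent transaction between bounded chunks of
 * work so the log tail can keep moving.  "have_more_work()" is a
 * placeholder for the caller's real loop condition.
 */
static int
example_rolling_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	int			error = 0;

	while (have_more_work(ip)) {
		/* ... do one bounded chunk of work against *tpp ... */

		/* commit the chunk and get a fresh transaction in *tpp */
		error = xfs_trans_roll(tpp, ip);
		if (error)
			break;	/* caller must xfs_trans_cancel(*tpp) */
	}
	return error;
}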