/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"

/*
 * Perform initial CIL structure initialisation. If the CIL is not
 * enabled in this filesystem, ensure the log->l_cilp is null so
 * we can check this conditional to determine if we are doing delayed
 * logging or not.
 */
int
xlog_cil_init(
	struct log	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	log->l_cilp = NULL;
	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
		return 0;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

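	/* set up the initial checkpoint context and attach it to the CIL */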
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

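	/*
	 * Free the current checkpoint context and its log ticket, if one has
	 * been allocated.
	 */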
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct log	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item and the log items are then
 * inserted into the Committed Item List for tracking until the next
 * checkpoint is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_format_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec *lv;

	ASSERT(log_vector);
	for (lv = log_vector; lv; lv = lv->lv_next) {
		void	*ptr;
		int	index;
		int	len = 0;

		/* build the vector array and calculate its length */
		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
		for (index = 0; index < lv->lv_niovecs; index++)
			len += lv->lv_iovecp[index].i_len;

		lv->lv_buf_len = len;
		lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
		ptr = lv->lv_buf;

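		/*
		 * Copy each region into the flat buffer and repoint the
		 * vector at the copy so the regions can be written into the
		 * iclog directly from this buffer.
		 */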
		for (index = 0; index < lv->lv_niovecs; index++) {
			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

			memcpy(ptr, vec->i_addr, vec->i_len);
			vec->i_addr = ptr;
			ptr += vec->i_len;
		}
		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct log		*log,
	struct xfs_log_vec	*lv,
	int			*len,
	int			*diff_iovecs)
{
	struct xfs_log_vec	*old = lv->lv_item->li_lv;

	if (old) {
		/* existing lv on log item, space used is a delta */
		ASSERT(!list_empty(&lv->lv_item->li_cil));
		ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);

		*len += lv->lv_buf_len - old->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
		kmem_free(old->lv_buf);
		kmem_free(old);
	} else {
		/* new lv, must pin the log item */
		ASSERT(!lv->lv_item->li_lv);
		ASSERT(list_empty(&lv->lv_item->li_cil));

		*len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
		IOP_PIN(lv->lv_item);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*ticket)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_vec	*lv;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(log_vector);

	/*
	 * Do all the accounting aggregation and switching of log vectors
	 * around in a separate loop to the insertion of items into the CIL.
	 * Then we can do a separate loop to update the CIL within a single
	 * lock/unlock pair. This reduces the number of round trips on the CIL
	 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
	 * hold time for the transaction commit.
	 *
	 * If this is the first time the item is being placed into the CIL in
	 * this context, pin it so it can't be written to disk until the CIL is
	 * flushed to the iclog and the iclog written to disk.
	 *
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	for (lv = log_vector; lv; lv = lv->lv_next)
		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);

	/* account for space used by new iovec headers */
	len += diff_iovecs * sizeof(xlog_op_header_t);

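	/* update the CIL and the checkpoint ticket under the CIL lock */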
	spin_lock(&cil->xc_cil_lock);

	/* move the items to the tail of the CIL */
	for (lv = log_vector; lv; lv = lv->lv_next)
		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);

	ctx->nvecs += diff_iovecs;

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit in checkpoint, steal the header reservation */
		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		ticket->t_curr_res -= hdrs;
		ASSERT(ticket->t_curr_res >= len);
	}
	ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv->lv_buf);
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_alloc_busy_sort(&ctx->busy_extents);
	xfs_alloc_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct log		*log,
	xfs_lsn_t		push_seq)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_lv;
	int			num_iovecs;
	int			len;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;

	if (!cil)
		return 0;

	ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/*
	 * Lock out transaction commit, but don't block for background pushes
	 * unless we are well over the CIL space limit. See the definition of
	 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
	 * used here.
	 */
	if (!down_write_trylock(&cil->xc_ctx_lock)) {
		if (!push_seq &&
		    cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
			goto out_free_ticket;
		down_write(&cil->xc_ctx_lock);
	}
	ctx = cil->xc_ctx;

	/* check if we've anything to push */
	if (list_empty(&cil->xc_cil))
		goto out_skip;

	/* check for spurious background flush */
	if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		goto out_skip;

	/* check for a previously pushed sequence */
	if (push_seq && push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_lv = 0;
	num_iovecs = 0;
	len = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;
		int			i;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
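		/* chain the item's log vector onto the context's lv chain */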
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;

		num_lv++;
		num_iovecs += lv->lv_niovecs;
		for (i = 0; i < lv->lv_niovecs; i++)
			len += lv->lv_iovecp[i].i_len;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

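	/*
	 * Skip and error exits: on a skipped push we free the unused new
	 * context and its ticket; on an abort we run the committed callback
	 * with XFS_LI_ABORTED so the items are unpinned and the failed
	 * context is torn down.
	 */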
out_skip:
	up_write(&cil->xc_ctx_lock);
out_free_ticket:
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * For more specific information about the order of operations in
 * xfs_log_commit_cil() please refer to the comments in
 * xfs_trans_commit_iclog().
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct log		*log = mp->m_log;
	int			log_flags = 0;
	int			push = 0;

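	/* translate transaction flags into log flags for xfs_log_done() */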
	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	/*
	 * do all the hard work of formatting items (including memory
	 * allocation) outside the CIL context lock. This prevents stalling CIL
	 * pushes when we are low on memory and a transaction commit spends a
	 * lot of time in memory reclaim.
	 */
	xlog_cil_format_items(log, log_vector);

	/* lock out background commit */
	down_read(&log->l_cilp->xc_ctx_lock);
	if (commit_lsn)
		*commit_lsn = log->l_cilp->xc_ctx->sequence;

	xlog_cil_insert_items(log, log_vector, tp->t_ticket);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(log->l_mp, tp->t_ticket);

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy)) {
		spin_lock(&log->l_cilp->xc_cil_lock);
		list_splice_init(&tp->t_busy,
					&log->l_cilp->xc_ctx->busy_extents);
		spin_unlock(&log->l_cilp->xc_cil_lock);
	}

	tp->t_commit_lsn = *commit_lsn;
	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, *commit_lsn, 0);

	/* check for background commit before unlock */
	if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
		push = 1;

	up_read(&log->l_cilp->xc_ctx_lock);

	/*
	 * We need to push CIL every so often so we don't cache more than we
	 * can fit in the log. The limit really is that a checkpoint can't be
	 * more than half the log (the current checkpoint is not allowed to
	 * overwrite the previous checkpoint), but commit latency and memory
	 * usage limit this to a smaller size in most cases.
	 */
	if (push)
		xlog_cil_push(log, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 *
 * XXX: Initially, just push the CIL unconditionally and return whatever
 * commit lsn is there. It'll be empty, so this is broken for now.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct log	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	if (sequence == cil->xc_current_sequence)
		xlog_cil_push(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_cil_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
		return false;
	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
805