xref: /openbmc/linux/fs/xfs/xfs_log_cil.c (revision 089a49b6)
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

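	/*
	 * A single-use, non-permanent ticket: a unit reservation of zero and
	 * a count of one. KM_NOFS stops memory reclaim from recursing back
	 * into the filesystem from this allocation.
	 */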
	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
								log->l_curr_block);
}

STATIC int
xlog_cil_lv_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	int	index;
	char	*ptr;

	/* format new vectors into array */
	lip->li_ops->iop_format(lip, lv->lv_iovecp);

	/* copy data into existing array */
	ptr = lv->lv_buf;
	for (index = 0; index < lv->lv_niovecs; index++) {
		struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

		memcpy(ptr, vec->i_addr, vec->i_len);
		vec->i_addr = ptr;
		ptr += vec->i_len;
	}

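	/*
	 * The iovecs now point into the flat buffer, so the captured changes
	 * are independent of the modified object and can be written into the
	 * iclog later without relocking it.
	 */
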
	/*
	 * some size calculations for log vectors over-estimate, so the caller
	 * doesn't know the amount of space actually used by the item. Return
	 * the byte count to the caller so they can check and store it
	 * appropriately.
	 */
	return ptr - lv->lv_buf;
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and free it.
	 */
	if (!old_lv)
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
	else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_buf_len;
		*diff_iovecs -= old_lv->lv_niovecs;
		kmem_free(old_lv);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, which is then inserted into the
 * Committed Item List for tracking until the next checkpoint is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/* Skip items that do not have any vectors for writing */
		if (!niovecs)
			continue;

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/* grab the old item if it exists for reservation accounting */
		old_lv = lip->li_lv;

		/* calc buffer size */
		buf_size = sizeof(struct xfs_log_vec) + nbytes +
				niovecs * sizeof(struct xfs_log_iovec);

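		/*
		 * The chunk is a single allocation laid out as:
		 *
		 *   [ struct xfs_log_vec | niovecs iovec headers | nbytes data ]
		 *
		 * hence lv_iovecp points at &lv[1] and lv_buf at the final
		 * nbytes of the buffer, as set up below.
		 */
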
		/* compare to existing item size */
		if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_buf_len;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = niovecs;
			lv->lv_buf = (char *)lv + buf_size - nbytes;

			lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
			goto insert;
		}

		/* allocate new data chunk */
		lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
		lv->lv_item = lip;
		lv->lv_size = buf_size;
		lv->lv_niovecs = niovecs;
		if (ordered) {
			/* track as an ordered logvec */
			ASSERT(lip->li_lv == NULL);
			lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			goto insert;
		}

		/* The allocated iovec region lies beyond the log vector. */
		lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + buf_size - nbytes;

		lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
insert:
		ASSERT(lv->lv_buf_len <= nbytes);
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	/*
	 * account for the xlog_op_header that will be written in front of
	 * each new iovec
	 */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
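		/*
		 * Illustrative numbers only: with 32k of usable iclog space
		 * and a 40k checkpoint body, hdrs would be 2, so we would
		 * reserve two extra record-header-plus-op-header units here.
		 */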
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		tp->t_ticket->t_curr_res -= hdrs;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it is a
 * background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}
	spin_unlock(&cil->xc_push_lock);

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_push_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

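	/*
	 * Chain the header vector in front of the item vectors so that
	 * xlog_write() emits the transaction header region first.
	 */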
	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog, we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The CIL won't be empty because we are called while holding the
	 * context lock, so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}

static void
xlog_cil_push_foreground(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	spin_unlock(&cil->xc_push_lock);

	/* do the push now */
	xlog_cil_push(log);
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * We take the context lock in read mode to lock out background commit and
 * release it once the items are inserted and background commits can be
 * allowed again.
 */
int
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	int			log_flags = 0;

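	/*
	 * XFS_LOG_REL_PERM_RESERV makes xfs_log_done() below release a
	 * permanent reservation rather than regrant it.
	 */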
	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(mp, tp->t_ticket);

	tp->t_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = tp->t_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, tp->t_commit_lsn, 0);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
	return 0;
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as that of the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	xlog_cil_push_foreground(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL; we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

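	/*
	 * KM_MAYFAIL: unlike the CIL ticket allocation above, mount setup
	 * can return an error, so these allocations are allowed to fail.
	 */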
	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}