xref: /openbmc/linux/fs/xfs/xfs_log_cil.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it would be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write the Free Software Foundation,
15  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
16  */
17 
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_trans_priv.h"
26 #include "xfs_log_priv.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_mount.h"
30 #include "xfs_error.h"
31 #include "xfs_alloc.h"
32 
33 /*
34  * Perform initial CIL structure initialisation. If the CIL is not
35  * enabled in this filesystem, ensure the log->l_cilp is null so
36  * we can check this conditional to determine if we are doing delayed
37  * logging or not.
38  */
39 int
40 xlog_cil_init(
41 	struct log	*log)
42 {
43 	struct xfs_cil	*cil;
44 	struct xfs_cil_ctx *ctx;
45 
46 	log->l_cilp = NULL;
47 	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
48 		return 0;
49 
50 	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
51 	if (!cil)
52 		return ENOMEM;
53 
54 	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
55 	if (!ctx) {
56 		kmem_free(cil);
57 		return ENOMEM;
58 	}
59 
60 	INIT_LIST_HEAD(&cil->xc_cil);
61 	INIT_LIST_HEAD(&cil->xc_committing);
62 	spin_lock_init(&cil->xc_cil_lock);
63 	init_rwsem(&cil->xc_ctx_lock);
64 	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
65 
66 	INIT_LIST_HEAD(&ctx->committing);
67 	INIT_LIST_HEAD(&ctx->busy_extents);
68 	ctx->sequence = 1;
69 	ctx->cil = cil;
70 	cil->xc_ctx = ctx;
71 	cil->xc_current_sequence = ctx->sequence;
72 
73 	cil->xc_log = log;
74 	log->l_cilp = cil;
75 	return 0;
76 }
77 
78 void
79 xlog_cil_destroy(
80 	struct log	*log)
81 {
82 	if (!log->l_cilp)
83 		return;
84 
85 	if (log->l_cilp->xc_ctx) {
86 		if (log->l_cilp->xc_ctx->ticket)
87 			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
88 		kmem_free(log->l_cilp->xc_ctx);
89 	}
90 
91 	ASSERT(list_empty(&log->l_cilp->xc_cil));
92 	kmem_free(log->l_cilp);
93 }
94 
95 /*
96  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
97  * recover, so we don't allow failure here. Also, we allocate in a context that
98  * we don't want to be issuing transactions from, so we need to tell the
99  * allocation code this as well.
100  *
101  * We don't reserve any space for the ticket - we are going to steal whatever
102  * space we require from transactions as they commit. To ensure we reserve all
103  * the space required, we need to set the current reservation of the ticket to
104  * zero so that we know to steal the initial transaction overhead from the
105  * first transaction commit.
106  */
107 static struct xlog_ticket *
108 xlog_cil_ticket_alloc(
109 	struct log	*log)
110 {
111 	struct xlog_ticket *tic;
112 
113 	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
114 				KM_SLEEP|KM_NOFS);
115 	tic->t_trans_type = XFS_TRANS_CHECKPOINT;
116 
117 	/*
118 	 * set the current reservation to zero so we know to steal the basic
119 	 * transaction overhead reservation from the first transaction commit.
120 	 */
121 	tic->t_curr_res = 0;
122 	return tic;
123 }
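/*
 * A rough sketch of how the stealing plays out in xlog_cil_insert_items()
 * below: because the checkpoint ticket starts with t_curr_res == 0, the
 * first transaction to commit into the context donates the checkpoint's
 * unit reservation out of its own ticket, roughly:
 *
 *	ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
 *	ticket->t_curr_res -= ctx->ticket->t_unit_res;
 *
 * Hence the checkpoint header overhead is paid for by the committing
 * transactions rather than being reserved separately up front.
 */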
124 
125 /*
126  * After the first stage of log recovery is done, we know where the head and
127  * tail of the log are. We need this log initialisation done before we can
128  * initialise the first CIL checkpoint context.
129  *
130  * Here we allocate a log ticket to track space usage during a CIL push.  This
131  * ticket is passed to xlog_write() directly so that we don't slowly leak log
132  * space by failing to account for space used by log headers and additional
133  * region headers for split regions.
134  */
135 void
136 xlog_cil_init_post_recovery(
137 	struct log	*log)
138 {
139 	if (!log->l_cilp)
140 		return;
141 
142 	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
143 	log->l_cilp->xc_ctx->sequence = 1;
144 	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
145 								log->l_curr_block);
146 }
147 
148 /*
149  * Format log items into flat buffers
150  *
151  * For delayed logging, we need to hold a formatted buffer containing all the
152  * changes on the log item. This enables us to relog the item in memory and
153  * write it out asynchronously without needing to relock the object that was
154  * modified at the time it gets written into the iclog.
155  *
156  * This function builds a vector for the changes in each log item in the
157  * transaction. It then works out the length of the buffer needed for each log
158  * item, allocates them and formats the vector for the item into the buffer.
159  * The buffer is then attached to the log item, which is then inserted into the
160  * Committed Item List for tracking until the next checkpoint is written out.
161  *
162  * We don't set up region headers during this process; we simply copy the
163  * regions into the flat buffer. We can do this because we still have to do a
164  * formatting step to write the regions into the iclog buffer.  Writing the
165  * ophdrs during the iclog write means that we can support splitting large
166  * regions across iclog boundaries without needing a change in the format of the
167  * item/region encapsulation.
168  *
169  * Hence what we need to do now is rewrite the vector array to point
170  * to the copied region inside the buffer we just allocated. This allows us to
171  * format the regions into the iclog as though they are being formatted
172  * directly out of the objects themselves.
173  */
174 static void
175 xlog_cil_format_items(
176 	struct log		*log,
177 	struct xfs_log_vec	*log_vector)
178 {
179 	struct xfs_log_vec *lv;
180 
181 	ASSERT(log_vector);
182 	for (lv = log_vector; lv; lv = lv->lv_next) {
183 		void	*ptr;
184 		int	index;
185 		int	len = 0;
186 
187 		/* build the vector array and calculate its length */
188 		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
189 		for (index = 0; index < lv->lv_niovecs; index++)
190 			len += lv->lv_iovecp[index].i_len;
191 
192 		lv->lv_buf_len = len;
193 		lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
194 		ptr = lv->lv_buf;
195 
196 		for (index = 0; index < lv->lv_niovecs; index++) {
197 			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
198 
199 			memcpy(ptr, vec->i_addr, vec->i_len);
200 			vec->i_addr = ptr;
201 			ptr += vec->i_len;
202 		}
203 		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
204 	}
205 }
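/*
 * As a rough illustration of the rewrite described above: after formatting,
 * each iovec address points into the single flat buffer rather than at the
 * original in-core object, e.g.
 *
 *	lv->lv_buf: [ region 0 ][ region 1 ][ region 2 ]
 *	              ^ iovec 0   ^ iovec 1   ^ iovec 2   (i_addr now points here)
 *
 * so the regions can be written into the iclog later without relocking or
 * touching the modified object again.
 */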
206 
207 /*
208  * Prepare the log item for insertion into the CIL. Calculate the difference in
209  * log space and vectors it will consume, and if it is a new item pin it as
210  * well.
211  */
212 STATIC void
213 xfs_cil_prepare_item(
214 	struct log		*log,
215 	struct xfs_log_vec	*lv,
216 	int			*len,
217 	int			*diff_iovecs)
218 {
219 	struct xfs_log_vec	*old = lv->lv_item->li_lv;
220 
221 	if (old) {
222 		/* existing lv on log item, space used is a delta */
223 		ASSERT(!list_empty(&lv->lv_item->li_cil));
224 		ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);
225 
226 		*len += lv->lv_buf_len - old->lv_buf_len;
227 		*diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
228 		kmem_free(old->lv_buf);
229 		kmem_free(old);
230 	} else {
231 		/* new lv, must pin the log item */
232 		ASSERT(!lv->lv_item->li_lv);
233 		ASSERT(list_empty(&lv->lv_item->li_cil));
234 
235 		*len += lv->lv_buf_len;
236 		*diff_iovecs += lv->lv_niovecs;
237 		IOP_PIN(lv->lv_item);
238 
239 	}
240 
241 	/* attach new log vector to log item */
242 	lv->lv_item->li_lv = lv;
243 
244 	/*
245 	 * If this is the first time the item is being committed to the
246 	 * CIL, store the sequence number on the log item so we can
247 	 * tell in future commits whether this is the first checkpoint
248 	 * the item is being committed into.
249 	 */
250 	if (!lv->lv_item->li_seq)
251 		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
252 }
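/*
 * A small worked example of the delta accounting above: if an item is
 * relogged with a new vector of 192 bytes in 3 iovecs while the existing
 * one held 128 bytes in 2 iovecs, then *len grows by 64 and *diff_iovecs
 * by 1, the old buffer is freed and the item is not pinned again. A brand
 * new item contributes its full size and iovec count and takes a pin.
 */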
253 
254 /*
255  * Insert the log items into the CIL and calculate the difference in space
256  * consumed by the item. Add the space to the checkpoint ticket and calculate
257  * if the change requires additional log metadata. If it does, take that space
258  * as well. Remove the amount of space we added to the checkpoint ticket from
259  * the current transaction ticket so that the accounting works out correctly.
260  */
261 static void
262 xlog_cil_insert_items(
263 	struct log		*log,
264 	struct xfs_log_vec	*log_vector,
265 	struct xlog_ticket	*ticket)
266 {
267 	struct xfs_cil		*cil = log->l_cilp;
268 	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
269 	struct xfs_log_vec	*lv;
270 	int			len = 0;
271 	int			diff_iovecs = 0;
272 	int			iclog_space;
273 
274 	ASSERT(log_vector);
275 
276 	/*
277 	 * Do all the accounting aggregation and switching of log vectors
278 	 * around in a separate loop to the insertion of items into the CIL.
279 	 * Then we can do a separate loop to update the CIL within a single
280 	 * lock/unlock pair. This reduces the number of round trips on the CIL
281 	 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
282 	 * hold time for the transaction commit.
283 	 *
284 	 * If this is the first time the item is being placed into the CIL in
285 	 * this context, pin it so it can't be written to disk until the CIL is
286 	 * flushed to the iclog and the iclog written to disk.
287 	 *
288 	 * We can do this safely because the context can't checkpoint until we
289 	 * are done so it doesn't matter exactly how we update the CIL.
290 	 */
291 	for (lv = log_vector; lv; lv = lv->lv_next)
292 		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
293 
294 	/* account for space used by new iovec headers  */
295 	len += diff_iovecs * sizeof(xlog_op_header_t);
296 
297 	spin_lock(&cil->xc_cil_lock);
298 
299 	/* move the items to the tail of the CIL */
300 	for (lv = log_vector; lv; lv = lv->lv_next)
301 		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
302 
303 	ctx->nvecs += diff_iovecs;
304 
305 	/*
306 	 * Now transfer enough transaction reservation to the context ticket
307 	 * for the checkpoint. The context ticket is special - the unit
308 	 * reservation has to grow as well as the current reservation as we
309 	 * steal from tickets so we can correctly determine the space used
310 	 * during the transaction commit.
311 	 */
312 	if (ctx->ticket->t_curr_res == 0) {
313 		/* first commit in checkpoint, steal the header reservation */
314 		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
315 		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
316 		ticket->t_curr_res -= ctx->ticket->t_unit_res;
317 	}
318 
319 	/* do we need space for more log record headers? */
320 	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
321 	if (len > 0 && (ctx->space_used / iclog_space !=
322 				(ctx->space_used + len) / iclog_space)) {
323 		int hdrs;
324 
325 		hdrs = (len + iclog_space - 1) / iclog_space;
326 		/* need to take into account split region headers, too */
327 		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
328 		ctx->ticket->t_unit_res += hdrs;
329 		ctx->ticket->t_curr_res += hdrs;
330 		ticket->t_curr_res -= hdrs;
331 		ASSERT(ticket->t_curr_res >= len);
332 	}
333 	ticket->t_curr_res -= len;
334 	ctx->space_used += len;
335 
336 	spin_unlock(&cil->xc_cil_lock);
337 }
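/*
 * A worked example of the record header accounting above, assuming (purely
 * for illustration) a 32kB iclog with a 512 byte record header, i.e.
 * iclog_space = 32768 - 512 = 32256 bytes: if the CIL had used 30000 bytes
 * and a commit adds len = 40000 bytes, the checkpoint now crosses new iclog
 * boundaries, so hdrs = (40000 + 32256 - 1) / 32256 = 2 and we move
 * 2 * (l_iclog_hsize + sizeof(struct xlog_op_header)) bytes of reservation
 * from the committing transaction's ticket to the checkpoint ticket.
 */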
338 
339 static void
340 xlog_cil_free_logvec(
341 	struct xfs_log_vec	*log_vector)
342 {
343 	struct xfs_log_vec	*lv;
344 
345 	for (lv = log_vector; lv; ) {
346 		struct xfs_log_vec *next = lv->lv_next;
347 		kmem_free(lv->lv_buf);
348 		kmem_free(lv);
349 		lv = next;
350 	}
351 }
352 
353 /*
354  * Mark all items committed and clear busy extents. We free the log vector
355  * chains in a separate pass so that we unpin the log items as quickly as
356  * possible.
357  */
358 static void
359 xlog_cil_committed(
360 	void	*args,
361 	int	abort)
362 {
363 	struct xfs_cil_ctx	*ctx = args;
364 	struct xfs_log_vec	*lv;
365 	int			abortflag = abort ? XFS_LI_ABORTED : 0;
366 	struct xfs_busy_extent	*busyp, *n;
367 
368 	/* unpin all the log items */
369 	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
370 		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
371 							abortflag);
372 	}
373 
374 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
375 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
376 
377 	spin_lock(&ctx->cil->xc_cil_lock);
378 	list_del(&ctx->committing);
379 	spin_unlock(&ctx->cil->xc_cil_lock);
380 
381 	xlog_cil_free_logvec(ctx->lv_chain);
382 	kmem_free(ctx);
383 }
384 
385 /*
386  * Push the Committed Item List to the log. If @push_seq is zero, then it is
387  * a background flush and so we can choose to ignore it. Otherwise, if the
388  * current sequence is the same as @push_seq we need to do a flush. If
389  * @push_seq is less than the current sequence, then it has already been
390  * flushed and we don't need to do anything - the caller will wait for it to
391  * complete if necessary.
392  *
393  * @push_seq is a value rather than a flag because that allows us to do an
394  * unlocked check of the sequence number for a match. Hence we can allow log
395  * forces to run racily and not issue pushes for the same sequence twice. If we
396  * get a race between multiple pushes for the same sequence they will block on
397  * the first one and then abort, hence avoiding needless pushes.
398  */
399 STATIC int
400 xlog_cil_push(
401 	struct log		*log,
402 	xfs_lsn_t		push_seq)
403 {
404 	struct xfs_cil		*cil = log->l_cilp;
405 	struct xfs_log_vec	*lv;
406 	struct xfs_cil_ctx	*ctx;
407 	struct xfs_cil_ctx	*new_ctx;
408 	struct xlog_in_core	*commit_iclog;
409 	struct xlog_ticket	*tic;
410 	int			num_lv;
411 	int			num_iovecs;
412 	int			len;
413 	int			error = 0;
414 	struct xfs_trans_header thdr;
415 	struct xfs_log_iovec	lhdr;
416 	struct xfs_log_vec	lvhdr = { NULL };
417 	xfs_lsn_t		commit_lsn;
418 
419 	if (!cil)
420 		return 0;
421 
422 	ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
423 
424 	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
425 	new_ctx->ticket = xlog_cil_ticket_alloc(log);
426 
427 	/*
428 	 * Lock out transaction commit, but don't block for background pushes
429 	 * unless we are well over the CIL space limit. See the definition of
430 	 * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
431 	 * used here.
432 	 */
433 	if (!down_write_trylock(&cil->xc_ctx_lock)) {
434 		if (!push_seq &&
435 		    cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
436 			goto out_free_ticket;
437 		down_write(&cil->xc_ctx_lock);
438 	}
439 	ctx = cil->xc_ctx;
440 
441 	/* check if we've anything to push */
442 	if (list_empty(&cil->xc_cil))
443 		goto out_skip;
444 
445 	/* check for spurious background flush */
446 	if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
447 		goto out_skip;
448 
449 	/* check for a previously pushed sequence */
450 	if (push_seq && push_seq < cil->xc_ctx->sequence)
451 		goto out_skip;
452 
453 	/*
454 	 * pull all the log vectors off the items in the CIL, and
455 	 * remove the items from the CIL. We don't need the CIL lock
456 	 * here because it's only needed on the transaction commit
457 	 * side which is currently locked out by the flush lock.
458 	 */
459 	lv = NULL;
460 	num_lv = 0;
461 	num_iovecs = 0;
462 	len = 0;
463 	while (!list_empty(&cil->xc_cil)) {
464 		struct xfs_log_item	*item;
465 		int			i;
466 
467 		item = list_first_entry(&cil->xc_cil,
468 					struct xfs_log_item, li_cil);
469 		list_del_init(&item->li_cil);
470 		if (!ctx->lv_chain)
471 			ctx->lv_chain = item->li_lv;
472 		else
473 			lv->lv_next = item->li_lv;
474 		lv = item->li_lv;
475 		item->li_lv = NULL;
476 
477 		num_lv++;
478 		num_iovecs += lv->lv_niovecs;
479 		for (i = 0; i < lv->lv_niovecs; i++)
480 			len += lv->lv_iovecp[i].i_len;
481 	}
482 
483 	/*
484 	 * initialise the new context and attach it to the CIL. Then attach
485 	 * the current context to the CIL committing list so it can be found
486 	 * during log forces to extract the commit lsn of the sequence that
487 	 * needs to be forced.
488 	 */
489 	INIT_LIST_HEAD(&new_ctx->committing);
490 	INIT_LIST_HEAD(&new_ctx->busy_extents);
491 	new_ctx->sequence = ctx->sequence + 1;
492 	new_ctx->cil = cil;
493 	cil->xc_ctx = new_ctx;
494 
495 	/*
496 	 * mirror the new sequence into the cil structure so that we can do
497 	 * unlocked checks against the current sequence in log forces without
498 	 * risking dereferencing a freed context pointer.
499 	 */
500 	cil->xc_current_sequence = new_ctx->sequence;
501 
502 	/*
503 	 * The switch is now done, so we can drop the context lock and move out
504 	 * of a shared context. We can't just go straight to the commit record,
505 	 * though - we need to synchronise with previous and future commits so
506 	 * that the commit records are correctly ordered in the log to ensure
507 	 * that we process items during log IO completion in the correct order.
508 	 *
509 	 * For example, if we get an EFI in one checkpoint and the EFD in the
510 	 * next (e.g. due to log forces), we do not want the checkpoint with
511 	 * the EFD to be committed before the checkpoint with the EFI.  Hence
512 	 * we must strictly order the commit records of the checkpoints so
513 	 * that: a) the checkpoint callbacks are attached to the iclogs in the
514 	 * correct order; and b) the checkpoints are replayed in correct order
515 	 * in log recovery.
516 	 *
517 	 * Hence we need to add this context to the committing context list so
518 	 * that higher sequences will wait for us to write out a commit record
519 	 * before they do.
520 	 */
521 	spin_lock(&cil->xc_cil_lock);
522 	list_add(&ctx->committing, &cil->xc_committing);
523 	spin_unlock(&cil->xc_cil_lock);
524 	up_write(&cil->xc_ctx_lock);
525 
526 	/*
527 	 * Build a checkpoint transaction header and write it to the log to
528 	 * begin the transaction. We need to account for the space used by the
529 	 * transaction header here as it is not accounted for in xlog_write().
530 	 *
531 	 * The LSN we need to pass to the log items on transaction commit is
532 	 * the LSN reported by the first log vector write. If we use the commit
533 	 * record lsn then we can move the tail beyond the grant write head.
534 	 */
535 	tic = ctx->ticket;
536 	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
537 	thdr.th_type = XFS_TRANS_CHECKPOINT;
538 	thdr.th_tid = tic->t_tid;
539 	thdr.th_num_items = num_iovecs;
540 	lhdr.i_addr = &thdr;
541 	lhdr.i_len = sizeof(xfs_trans_header_t);
542 	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
543 	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);
544 
545 	lvhdr.lv_niovecs = 1;
546 	lvhdr.lv_iovecp = &lhdr;
547 	lvhdr.lv_next = ctx->lv_chain;
548 
549 	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
550 	if (error)
551 		goto out_abort;
552 
553 	/*
554 	 * now that we've written the checkpoint into the log, strictly
555 	 * order the commit records so replay will get them in the right order.
556 	 */
557 restart:
558 	spin_lock(&cil->xc_cil_lock);
559 	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
560 		/*
561 		 * Higher sequences will wait for this one so skip them.
562 		 * Don't wait for our own sequence, either.
563 		 */
564 		if (new_ctx->sequence >= ctx->sequence)
565 			continue;
566 		if (!new_ctx->commit_lsn) {
567 			/*
568 			 * It is still being pushed! Wait for the push to
569 			 * complete, then start again from the beginning.
570 			 */
571 			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
572 			goto restart;
573 		}
574 	}
575 	spin_unlock(&cil->xc_cil_lock);
576 
577 	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
578 	if (error || commit_lsn == -1)
579 		goto out_abort;
580 
581 	/* attach all the transactions w/ busy extents to iclog */
582 	ctx->log_cb.cb_func = xlog_cil_committed;
583 	ctx->log_cb.cb_arg = ctx;
584 	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
585 	if (error)
586 		goto out_abort;
587 
588 	/*
589 	 * now that the checkpoint commit is complete and we've attached the
590 	 * callbacks to the iclog, we can assign the commit LSN to the context
591 	 * and wake up anyone who is waiting for the commit to complete.
592 	 */
593 	spin_lock(&cil->xc_cil_lock);
594 	ctx->commit_lsn = commit_lsn;
595 	sv_broadcast(&cil->xc_commit_wait);
596 	spin_unlock(&cil->xc_cil_lock);
597 
598 	/* release the hounds! */
599 	return xfs_log_release_iclog(log->l_mp, commit_iclog);
600 
601 out_skip:
602 	up_write(&cil->xc_ctx_lock);
603 out_free_ticket:
604 	xfs_log_ticket_put(new_ctx->ticket);
605 	kmem_free(new_ctx);
606 	return 0;
607 
608 out_abort:
609 	xlog_cil_committed(ctx, XFS_LI_ABORTED);
610 	return XFS_ERROR(EIO);
611 }
612 
613 /*
614  * Commit a transaction with the given vector to the Committed Item List.
615  *
616  * To do this, we need to format the item, pin it in memory if required and
617  * account for the space used by the transaction. Once we have done that we
618  * need to release the unused reservation for the transaction, attach the
619  * transaction to the checkpoint context so we carry the busy extents through
620  * to checkpoint completion, and then unlock all the items in the transaction.
621  *
622  * For more specific information about the order of operations in
623  * xfs_log_commit_cil() please refer to the comments in
624  * xfs_trans_commit_iclog().
625  *
626  * Called with the context lock already held in read mode to lock out
627  * background commit, returns without it held once background commits are
628  * allowed again.
629  */
630 int
631 xfs_log_commit_cil(
632 	struct xfs_mount	*mp,
633 	struct xfs_trans	*tp,
634 	struct xfs_log_vec	*log_vector,
635 	xfs_lsn_t		*commit_lsn,
636 	int			flags)
637 {
638 	struct log		*log = mp->m_log;
639 	int			log_flags = 0;
640 	int			push = 0;
641 
642 	if (flags & XFS_TRANS_RELEASE_LOG_RES)
643 		log_flags = XFS_LOG_REL_PERM_RESERV;
644 
645 	if (XLOG_FORCED_SHUTDOWN(log)) {
646 		xlog_cil_free_logvec(log_vector);
647 		return XFS_ERROR(EIO);
648 	}
649 
650 	/*
651 	 * do all the hard work of formatting items (including memory
652 	 * allocation) outside the CIL context lock. This prevents stalling CIL
653 	 * pushes when we are low on memory and a transaction commit spends a
654 	 * lot of time in memory reclaim.
655 	 */
656 	xlog_cil_format_items(log, log_vector);
657 
658 	/* lock out background commit */
659 	down_read(&log->l_cilp->xc_ctx_lock);
660 	if (commit_lsn)
661 		*commit_lsn = log->l_cilp->xc_ctx->sequence;
662 
663 	xlog_cil_insert_items(log, log_vector, tp->t_ticket);
664 
665 	/* check we didn't blow the reservation */
666 	if (tp->t_ticket->t_curr_res < 0)
667 		xlog_print_tic_res(log->l_mp, tp->t_ticket);
668 
669 	/* attach the transaction to the CIL if it has any busy extents */
670 	if (!list_empty(&tp->t_busy)) {
671 		spin_lock(&log->l_cilp->xc_cil_lock);
672 		list_splice_init(&tp->t_busy,
673 					&log->l_cilp->xc_ctx->busy_extents);
674 		spin_unlock(&log->l_cilp->xc_cil_lock);
675 	}
676 
677 	tp->t_commit_lsn = *commit_lsn;
678 	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
679 	xfs_trans_unreserve_and_mod_sb(tp);
680 
681 	/*
682 	 * Once all the items of the transaction have been copied to the CIL,
683 	 * the items can be unlocked and freed.
684 	 *
685 	 * This needs to be done before we drop the CIL context lock because we
686 	 * have to update state in the log items and unlock them before they go
687 	 * to disk. If we don't, then the CIL checkpoint can race with us and
688 	 * we can run checkpoint completion before we've updated and unlocked
689 	 * the log items. This affects (at least) processing of stale buffers,
690 	 * inodes and EFIs.
691 	 */
692 	xfs_trans_free_items(tp, *commit_lsn, 0);
693 
694 	/* check for background commit before unlock */
695 	if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
696 		push = 1;
697 
698 	up_read(&log->l_cilp->xc_ctx_lock);
699 
700 	/*
701 	 * We need to push the CIL every so often so we don't cache more than we
702 	 * can fit in the log. The limit really is that a checkpoint can't be
703 	 * more than half the log (the current checkpoint is not allowed to
704 	 * overwrite the previous checkpoint), but commit latency and memory
705 	 * usage limit this to a smaller size in most cases.
706 	 */
707 	if (push)
708 		xlog_cil_push(log, 0);
709 	return 0;
710 }
711 
712 /*
713  * Conditionally push the CIL based on the sequence passed in.
714  *
715  * We only need to push if we haven't already pushed the sequence
716  * number given. Hence the only time we will trigger a push here is
717  * if the push sequence is the same as the current context.
718  *
719  * We return the current commit lsn to allow the callers to determine if a
720  * iclog flush is necessary following this call.
721  *
722  * XXX: Initially, just push the CIL unconditionally and return whatever
723  * commit lsn is there. It'll be empty, so this is broken for now.
724  */
725 xfs_lsn_t
726 xlog_cil_force_lsn(
727 	struct log	*log,
728 	xfs_lsn_t	sequence)
729 {
730 	struct xfs_cil		*cil = log->l_cilp;
731 	struct xfs_cil_ctx	*ctx;
732 	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
733 
734 	ASSERT(sequence <= cil->xc_current_sequence);
735 
736 	/*
737 	 * check to see if we need to force out the current context.
738 	 * xlog_cil_push() handles racing pushes for the same sequence,
739 	 * so no need to deal with it here.
740 	 */
741 	if (sequence == cil->xc_current_sequence)
742 		xlog_cil_push(log, sequence);
743 
744 	/*
745 	 * See if we can find a previous sequence still committing.
746 	 * We need to wait for all previous sequence commits to complete
747 	 * before allowing the force of push_seq to go ahead. Hence block
748 	 * on commits for those as well.
749 	 */
750 restart:
751 	spin_lock(&cil->xc_cil_lock);
752 	list_for_each_entry(ctx, &cil->xc_committing, committing) {
753 		if (ctx->sequence > sequence)
754 			continue;
755 		if (!ctx->commit_lsn) {
756 			/*
757 			 * It is still being pushed! Wait for the push to
758 			 * complete, then start again from the beginning.
759 			 */
760 			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
761 			goto restart;
762 		}
763 		if (ctx->sequence != sequence)
764 			continue;
765 		/* found it! */
766 		commit_lsn = ctx->commit_lsn;
767 	}
768 	spin_unlock(&cil->xc_cil_lock);
769 	return commit_lsn;
770 }
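/*
 * A minimal sketch of how a caller might use the returned commit lsn; the
 * real call chain lives in xfs_log.c, so treat this as an assumption about
 * the caller rather than a copy of it:
 *
 *	commit_lsn = xlog_cil_force_lsn(mp->m_log, sequence);
 *	if (commit_lsn != NULLCOMMITLSN)
 *		_xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 *
 * i.e. only issue the iclog flush when there is a real commit record to
 * force out to disk.
 */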
771 
772 /*
773  * Check if the current log item was first committed in this sequence.
774  * We can't rely on just the log item being in the CIL, we have to check
775  * the recorded commit sequence number.
776  *
777  * Note: for this to be used in a non-racy manner, it has to be called with
778  * CIL flushing locked out. As a result, it should only be used during the
779  * transaction commit process when deciding what to format into the item.
780  */
781 bool
782 xfs_log_item_in_current_chkpt(
783 	struct xfs_log_item *lip)
784 {
785 	struct xfs_cil_ctx *ctx;
786 
787 	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
788 		return false;
789 	if (list_empty(&lip->li_cil))
790 		return false;
791 
792 	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
793 
794 	/*
795 	 * li_seq is written on the first commit of a log item to record the
796 	 * first checkpoint it is written to. Hence if it is different to the
797 	 * current sequence, we're in a new checkpoint.
798 	 */
799 	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
800 		return false;
801 	return true;
802 }
803