xref: /openbmc/linux/fs/xfs/xfs_buf_item.c (revision 9d749629)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
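/*
 * Illustrative example (added for clarity, not part of the original source):
 * for a single segment whose dirty bitmap has bits 1, 2, 5 and 6 set, the
 * initial count of 2 (format structure plus the first dirty run, bits 1-2)
 * is bumped once for the second run at bits 5-6, giving 3 iovecs in total,
 * assuming the underlying chunks sit in contiguous memory.
 */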
STATIC uint
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return 0;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	nvecs = 2;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	return nvecs;
}

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
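/*
 * Illustrative example (added, not part of the original source): a compound
 * buffer built from two discontiguous extents with dirty chunks in both
 * segments is sized exactly as two independent buffers would be - one buf
 * log format vector per segment plus one vector per contiguous dirty run in
 * that segment - so log recovery never needs to know that the buffer was a
 * single discontiguous object in memory.
 */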
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	uint			nvecs;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		return bip->bli_format_count;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	nvecs = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}

static struct xfs_log_iovec *
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_iovec	*vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf	*bp = bip->bli_buf;
	uint		base_size;
	uint		nvecs;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;
	uint		buffer_offset;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
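	/*
	 * Worked example (added for illustration, not in the original source):
	 * with blf_map_size == 2, base_size is the header up to blf_data_map
	 * plus 2 * sizeof(blfp->blf_data_map[0]) bytes of bitmap, i.e. only
	 * the words actually used rather than the full in-memory map array.
	 */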

	nvecs = 0;
	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		goto out;
	}

	vecp->i_addr = blfp;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		goto out;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */

	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, offset +
					      (next_bit << XFS_BLF_SHIFT)) !=
			   (xfs_buf_offset(bp, offset +
					       (last_bit << XFS_BLF_SHIFT)) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
/*
 * You would think we need to bump the nvecs here too, but we do not,
 * because this number is used by recovery, and it gets confused by the
 * boundary split here:
 *			nvecs++;
 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
out:
	blfp->blf_size = nvecs;
	return vecp;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state. We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		vecp = xfs_buf_item_format_segment(bip, vecp, offset,
						&bip->bli_formats[i]);
		offset += bp->b_maps[i].bm_len;
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted, clean, i;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);
	/*
	 * If the buf item isn't tracking any data, free it; otherwise drop the
	 * reference we hold to it. If we are aborting the transaction, this may
	 * be the only reference to the buf item, so we free it anyway
	 * regardless of whether it is dirty or not. A dirty abort implies a
	 * shutdown, anyway.
	 */
	clean = 1;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
			     bip->bli_formats[i].blf_map_size)) {
			clean = 0;
			break;
		}
	}
	if (clean)
		xfs_buf_item_relse(bp);
	else if (aborted) {
		if (atomic_dec_and_test(&bip->bli_refcount)) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			xfs_buf_item_relse(bp);
		}
	} else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
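/*
 * Illustrative example (added, not part of the original source): if an inode
 * allocation buffer was committed to the AIL at LSN 100 and is later relogged
 * at LSN 200 while XFS_BLI_INODE_ALLOC_BUF is still set, this function keeps
 * returning 100 (lip->li_lsn), so the original inode images stay in the
 * active portion of the log until they have been flushed.
 */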
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing = xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				KM_SLEEP);
	if (!bip->bli_formats)
		return ENOMEM;
	return 0;
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
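	/*
	 * Worked example (added for illustration, not in the original source):
	 * assuming XFS_BLF_CHUNK is 128 and NBWORD is 32, a map of 8 basic
	 * blocks (BBTOB(bm_len) == 4096 bytes) gives
	 * chunks = DIV_ROUND_UP(4096, 128) = 32 and
	 * map_size = DIV_ROUND_UP(32, 32) = 1 bitmap word.
	 */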
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log_segment(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last,
	uint			*map)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
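	/*
	 * Worked example (added for illustration, not in the original source):
	 * assuming NBWORD is 32, bit == 3 and bits_to_set == 10 give
	 * end_bit = MIN(13, 32) = 13 and mask = ((1 << 10) - 1) << 3, i.e.
	 * bits 3..12 of the first word.  bits_set becomes 10, so the
	 * whole-word and trailing partial-word loops below have nothing
	 * left to do.
	 */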
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}
}

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len);
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}
		if (first < start)
			first = start;
		if (end > last)
			end = last;

		xfs_buf_item_log_segment(bip, first, end,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}


/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = bp->b_fspriv;
	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
970  * The loop then restarts from the new head of the list. This allows the
971  * callback to scan and modify the list attached to the buffer and we don't
972  * have to care about maintaining a next item pointer.
973  */
974 STATIC void
975 xfs_buf_do_callbacks(
976 	struct xfs_buf		*bp)
977 {
978 	struct xfs_log_item	*lip;
979 
980 	while ((lip = bp->b_fspriv) != NULL) {
981 		bp->b_fspriv = lip->li_bio_list;
982 		ASSERT(lip->li_cb != NULL);
983 		/*
984 		 * Clear the next pointer so we don't have any
985 		 * confusion if the item is added to another buf.
986 		 * Don't touch the log item after calling its
987 		 * callback, because it could have freed itself.
988 		 */
989 		lip->li_bio_list = NULL;
990 		lip->li_cb(bp, lip);
991 	}
992 }
993 
994 /*
995  * This is the iodone() function for buffers which have had callbacks
996  * attached to them by xfs_buf_attach_iodone().  It should remove each
997  * log item from the buffer's list and call the callback of each in turn.
998  * When done, the buffer's fsprivate field is set to NULL and the buffer
999  * is unlocked with a call to iodone().
1000  */
1001 void
1002 xfs_buf_iodone_callbacks(
1003 	struct xfs_buf		*bp)
1004 {
1005 	struct xfs_log_item	*lip = bp->b_fspriv;
1006 	struct xfs_mount	*mp = lip->li_mountp;
1007 	static ulong		lasttime;
1008 	static xfs_buftarg_t	*lasttarg;
1009 
1010 	if (likely(!xfs_buf_geterror(bp)))
1011 		goto do_callbacks;
1012 
1013 	/*
1014 	 * If we've already decided to shutdown the filesystem because of
1015 	 * I/O errors, there's no point in giving this a retry.
1016 	 */
1017 	if (XFS_FORCED_SHUTDOWN(mp)) {
1018 		xfs_buf_stale(bp);
1019 		XFS_BUF_DONE(bp);
1020 		trace_xfs_buf_item_iodone(bp, _RET_IP_);
1021 		goto do_callbacks;
1022 	}
1023 
1024 	if (bp->b_target != lasttarg ||
1025 	    time_after(jiffies, (lasttime + 5*HZ))) {
1026 		lasttime = jiffies;
1027 		xfs_buf_ioerror_alert(bp, __func__);
1028 	}
1029 	lasttarg = bp->b_target;
1030 
1031 	/*
1032 	 * If the write was asynchronous then no one will be looking for the
1033 	 * error.  Clear the error state and write the buffer out again.
1034 	 *
1035 	 * XXX: This helps against transient write errors, but we need to find
1036 	 * a way to shut the filesystem down if the writes keep failing.
1037 	 *
1038 	 * In practice we'll shut the filesystem down soon as non-transient
	 * In practice we'll shut the filesystem down soon, as non-transient
	 * errors tend to affect the whole device and a failing log write
	 * will make us give up.  But we really ought to do better here.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
			xfs_buf_iorequest(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on the AIL because the transaction was
	 * aborted forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}