// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

struct kmem_cache	*xfs_buf_item_cache;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}
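
/*
 * Note the two-step check above: the fixed header must be verified to fit
 * inside the iovec before blf_map_size is dereferenced, because reading the
 * map size from a short iovec would itself be an out-of-bounds access. Only
 * then can the end of the bitmap be compared against the end of the iovec.
 */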

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
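
/*
 * Worked example (a sketch, assuming 4-byte bitmap words): with a
 * blf_map_size of 2, the on-disk format size is the fixed header up to
 * blf_data_map plus 2 * 4 = 8 bytes of bitmap. This is the size that is
 * later copied into the format iovec in xfs_buf_item_format_segment().
 */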

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	int			nbits)
{
	void			*first, *last;

	first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
	last = xfs_buf_offset(bp,
			offset + ((first_bit + nbits) << XFS_BLF_SHIFT));

	return last - first != nbits * XFS_BLF_CHUNK;
}
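
/*
 * The straddle test works because xfs_buf_offset() returns a kernel address
 * for the given byte offset: if the start and end of the bit run do not land
 * in the same contiguous mapping, the pointer difference cannot equal the
 * byte length of the run (nbits * XFS_BLF_CHUNK), and the run has to be
 * logged chunk by chunk rather than as a single iovec.
 */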

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	uint				offset,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				first_bit;
	int				nbits;
	int				next_bit;
	int				last_bit;

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (first_bit == -1)
		return;

	(*nvecs)++;
	*nbytes += xfs_buf_log_format_size(blfp);

	do {
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		(*nvecs)++;
		*nbytes += nbits * XFS_BLF_CHUNK;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	/* Count the first bit we jumped out of the above loop from */
	(*nvecs)++;
	*nbytes += XFS_BLF_CHUNK;
	last_bit = first_bit;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			last_bit = next_bit;
			first_bit = next_bit;
			(*nvecs)++;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
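
/*
 * Worked example (a sketch, assuming no run straddles a page): a data map
 * with set bits {0,1,2,3, 6, 8,9} contains three contiguous runs, so the
 * segment contributes 1 + 3 = 4 iovecs, and nbytes grows by one buf log
 * format header plus 7 * XFS_BLF_CHUNK bytes of data.
 */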

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			i;
	int			bytes;
	uint			offset = 0;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it as we are never
		 * going to replay the changes tracked in the log item.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it. It is not being
		 * included in the transaction commit, so no vectors are used at
		 * all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	bytes = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
					  nvecs, &bytes);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Round up the buffer size required to minimise the number of memory
	 * allocations that need to be done as this item grows when relogged by
	 * repeated modifications.
	 */
	*nbytes = round_up(bytes, 512);
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}
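
/*
 * For example (a sketch, with XFS_BLF_CHUNK = 128): first_bit = 3 and
 * nbits = 2 copy 256 bytes into the log vector, starting 384 bytes into
 * this segment of the buffer.
 */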

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * The map is not dirty in this transaction, so there is
		 * nothing to format: return without copying the format
		 * structure or advancing the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	do {
		ASSERT(first_bit >= 0);
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
					first_bit, nbits);
		blfp->blf_size++;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	ASSERT(bp->b_addr == NULL);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
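
/*
 * Note that blf_size ends up as 1 (for the format header iovec) plus one per
 * logged run; as far as I can tell this is the region count log recovery
 * uses to know how many iovecs belong to this segment.
 */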

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_has_v3inodes(lip->li_log->l_mp) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	/*
	 * Drop the bli ref associated with the pin and grab the hold required
	 * for the I/O simulation failure in the abort case. We have to do this
	 * before the pin count drops because the AIL doesn't acquire a bli
	 * reference. Therefore if the refcount drops to zero, the bli could
	 * still be AIL resident and the buffer submitted for I/O (and freed on
	 * completion) at any point before we return. This can be removed once
	 * the AIL properly holds a reference on the bli.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);
	if (freed && !stale && remove)
		xfs_buf_hold(bp);
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	/* nothing to do but drop the pin count if the bli is active */
	if (!freed)
		return;

	if (stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(list_empty(&lip->li_trans));
		ASSERT(!bp->b_transp);

		trace_xfs_buf_item_unpin_stale(bip);

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure. We acquired the hold for this case
		 * before the buffer was unpinned.
		 */
		xfs_buf_lock(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)xfs_buf_daddr(bp));
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
			xlog_is_shutdown(lip->li_log);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD flag is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_cache, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
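
/*
 * Example of the sizing math above (a sketch, assuming XFS_BLF_CHUNK = 128
 * and 32-bit map words): a single-map 4096-byte buffer has
 * 4096 / 128 = 32 chunks, so map_size = DIV_ROUND_UP(32, NBWORD) = 1 and a
 * single bitmap word describes the whole buffer.
 */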

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
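
/*
 * Worked example (a sketch, with XFS_BLF_SHIFT = 7, i.e. 128-byte chunks):
 * logging bytes 256 through 511 gives first_bit = 2 and last_bit = 3, so
 * bits 2 and 3 are set and the two chunks covering that byte range are
 * relogged in full.
 */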

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
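
/*
 * Callers normally reach xfs_buf_item_log() through the transaction layer
 * rather than directly; a minimal sketch of typical usage:
 *
 *	xfs_trans_log_buf(tp, bp, first, last);
 *
 * which dirties the transaction and ends up here to mark the byte range in
 * the bli bitmaps.
 */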

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
			     bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_cache, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}
1047