xref: /openbmc/linux/fs/xfs/xfs_trans_buf.c (revision 6396bb221514d2876fd6dc0aa2a1f240d99b37bb)
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item	*lip;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		blip = (struct xfs_buf_log_item *)lip;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Attach the item to the transaction so we can find it in
	 * xfs_trans_get_buf() and friends.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);
	bp->b_transp = tp;
}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}
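
/*
 * A minimal, hypothetical usage sketch (not part of this file): a caller
 * that already holds a locked buffer obtained outside any transaction and
 * wants the transaction to take ownership of it.  The helper name and the
 * choice of xfs_buf_get() are illustrative assumptions.
 */
static __maybe_unused int
example_join_prelocked_buffer(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks)
{
	struct xfs_buf		*bp;

	/* Get and lock the buffer with no transaction involved. */
	bp = xfs_buf_get(target, blkno, numblks, 0);
	if (!bp)
		return -ENOMEM;

	/*
	 * Hand the locked buffer to the transaction; from here on it is
	 * released through xfs_trans_brelse() or transaction commit rather
	 * than a bare xfs_buf_relse().
	 */
	xfs_trans_bjoin(tp, bp);
	return 0;
}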

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	return bp;
}
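
/*
 * A minimal, hypothetical usage sketch (not part of this file): getting one
 * filesystem block for overwrite inside a transaction via the single-map
 * wrapper xfs_trans_get_buf() from xfs_trans.h.  The helper name and the
 * one-block length are illustrative assumptions.
 */
static __maybe_unused struct xfs_buf *
example_get_one_block(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno)
{
	/*
	 * Calling this again for the same range in the same transaction
	 * returns the same locked buffer and only bumps bli_recur, as
	 * described above.
	 */
	return xfs_trans_get_buf(tp, mp->m_ddev_targp,
				 XFS_FSB_TO_DADDR(mp, fsbno),
				 XFS_FSB_TO_BB(mp, 1), 0);
}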

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to search the transaction's items here, because the
 * superblock buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(
	xfs_trans_t		*tp,
	struct xfs_mount	*mp,
	int			flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_log_item);
	return bp;
}
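
/*
 * A minimal, hypothetical sketch (not part of this file) of the usual
 * pattern for dirtying the superblock, similar to what xfs_log_sb() does:
 * grab the buffer with xfs_trans_getsb(), tag its type for recovery and log
 * the on-disk superblock range.  Real callers also copy the in-core
 * superblock into the buffer first; the helper name is an illustrative
 * assumption.
 */
static __maybe_unused void
example_log_superblock(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = xfs_trans_getsb(tp, mp, 0);

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);

	/* Offsets to xfs_trans_log_buf() are inclusive byte numbers. */
	xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
}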

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (!bp) {
		if (!(flags & XBF_TRYLOCK))
			return -ENOMEM;
		return tp ? 0 : -EAGAIN;
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used. To ensure that a followup read tries
	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
	 * mark the buffer stale. This ensures that anyone who has a current
	 * reference to the buffer will interpret its contents correctly and
	 * future cache lookups will also treat it as an empty, uninitialised
	 * buffer.
	 */
	if (bp->b_error) {
		error = bp->b_error;
		if (!XFS_FORCED_SHUTDOWN(mp))
			xfs_buf_ioerror_alert(bp, __func__);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);

		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	*bpp = bp;
	return 0;
}
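
/*
 * A minimal, hypothetical sketch (not part of this file) of a typical
 * transactional metadata read using the single-map wrapper
 * xfs_trans_read_buf() and a verifier.  The helper name, the choice of
 * xfs_agf_buf_ops and the AGF block address are illustrative assumptions.
 */
static __maybe_unused int
example_read_agf(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		**bpp)
{
	/*
	 * On success the buffer is locked, joined to @tp and returned in
	 * *bpp; on error *bpp is left NULL and a negative errno is returned.
	 */
	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
				  XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, bpp,
				  &xfs_agf_buf_ops);
}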

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines.  If the release is only for a
 * recursive lock, just decrement the recursion count.  If the buffer has
 * been modified within this transaction, it cannot be released until the
 * transaction commits; if it has not been modified, it is unlocked and
 * released once the recursion count reaches zero.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip;
	int			freed;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_log_item;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then we must free it
	 * before releasing the buffer back to the free pool.
	 *
	 * If the fs has shutdown and we dropped the last reference, it may fall
	 * on us to release a (possibly dirty) bli if it never made it to the
	 * AIL (e.g., the aborted unpin already happened and didn't release it
	 * due to our reference). Since we're already shutdown and need
	 * ail_lock, just force remove from the AIL and release the bli here.
	 */
	if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
		xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_buf_item_relse(bp);
	} else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
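
/*
 * A minimal, hypothetical sketch (not part of this file): a read-only peek
 * at a metadata block inside a transaction.  Because the buffer is never
 * dirtied, xfs_trans_brelse() drops it from the transaction and unlocks it
 * immediately.  The helper name is an illustrative assumption.
 */
static __maybe_unused int
example_peek_block(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_daddr_t		daddr,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, daddr,
				   XFS_FSB_TO_BB(mp, 1), 0, &bp, ops);
	if (error)
		return error;

	/* ... examine bp->b_addr without modifying it ... */

	xfs_trans_brelse(tp, bp);
	return 0;
}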

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
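
/*
 * A minimal, hypothetical sketch (not part of this file) of the common
 * bhold pattern: keep a buffer locked across a transaction roll and rejoin
 * it to the follow-up transaction.  The helper name is an illustrative
 * assumption.
 */
static __maybe_unused int
example_hold_across_roll(
	struct xfs_trans	**tpp,
	struct xfs_buf		*bp)
{
	int			error;

	/* Keep @bp locked when the first transaction commits. */
	xfs_trans_bhold(*tpp, bp);

	error = xfs_trans_roll(tpp);
	if (error)
		return error;

	/* The buffer is still locked but now transactionless; rejoin it. */
	xfs_trans_bjoin(*tpp, bp);
	return 0;
}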

/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are byte offsets relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the buffer's
 * block number on disk.
 */
void
xfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	xfs_buf_item_log(bip, first, last);
}
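
/*
 * A minimal, hypothetical sketch (not part of this file): modify one field
 * inside a buffer and log only the bytes that changed.  The field layout
 * and helper name are illustrative assumptions; the offsets passed to
 * xfs_trans_log_buf() are inclusive.
 */
static __maybe_unused void
example_log_one_field(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			offset,
	__be32			value)
{
	__be32			*fieldp = bp->b_addr + offset;

	*fieldp = value;
	xfs_trans_log_buf(tp, bp, offset, offset + sizeof(*fieldp) - 1);
}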


/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}
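
/*
 * A minimal, hypothetical sketch (not part of this file): when metadata
 * blocks are freed, pull the buffer covering them into the freeing
 * transaction and invalidate it so the stale contents are never written
 * back.  The helper name and one-block length are illustrative assumptions.
 */
static __maybe_unused void
example_invalidate_freed_block(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno)
{
	struct xfs_buf		*bp;

	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       XFS_FSB_TO_DADDR(mp, fsbno),
			       XFS_FSB_TO_BB(mp, 1), 0);
	if (bp)
		xfs_trans_binval(tp, bp);
}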

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer. This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay
 * of the inodes in the buffer needs to be prevented, as the
 * buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
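
/*
 * A minimal, hypothetical sketch (not part of this file) of how an inode
 * cluster buffer is typically set up during inode allocation: get the
 * cluster buffer in the transaction, flag it as an inode allocation buffer
 * so recovery replays every inode image, then log the initialised range.
 * The helper name and cluster geometry are illustrative assumptions.
 */
static __maybe_unused void
example_init_inode_cluster(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_daddr_t		cluster_daddr,
	int			cluster_blks)
{
	struct xfs_buf		*bp;

	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, cluster_daddr,
			       XFS_FSB_TO_BB(mp, cluster_blks), 0);
	if (!bp)
		return;

	/* ... write the new on-disk inode cores into bp->b_addr ... */

	xfs_trans_inode_alloc_buf(tp, bp);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
}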

/*
 * Mark the buffer as ordered for this transaction. This means that the
 * contents of the buffer are not recorded in the transaction but it is
 * tracked in the AIL as though it was. This allows us to record logical
 * changes in transactions rather than the physical changes we make to the
 * buffer without changing writeback ordering constraints of metadata
 * buffers.
 */
bool
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (xfs_buf_item_dirty_format(bip))
		return false;

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);

	/*
	 * We don't log a dirty range of an ordered buffer, but it still needs
	 * to be marked dirty and flagged as logged.
	 */
	xfs_trans_dirty_buf(tp, bp);
	return true;
}
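
/*
 * A minimal, hypothetical sketch (not part of this file): attempt to order a
 * buffer and fall back to physically logging the whole thing if part of it
 * has already been logged in this transaction.  The helper name is an
 * illustrative assumption.
 */
static __maybe_unused void
example_order_or_log(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	if (!xfs_trans_ordered_buf(tp, bp))
		xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
}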

/*
 * Set the type of the buffer for log recovery so that recovery can correctly
 * identify the buffer and hence attach the correct buffer ops to it after
 * replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}