// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1 << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1 << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1 << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 31)/* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
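
/*
 * Illustrative only: this flag/name table is the kind of thing the buffer
 * tracepoints feed to __print_flags() to decode b_flags in trace output
 * (the actual tracepoint plumbing lives in xfs_trace.h). A minimal sketch
 * of that usage, not a definition made by this header:
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 */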

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and alignment of
 *    I/O performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time and is stored on disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device and controls direct I/O
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
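
/*
 * Illustrative only: the direct I/O paths use the logical sector mask to
 * check request alignment against the underlying device. A minimal sketch,
 * assuming "offset" and "count" are byte values taken from an incoming
 * request:
 *
 *	if ((offset | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 */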

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
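
/*
 * Illustrative only: callers that need a discontiguous (compound) buffer
 * build their own map array rather than using the single-map helper above.
 * A minimal sketch with made-up block numbers and lengths; the single-block
 * wrappers further down wrap exactly this pattern with a one-entry map:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *
 *	error = xfs_buf_read_map(target, map, 2, 0, &bp, ops,
 *			__this_address);
 */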

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
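
/*
 * Illustrative only: a buffer ops table typically pairs a name and the v4/v5
 * on-disk magic values with read/write verifiers. A minimal sketch using
 * hypothetical "foo" names (XFS_FOO_MAGIC, xfs_foo_hdr, xfs_foo_write_verify
 * do not exist in XFS):
 *
 *	static void xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		struct xfs_foo_hdr	*hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->magic))
 *			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO3_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */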

struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	/*
	 * b_bn is the cache index. Do not use it directly; use
	 * b_maps[0].bm_bn for the buffer disk address instead.
	 */
	xfs_daddr_t		b_bn;
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; if it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed
	 * and the iodone handler takes the appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
};

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}
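
/*
 * Illustrative only: a minimal sketch of the common read/use/release
 * pattern built from the wrappers above, assuming "mp", "blkno", "numblks"
 * and "ops" come from the caller. On success the buffer is returned locked
 * and held, so xfs_buf_relse() drops both the lock and the reference:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	...use bp->b_addr while the buffer is held and locked...
 *	xfs_buf_relse(bp);
 */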

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
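
/*
 * Illustrative only: delwri buffers are staged on a caller-owned list and
 * then submitted in one go. A minimal sketch, assuming each buffer is
 * locked and referenced by the caller when it is queued:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	...for each dirty buffer bp...
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */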

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the I/O block map rather than b_bn. b_bn now serves only
 * as the buffer cache index for cached buffers. As I/O no longer uses b_bn,
 * uncached buffers do not use b_bn at all and hence must modify the I/O map
 * directly. Uncached buffers are not allowed to be discontiguous, so this is
 * safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the I/O
 * request function and these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
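
/*
 * Illustrative only: CRC-enabled verifiers typically validate the checksum
 * on read and regenerate it on write, passing the byte offset of the
 * on-disk CRC field of the structure in question. A minimal sketch with a
 * hypothetical XFS_FOO_CRC_OFF offset:
 *
 *	In .verify_read:
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			xfs_verifier_error(bp, -EFSBADCRC, __this_address);
 *
 *	In .verify_write, after the structure checks pass:
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 */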

/*
 *	Handling of buftargs.
 */
extern struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *,
		struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */