// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }

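/*
 * XFS_BUF_FLAGS expands to a flag/name table in the form consumed by the
 * tracepoint helper __print_flags(). A typical use in a TP_printk() format
 * (sketch modeled on fs/xfs/xfs_trace.h):
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 *
 * which renders, e.g., (XBF_READ | XBF_ASYNC) as "READ|ASYNC".
 */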
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
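
/*
 * Illustrative sketch (not part of this header): the sector masks make
 * alignment checks cheap. Assuming "pos" and "count" describe a candidate
 * direct IO against a buftarg "btp", unaligned IO can be rejected with:
 *
 *	if ((pos | count) & btp->bt_logical_sectormask)
 *		return -EINVAL;
 *
 * This mirrors the direct IO alignment checks in fs/xfs/xfs_file.c.
 */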

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

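/*
 * Illustrative sketch (not part of this header): a discontiguous
 * (compound) buffer is described by an array of maps, one per extent.
 * The daddr0/daddr1 values here are hypothetical:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = daddr0, .bm_len = 4 },
 *		{ .bm_bn = daddr1, .bm_len = 4 },
 *	};
 *	struct xfs_buf *bp;
 *
 *	bp = xfs_buf_read_map(btp, map, 2, 0, ops);
 *
 * Single-extent callers use DEFINE_SINGLE_BUF_MAP() via the inline
 * wrappers below instead of building the array by hand.
 */
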
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

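/*
 * Illustrative sketch (not part of this header), modeled on the verifiers
 * in fs/xfs/libxfs: a hypothetical buffer type "foo" with v4/v5 magic
 * numbers wired up so xfs_verify_magic() can match either version.
 * XFS_FOO_MAGIC, XFS_FOO5_MAGIC and struct xfs_foo_hdr are invented here;
 * a matching write verifier is sketched near the checksum helpers below:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		struct xfs_foo_hdr *hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->foo_magic))
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO5_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */
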
typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock, not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; if it then exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed
	 * and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

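/*
 * Illustrative sketch (not part of this header): a typical cached read.
 * "mp" (an xfs_mount) and "xfs_foo_buf_ops" are assumed for illustration;
 * callers check b_error because the read verifier runs at IO completion:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0,
 *			  &xfs_foo_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... operate on bp->b_addr, then xfs_buf_relse(bp) ...
 */
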
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);

extern int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
static inline int xfs_buf_submit(struct xfs_buf *bp)
{
	bool wait = !(bp->b_flags & XBF_ASYNC);
	return __xfs_buf_submit(bp, wait);
}

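/*
 * Illustrative sketch (not part of this header) of the two write paths
 * built on __xfs_buf_submit(): xfs_bwrite() writes a locked, held buffer
 * synchronously and returns the IO result, while async callers set the
 * flags themselves and let b_iodone handle completion:
 *
 *	error = xfs_bwrite(bp);			(sync: waits for the IO)
 *
 *	bp->b_flags |= XBF_WRITE | XBF_ASYNC;	(async: returns before
 *	xfs_buf_submit(bp);			 the IO completes)
 */
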
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

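/*
 * Illustrative sketch (not part of this header): batching delayed writes.
 * Locked, dirty buffers are queued onto a caller-owned list (queueing
 * takes its own hold reference, so the caller may release the buffer
 * immediately), then the whole list is submitted in one pass:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	(for each dirty buffer)
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() waits for all the IO to complete;
 * xfs_buf_delwri_submit_nowait() issues it asynchronously instead.
 */
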
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

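/*
 * Illustrative sketch (not part of this header): writing an uncached
 * buffer. Since uncached buffers never use b_bn, the target disk address
 * is stored in the IO map via XFS_BUF_SET_ADDR() before submission:
 *
 *	bp = xfs_buf_get_uncached(btp, numblks, 0);
 *	if (bp) {
 *		XFS_BUF_SET_ADDR(bp, daddr);
 *		bp->b_flags |= XBF_WRITE;
 *		... fill bp->b_addr ...
 *		error = xfs_buf_submit(bp);
 *		xfs_buf_relse(bp);
 *	}
 */
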
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

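/*
 * Illustrative sketch (not part of this header), continuing the
 * hypothetical "foo" verifier above: a CRC-enabled write verifier
 * recomputes the checksum as the last step before the buffer goes to
 * disk. XFS_FOO_CRC_OFF and xfs_foo_verify_struct() are invented here:
 *
 *	static void
 *	xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		xfs_failaddr_t	fa;
 *
 *		fa = xfs_foo_verify_struct(bp);
 *		if (fa) {
 *			__xfs_buf_ioerror(bp, -EFSCORRUPTED, fa);
 *			return;
 *		}
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 *	}
 *
 * The matching read verifier checks the CRC first:
 *
 *	if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *		xfs_buf_ioerror(bp, -EFSBADCRC);
 */
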
/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */