// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
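
/*
 * Illustrative sketch only; the helper below is hypothetical and not part of
 * this header. Flag bits are combined per request (read-ahead I/O, for
 * example, is issued with XBF_READ, XBF_ASYNC and XBF_READ_AHEAD all set),
 * so a request type can be recognised from the flags word alone.
 */
static inline bool
xfs_buf_flags_is_readahead(xfs_buf_flags_t flags)
{
	const xfs_buf_flags_t	ra = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;

	return (flags & ra) == ra;
}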

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The second is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
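
/*
 * Illustrative sketch only; the helper below is hypothetical and not part of
 * this header. The sector masks are the matching "size - 1" masks, so a
 * direct IO alignment check against the device's logical sector size is a
 * simple mask test on the offset and length.
 */
static inline bool
xfs_buftarg_example_dio_aligned(
	struct xfs_buftarg	*bt,
	loff_t			pos,
	size_t			count)
{
	return ((pos | count) & bt->bt_logical_sectormask) == 0;
}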

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered to have failed
	 * permanently and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, int flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
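
/*
 * Illustrative sketch only; the helper below is hypothetical and not part of
 * this header. A typical cached metadata read checks the return value, works
 * on bp->b_addr while holding the buffer lock that xfs_buf_read() returned
 * with, and then drops the lock and the hold reference (xfs_buf_relse(),
 * further down, combines those two calls).
 */
static inline int
xfs_buf_example_read_one(
	struct xfs_buftarg		*target,
	xfs_daddr_t			blkno,
	size_t				numblks,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_buf	*bp;
	int		error;

	error = xfs_buf_read(target, blkno, numblks, 0, &bp, ops);
	if (error)
		return error;

	/* ... inspect or modify bp->b_addr here ... */

	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return 0;
}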

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);

extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
static inline int xfs_buf_submit(struct xfs_buf *bp)
{
	bool wait = bp->b_flags & XBF_ASYNC ? false : true;
	return __xfs_buf_submit(bp, wait);
}

void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the
 * IO map directly. Uncached buffers are not allowed to be discontiguous, so
 * this is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
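
/*
 * Illustrative sketch only; the helper below is hypothetical and not part of
 * this header. Delayed writes are staged on a caller-private list with
 * xfs_buf_delwri_queue() (the buffer is assumed to be locked here) and then
 * pushed as a batch, in this case with the blocking xfs_buf_delwri_submit()
 * variant.
 */
static inline int
xfs_buf_example_delwri_flush_one(struct xfs_buf *bp)
{
	LIST_HEAD(buffer_list);

	/* queueing fails if the buffer is already on a delwri queue */
	if (!xfs_buf_delwri_queue(bp, &buffer_list))
		return 0;

	return xfs_buf_delwri_submit(&buffer_list);
}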

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */