// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)	/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)	/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)	/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)	/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)	/* buffer on a delwri queue */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of I/O performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device and controls direct I/O
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
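/*
 * Illustrative sketch, not part of the upstream header: one plausible way
 * the metadata sector mask might be consulted to check I/O alignment.
 * example_daddr_is_aligned() is a hypothetical helper; bt_meta_sectormask
 * is (bt_meta_sectorsize - 1), so a simple mask test suffices.
 */
static inline bool
example_daddr_is_aligned(
	struct xfs_buftarg	*btp,
	xfs_daddr_t		daddr)
{
	/* BBTOB() converts 512-byte basic blocks to a byte offset */
	return (BBTOB(daddr) & btp->bt_meta_sectormask) == 0;
}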
struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; once it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed
	 * and the iodone handler takes appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means we can change the retry timeout for buffers
	 * already under I/O and thus avoid getting stuck in a retry loop
	 * with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated
	 * errors, not different errors. E.g. a block device might change
	 * ENOSPC to EIO when a failure timeout occurs, so we want to
	 * re-initialise the error retry behaviour appropriately when that
	 * happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;
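/*
 * Illustrative sketch, not part of the upstream header: minimal verifier
 * wiring through xfs_buf_ops. The "example_" names, the EXAMPLE_MAGIC
 * values, and the on-disk layout are hypothetical; real verifiers live in
 * libxfs next to the structures they check and typically also implement
 * ->verify_read and ->verify_write, which report failures via
 * __xfs_buf_ioerror() (declared below).
 */
#define EXAMPLE_MAGIC_V4	0x58455834	/* hypothetical v4 magic */
#define EXAMPLE_MAGIC_V5	0x58455835	/* hypothetical v5 magic */

static inline xfs_failaddr_t
example_verify_struct(
	struct xfs_buf		*bp)
{
	/* assume the magic is the first field of the on-disk structure */
	__be32			*magic = bp->b_addr;

	/* accept either generation's magic, mirroring b_ops->magic[] */
	if (*magic != cpu_to_be32(EXAMPLE_MAGIC_V4) &&
	    *magic != cpu_to_be32(EXAMPLE_MAGIC_V5))
		return __this_address;	/* corruption found at this callsite */
	return NULL;
}

static const struct xfs_buf_ops example_buf_ops __maybe_unused = {
	.name		= "example",
	.magic		= { cpu_to_be32(EXAMPLE_MAGIC_V4),
			    cpu_to_be32(EXAMPLE_MAGIC_V5) },
	.verify_struct	= example_verify_struct,
};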
/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			   struct xfs_buf_map *map, int nmaps,
			   const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, int flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);

extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
static inline int xfs_buf_submit(struct xfs_buf *bp)
{
	bool wait = !(bp->b_flags & XBF_ASYNC);

	return __xfs_buf_submit(bp, wait);
}
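/*
 * Illustrative sketch, not part of the upstream header: a typical
 * synchronous, verified read of a single block and its release.
 * example_read_one() is hypothetical and elides everything but the
 * reference and lock discipline.
 */
static inline int
example_read_one(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	/* returns a locked, referenced buffer whose contents passed ops */
	error = xfs_buf_read(target, blkno, 1, 0, &bp, ops);
	if (error)
		return error;

	/* ... inspect bp->b_addr here ... */

	/* unlock + rele; xfs_buf_relse() below combines the two */
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return 0;
}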
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the I/O block map rather than b_bn. b_bn now serves only
 * as the buffer cache index for cached buffers. As I/O no longer uses b_bn,
 * uncached buffers do not use b_bn at all and hence must modify the I/O map
 * directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to
 * the I/O request function, and these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */