/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }
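
/*
 * Example: the XBF_* values above combine into a single xfs_buf_flags_t
 * word and are tested and set with plain bitwise operators.  A minimal,
 * hypothetical sketch (xfs_buf_mark_async_write() is illustrative and not
 * part of this header); the read flags are cleared because a buffer's I/O
 * direction flags are not meaningful in combination:
 *
 *	static void xfs_buf_mark_async_write(struct xfs_buf *bp)
 *	{
 *		bp->b_flags &= ~(XBF_READ | XBF_READ_AHEAD);
 *		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
 *	}
 *
 *	if (bp->b_flags & XBF_STALE)
 *		return;
 */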

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_bsize;
	unsigned int		bt_sshift;
	size_t			bt_smask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;
} xfs_buftarg_t;

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
};
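
/*
 * Example: a buffer's contents are checked by the verify_read/verify_write
 * callbacks attached to a buffer through its b_ops pointer, typically by
 * calling xfs_buf_ioerror() (declared later in this header) when a check
 * fails.  A minimal sketch, assuming the positive errno convention used by
 * this header; xfs_foo_verify(), xfs_foo_buf_ops and EFSCORRUPTED are
 * illustrative names, not definitions this header provides:
 *
 *	static void xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_verify(bp))
 *			xfs_buf_ioerror(bp, EFSCORRUPTED);
 *	}
 *
 *	static void xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_verify(bp))
 *			xfs_buf_ioerror(bp, EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */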

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed.  The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_iodone_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	unsigned short		b_error;	/* error code on I/O */
	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
				xfs_daddr_t daddr, size_t numblks, int flags,
				const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);

extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
	return bp ? bp->b_error : ENOMEM;
}
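
/*
 * Example: a typical cached read cycle using the wrappers above.  A minimal
 * sketch only; target, blkno, numblks and xfs_foo_buf_ops stand in for real
 * caller state, and errors follow the positive errno convention of
 * xfs_buf_geterror():
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	bp = xfs_buf_read(target, blkno, numblks, 0, &xfs_foo_buf_ops);
 *	error = xfs_buf_geterror(bp);
 *	if (error) {
 *		if (bp) {
 *			xfs_buf_ioerror_alert(bp, __func__);
 *			xfs_buf_relse(bp);
 *		}
 *		return error;
 *	}
 *	(use bp->b_addr while the buffer is held and locked)
 *	xfs_buf_relse(bp);
 *
 * xfs_buf_relse() (defined below) unlocks the buffer and drops the hold
 * taken by xfs_buf_read().
 */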
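
/*
 * Example: delayed-write buffers are batched onto a caller-supplied list
 * and pushed in one pass with the delwri routines declared below.  A
 * minimal sketch (bp1 and bp2 are illustrative buffers the caller already
 * holds references to):
 *
 *	LIST_HEAD(buffer_list);
 *	int error;
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() waits for the queued I/O to complete;
 * xfs_buf_delwri_submit_nowait() issues it asynchronously instead.
 */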

/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);

/* Delayed Write Buffer Routines */
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

#define XFS_BUF_ZEROFLAGS(bp) \
	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
			    XBF_SYNCIO|XBF_FUA|XBF_FLUSH))

void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)

#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)

#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)

#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)

#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)

/*
 * These macros use the IO block map rather than b_bn.  b_bn is now really
 * just the buffer cache index for cached buffers.  As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the
 * IO map directly.  Uncached buffers are not allowed to be discontiguous, so
 * this is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */