/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>

#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x)	(bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x)		((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x)	0
#define BIOVEC_VIRT_OVERSIZE(x)		0
#endif

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY	0
#endif

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	/* Number of segments after physical and DMA remapping
	 * hardware coalescing is performed.
	 */
	unsigned short		bi_hw_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max hw size, we account for the
	 * sizes of the first and last virtually mergeable segments
	 * in this bio
	 */
	unsigned int		bi_hw_front_size;
	unsigned int		bi_hw_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;

	bio_destructor_t	*bi_destructor;	/* destructor */
};
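/*
 * Illustrative example (not part of this interface): a bio describing a
 * single-page read starting at sector 1024 would roughly look like this
 * once it has been set up.  The field names come from struct bio and
 * struct bio_vec above; bdev and page are made up for the example:
 *
 *	bio->bi_sector = 1024;
 *	bio->bi_bdev   = bdev;
 *	bio->bi_vcnt   = 1;
 *	bio->bi_idx    = 0;
 *	bio->bi_size   = PAGE_SIZE;
 *	bio->bi_io_vec[0].bv_page   = page;
 *	bio->bi_io_vec[0].bv_len    = PAGE_SIZE;
 *	bio->bi_io_vec[0].bv_offset = 0;
 *
 * bi_size counts the bytes still pending, so for a freshly built bio it
 * equals the sum of the bv_len fields and is decremented as completions
 * come in.
 */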
/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED	6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 */
#define BIO_RW		0
#define BIO_RW_AHEAD	1
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)	do {					\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
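/*
 * Illustrative sketch of a completion callback matching bio_end_io_t
 * (example_end_io and example_handle_error are hypothetical names, and
 * the private context is an assumption of the example).  A bio may be
 * completed in pieces, so the callback only acts once the residual byte
 * count has reached zero:
 *
 *	static int example_end_io(struct bio *bio, unsigned int bytes_done,
 *				  int error)
 *	{
 *		if (bio->bi_size)
 *			return 1;
 *
 *		if (!bio_flagged(bio, BIO_UPTODATE))
 *			example_handle_error(bio->bi_private, error);
 *
 *		bio_put(bio);
 *		return 0;
 *	}
 *
 * bio_put() is declared further down in this header.
 */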
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype)	kunmap_atomic(addr, kmtype)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask)	\
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2)	\
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2)	\
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio, bytes)	bio_endio((bio), (bytes), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)	\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
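/*
 * Illustrative sketch (example_check_bio is a hypothetical helper): for a
 * freshly built bio that has not yet been partially completed, the pending
 * segments visited by bio_for_each_segment() add up to bi_size:
 *
 *	static void example_check_bio(struct bio *bio)
 *	{
 *		struct bio_vec *bvec;
 *		unsigned int bytes = 0;
 *		int i;
 *
 *		bio_for_each_segment(bvec, bio, i)
 *			bytes += bvec->bv_len;
 *
 *		BIO_BUG_ON(bytes != bio->bi_size);
 *	}
 */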
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. the bio would then already be freed memory by the time the
 * if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)


/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
	atomic_t	cnt;
	int		error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);

extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
struct sg_iovec;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
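/*
 * Illustrative sketch of building and submitting a bio with the interfaces
 * above.  bdev, page, sector, ctx and example_end_io are assumptions of the
 * example, and submit_bio() comes from the core block layer, not from this
 * header:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *
 *	bio->bi_bdev	= bdev;
 *	bio->bi_sector	= sector;
 *	bio->bi_end_io	= example_end_io;
 *	bio->bi_private	= ctx;
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *		bio_put(bio);
 *		return -EIO;
 *	}
 *
 *	submit_bio(READ, bio);
 *
 * bio_add_page() returns the number of bytes it actually added, so the
 * caller must check for a short add rather than assume success.
 */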
#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

#endif /* __LINUX_BIO_H */
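/*
 * Illustrative sketch of the bvec_kmap_irq()/bvec_kunmap_irq() pairing
 * described above: the mapping is only valid until the matching unmap, and
 * interrupts must not be re-enabled in between.  bvec and buffer are
 * assumptions of the example:
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(buffer, data, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */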