/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct completion *wait;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
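
/*
 * Worked example of the packing scheme above: on a 64-bit machine
 * DM_IO_MAX_REGIONS is 64, so a 'struct io' is aligned to 64 bytes and
 * its low six bits are always zero.  Packing region 5 with a
 * (hypothetical) io at 0xffff880012345600 gives a bi_private value of
 * 0xffff880012345605; masking with -64 recovers the pointer and masking
 * with 63 recovers the region number.
 */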

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->wait)
			complete(io->wait);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}
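
/*
 * Example of the bvec walk above: if the bio's iterator sits 1024 bytes
 * into a 4096-byte bvec (bi_bvec_done == 1024), bio_get_page() returns
 * the remaining 3072 bytes at bv_offset + 1024, and bio_next_page()
 * then steps to the following bvec with the consumed count reset to
 * zero.
 */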

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
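
/*
 * Example of the splitting loop above (illustrative numbers): a
 * REQ_DISCARD region of 20000 sectors on a queue whose
 * max_discard_sectors is 8192 is issued as three bios of 8192, 8192
 * and 3616 sectors, each carrying the same io/region cookie in
 * bi_private so that endio() can account them against one 'struct io'.
 */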

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
	DECLARE_COMPLETION_ONSTACK(wait);

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = &wait;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&wait);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
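
/*
 * Usage sketch for dm_io() (hypothetical caller, assuming 'client' was
 * obtained from dm_io_client_create() and 'buffer' is a kernel buffer
 * of at least 4 KiB): synchronously read the first eight sectors of a
 * block device into kernel memory.
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn    = NULL,	(NULL means synchronous)
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 *
 * A non-NULL notify.fn would instead make dm_io() return immediately
 * and invoke the callback with the accumulated error bits once every
 * bio has completed.
 */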

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}