/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

   bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
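/*
 * Illustrative sketch only (not part of the original dm-io code): a
 * round-trip check of the packing scheme above.  With DM_IO_MAX_REGIONS
 * == BITS_PER_LONG (64 on a 64-bit build), 'struct io' is 64-byte
 * aligned, so the low 6 bits of its address are zero and can carry the
 * region number.  Assumes 'io' is suitably aligned and
 * 'region' < DM_IO_MAX_REGIONS; the helper name is hypothetical.
 */
static bool __maybe_unused io_pack_roundtrip_ok(struct io *io, unsigned region)
{
	/* pack: OR the small region number into the zero low bits */
	unsigned long packed = (unsigned long)io | region;
	/* unpack: mask off the low bits to recover the pointer ... */
	struct io *io2 = (void *)(packed & -(unsigned long)DM_IO_MAX_REGIONS);
	/* ... and keep only the low bits to recover the region */
	unsigned region2 = packed & (DM_IO_MAX_REGIONS - 1);

	return io2 == io && region2 == region;
}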
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}
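/*
 * Illustrative sketch only (not part of the original dm-io code): how a
 * consumer walks a dpages provider, mirroring the default (page-data)
 * loop in do_region() below.  'dp' is assumed to have been initialised
 * by one of the *_dp_init() helpers and 'remaining' is a sector count;
 * the function name is hypothetical.
 */
static void __maybe_unused dpages_walk_example(struct dpages *dp,
					       sector_t remaining)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (remaining) {
		/* ask the provider for the current page fragment */
		dp->get_page(dp, &page, &len, &offset);
		len = min(len, to_bytes(remaining));

		/* ... hand (page, offset, len) to the consumer here ... */

		remaining -= to_sector(len);
		dp->next_page(dp);
	}
}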
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
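/*
 * Illustrative sketch only (not part of the original dm-io code): the
 * page-sized fragments the km_* and vm_* helpers above hand out when
 * the buffer does not start on a page boundary.  With 4 KiB pages,
 * 10 KiB starting at an offset of 512 bytes is carved into
 * 3584 + 4096 + 2560 byte pieces, i.e. three get_page()/next_page()
 * rounds.  The function name is hypothetical.
 */
static unsigned __maybe_unused km_fragment_count_example(void *data, size_t size)
{
	unsigned offset = offset_in_page(data);
	unsigned count = 0;

	while (size) {
		/* same arithmetic as km_get_page(): rest of current page */
		size_t len = min_t(size_t, PAGE_SIZE - offset, size);

		size -= len;
		offset = 0;	/* later fragments start at page offset 0 */
		count++;
	}

	return count;
}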
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		case REQ_OP_WRITE_SAME:
			num_bvecs = 1;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
				       GFP_NOIO, &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}
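/*
 * Illustrative sketch only (not part of the original dm-io code): how a
 * caller might interpret the error bitmap handed to its notify function
 * (or returned through *error_bits by the synchronous path).  Bit i is
 * set by dec_count() above when the I/O for region i failed.  The
 * function name is hypothetical.
 */
static int __maybe_unused first_failed_region_example(unsigned long error_bits,
						      unsigned int num_regions)
{
	unsigned int i;

	for (i = 0; i < num_regions; i++)
		if (test_bit(i, &error_bits))
			return i;

	return -1;	/* no region reported an error */
}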
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_op_flags. If you fail to do one of these, the IO will be
 * submitted to the disk after q->unplug_delay, which defaults to 3ms in
 * blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}
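/*
 * Illustrative sketch only (not part of the original dm-io code): a
 * minimal synchronous read of one region into a kernel-memory buffer
 * through dm_io().  The client, block device and buffer are assumed to
 * be set up by the (hypothetical) caller; error handling is reduced to
 * the essentials.
 */
static int __maybe_unused example_sync_read(struct dm_io_client *client,
					    struct block_device *bdev,
					    sector_t sector, sector_t count,
					    void *buf)
{
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = sector,
		.count = count,
	};
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_READ,
		.bi_op_flags = 0,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.notify.fn = NULL,	/* NULL notify.fn => synchronous */
		.client = client,
	};
	unsigned long error_bits = 0;
	int r;

	r = dm_io(&io_req, 1, &region, &error_bits);
	if (r)
		return r;

	/* each set bit in error_bits marks a failed region */
	return error_bits ? -EIO : 0;
}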