1 /* 2 * Copyright (C) 2003 Sistina Software 3 * Copyright (C) 2006 Red Hat GmbH 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include "dm.h" 9 10 #include <linux/device-mapper.h> 11 12 #include <linux/bio.h> 13 #include <linux/completion.h> 14 #include <linux/mempool.h> 15 #include <linux/module.h> 16 #include <linux/sched.h> 17 #include <linux/slab.h> 18 #include <linux/dm-io.h> 19 20 #define DM_MSG_PREFIX "io" 21 22 #define DM_IO_MAX_REGIONS BITS_PER_LONG 23 24 struct dm_io_client { 25 mempool_t *pool; 26 struct bio_set *bios; 27 }; 28 29 /* 30 * Aligning 'struct io' reduces the number of bits required to store 31 * its address. Refer to store_io_and_region_in_bio() below. 32 */ 33 struct io { 34 unsigned long error_bits; 35 atomic_t count; 36 struct dm_io_client *client; 37 io_notify_fn callback; 38 void *context; 39 void *vma_invalidate_address; 40 unsigned long vma_invalidate_size; 41 } __attribute__((aligned(DM_IO_MAX_REGIONS))); 42 43 static struct kmem_cache *_dm_io_cache; 44 45 /* 46 * Create a client with mempool and bioset. 47 */ 48 struct dm_io_client *dm_io_client_create(void) 49 { 50 struct dm_io_client *client; 51 unsigned min_ios = dm_get_reserved_bio_based_ios(); 52 53 client = kmalloc(sizeof(*client), GFP_KERNEL); 54 if (!client) 55 return ERR_PTR(-ENOMEM); 56 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache); 58 if (!client->pool) 59 goto bad; 60 61 client->bios = bioset_create(min_ios, 0); 62 if (!client->bios) 63 goto bad; 64 65 return client; 66 67 bad: 68 mempool_destroy(client->pool); 69 kfree(client); 70 return ERR_PTR(-ENOMEM); 71 } 72 EXPORT_SYMBOL(dm_io_client_create); 73 74 void dm_io_client_destroy(struct dm_io_client *client) 75 { 76 mempool_destroy(client->pool); 77 bioset_free(client->bios); 78 kfree(client); 79 } 80 EXPORT_SYMBOL(dm_io_client_destroy); 81 82 /*----------------------------------------------------------------- 83 * We need to keep track of which region a bio is doing io for. 84 * To avoid a memory allocation to store just 5 or 6 bits, we 85 * ensure the 'struct io' pointer is aligned so enough low bits are 86 * always zero and then combine it with the region number directly in 87 * bi_private. 88 *---------------------------------------------------------------*/ 89 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, 90 unsigned region) 91 { 92 if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) { 93 DMCRIT("Unaligned struct io pointer %p", io); 94 BUG(); 95 } 96 97 bio->bi_private = (void *)((unsigned long)io | region); 98 } 99 100 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, 101 unsigned *region) 102 { 103 unsigned long val = (unsigned long)bio->bi_private; 104 105 *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS); 106 *region = val & (DM_IO_MAX_REGIONS - 1); 107 } 108 109 /*----------------------------------------------------------------- 110 * We need an io object to keep track of the number of bios that 111 * have been dispatched for a particular io. 112 *---------------------------------------------------------------*/ 113 static void complete_io(struct io *io) 114 { 115 unsigned long error_bits = io->error_bits; 116 io_notify_fn fn = io->callback; 117 void *context = io->context; 118 119 if (io->vma_invalidate_size) 120 invalidate_kernel_vmap_range(io->vma_invalidate_address, 121 io->vma_invalidate_size); 122 123 mempool_free(io, io->client->pool); 124 fn(error_bits, context); 125 } 126 127 static void dec_count(struct io *io, unsigned int region, int error) 128 { 129 if (error) 130 set_bit(region, &io->error_bits); 131 132 if (atomic_dec_and_test(&io->count)) 133 complete_io(io); 134 } 135 136 static void endio(struct bio *bio) 137 { 138 struct io *io; 139 unsigned region; 140 int error; 141 142 if (bio->bi_error && bio_data_dir(bio) == READ) 143 zero_fill_bio(bio); 144 145 /* 146 * The bio destructor in bio_put() may use the io object. 147 */ 148 retrieve_io_and_region_from_bio(bio, &io, ®ion); 149 150 error = bio->bi_error; 151 bio_put(bio); 152 153 dec_count(io, region, error); 154 } 155 156 /*----------------------------------------------------------------- 157 * These little objects provide an abstraction for getting a new 158 * destination page for io. 159 *---------------------------------------------------------------*/ 160 struct dpages { 161 void (*get_page)(struct dpages *dp, 162 struct page **p, unsigned long *len, unsigned *offset); 163 void (*next_page)(struct dpages *dp); 164 165 unsigned context_u; 166 void *context_ptr; 167 168 void *vma_invalidate_address; 169 unsigned long vma_invalidate_size; 170 }; 171 172 /* 173 * Functions for getting the pages from a list. 174 */ 175 static void list_get_page(struct dpages *dp, 176 struct page **p, unsigned long *len, unsigned *offset) 177 { 178 unsigned o = dp->context_u; 179 struct page_list *pl = (struct page_list *) dp->context_ptr; 180 181 *p = pl->page; 182 *len = PAGE_SIZE - o; 183 *offset = o; 184 } 185 186 static void list_next_page(struct dpages *dp) 187 { 188 struct page_list *pl = (struct page_list *) dp->context_ptr; 189 dp->context_ptr = pl->next; 190 dp->context_u = 0; 191 } 192 193 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) 194 { 195 dp->get_page = list_get_page; 196 dp->next_page = list_next_page; 197 dp->context_u = offset; 198 dp->context_ptr = pl; 199 } 200 201 /* 202 * Functions for getting the pages from a bvec. 203 */ 204 static void bio_get_page(struct dpages *dp, struct page **p, 205 unsigned long *len, unsigned *offset) 206 { 207 struct bio_vec *bvec = dp->context_ptr; 208 *p = bvec->bv_page; 209 *len = bvec->bv_len - dp->context_u; 210 *offset = bvec->bv_offset + dp->context_u; 211 } 212 213 static void bio_next_page(struct dpages *dp) 214 { 215 struct bio_vec *bvec = dp->context_ptr; 216 dp->context_ptr = bvec + 1; 217 dp->context_u = 0; 218 } 219 220 static void bio_dp_init(struct dpages *dp, struct bio *bio) 221 { 222 dp->get_page = bio_get_page; 223 dp->next_page = bio_next_page; 224 dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 225 dp->context_u = bio->bi_iter.bi_bvec_done; 226 } 227 228 /* 229 * Functions for getting the pages from a VMA. 230 */ 231 static void vm_get_page(struct dpages *dp, 232 struct page **p, unsigned long *len, unsigned *offset) 233 { 234 *p = vmalloc_to_page(dp->context_ptr); 235 *offset = dp->context_u; 236 *len = PAGE_SIZE - dp->context_u; 237 } 238 239 static void vm_next_page(struct dpages *dp) 240 { 241 dp->context_ptr += PAGE_SIZE - dp->context_u; 242 dp->context_u = 0; 243 } 244 245 static void vm_dp_init(struct dpages *dp, void *data) 246 { 247 dp->get_page = vm_get_page; 248 dp->next_page = vm_next_page; 249 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); 250 dp->context_ptr = data; 251 } 252 253 /* 254 * Functions for getting the pages from kernel memory. 255 */ 256 static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len, 257 unsigned *offset) 258 { 259 *p = virt_to_page(dp->context_ptr); 260 *offset = dp->context_u; 261 *len = PAGE_SIZE - dp->context_u; 262 } 263 264 static void km_next_page(struct dpages *dp) 265 { 266 dp->context_ptr += PAGE_SIZE - dp->context_u; 267 dp->context_u = 0; 268 } 269 270 static void km_dp_init(struct dpages *dp, void *data) 271 { 272 dp->get_page = km_get_page; 273 dp->next_page = km_next_page; 274 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); 275 dp->context_ptr = data; 276 } 277 278 /*----------------------------------------------------------------- 279 * IO routines that accept a list of pages. 280 *---------------------------------------------------------------*/ 281 static void do_region(int rw, unsigned region, struct dm_io_region *where, 282 struct dpages *dp, struct io *io) 283 { 284 struct bio *bio; 285 struct page *page; 286 unsigned long len; 287 unsigned offset; 288 unsigned num_bvecs; 289 sector_t remaining = where->count; 290 struct request_queue *q = bdev_get_queue(where->bdev); 291 unsigned short logical_block_size = queue_logical_block_size(q); 292 sector_t num_sectors; 293 unsigned int uninitialized_var(special_cmd_max_sectors); 294 295 /* 296 * Reject unsupported discard and write same requests. 297 */ 298 if (rw & REQ_DISCARD) 299 special_cmd_max_sectors = q->limits.max_discard_sectors; 300 else if (rw & REQ_WRITE_SAME) 301 special_cmd_max_sectors = q->limits.max_write_same_sectors; 302 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { 303 dec_count(io, region, -EOPNOTSUPP); 304 return; 305 } 306 307 /* 308 * where->count may be zero if rw holds a flush and we need to 309 * send a zero-sized flush. 310 */ 311 do { 312 /* 313 * Allocate a suitably sized-bio. 314 */ 315 if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) 316 num_bvecs = 1; 317 else 318 num_bvecs = min_t(int, BIO_MAX_PAGES, 319 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 320 321 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 322 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); 323 bio->bi_bdev = where->bdev; 324 bio->bi_end_io = endio; 325 store_io_and_region_in_bio(bio, io, region); 326 327 if (rw & REQ_DISCARD) { 328 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); 329 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 330 remaining -= num_sectors; 331 } else if (rw & REQ_WRITE_SAME) { 332 /* 333 * WRITE SAME only uses a single page. 334 */ 335 dp->get_page(dp, &page, &len, &offset); 336 bio_add_page(bio, page, logical_block_size, offset); 337 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); 338 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 339 340 offset = 0; 341 remaining -= num_sectors; 342 dp->next_page(dp); 343 } else while (remaining) { 344 /* 345 * Try and add as many pages as possible. 346 */ 347 dp->get_page(dp, &page, &len, &offset); 348 len = min(len, to_bytes(remaining)); 349 if (!bio_add_page(bio, page, len, offset)) 350 break; 351 352 offset = 0; 353 remaining -= to_sector(len); 354 dp->next_page(dp); 355 } 356 357 atomic_inc(&io->count); 358 submit_bio(rw, bio); 359 } while (remaining); 360 } 361 362 static void dispatch_io(int rw, unsigned int num_regions, 363 struct dm_io_region *where, struct dpages *dp, 364 struct io *io, int sync) 365 { 366 int i; 367 struct dpages old_pages = *dp; 368 369 BUG_ON(num_regions > DM_IO_MAX_REGIONS); 370 371 if (sync) 372 rw |= REQ_SYNC; 373 374 /* 375 * For multiple regions we need to be careful to rewind 376 * the dp object for each call to do_region. 377 */ 378 for (i = 0; i < num_regions; i++) { 379 *dp = old_pages; 380 if (where[i].count || (rw & REQ_FLUSH)) 381 do_region(rw, i, where + i, dp, io); 382 } 383 384 /* 385 * Drop the extra reference that we were holding to avoid 386 * the io being completed too early. 387 */ 388 dec_count(io, 0, 0); 389 } 390 391 struct sync_io { 392 unsigned long error_bits; 393 struct completion wait; 394 }; 395 396 static void sync_io_complete(unsigned long error, void *context) 397 { 398 struct sync_io *sio = context; 399 400 sio->error_bits = error; 401 complete(&sio->wait); 402 } 403 404 static int sync_io(struct dm_io_client *client, unsigned int num_regions, 405 struct dm_io_region *where, int rw, struct dpages *dp, 406 unsigned long *error_bits) 407 { 408 struct io *io; 409 struct sync_io sio; 410 411 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 412 WARN_ON(1); 413 return -EIO; 414 } 415 416 init_completion(&sio.wait); 417 418 io = mempool_alloc(client->pool, GFP_NOIO); 419 io->error_bits = 0; 420 atomic_set(&io->count, 1); /* see dispatch_io() */ 421 io->client = client; 422 io->callback = sync_io_complete; 423 io->context = &sio; 424 425 io->vma_invalidate_address = dp->vma_invalidate_address; 426 io->vma_invalidate_size = dp->vma_invalidate_size; 427 428 dispatch_io(rw, num_regions, where, dp, io, 1); 429 430 wait_for_completion_io(&sio.wait); 431 432 if (error_bits) 433 *error_bits = sio.error_bits; 434 435 return sio.error_bits ? -EIO : 0; 436 } 437 438 static int async_io(struct dm_io_client *client, unsigned int num_regions, 439 struct dm_io_region *where, int rw, struct dpages *dp, 440 io_notify_fn fn, void *context) 441 { 442 struct io *io; 443 444 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 445 WARN_ON(1); 446 fn(1, context); 447 return -EIO; 448 } 449 450 io = mempool_alloc(client->pool, GFP_NOIO); 451 io->error_bits = 0; 452 atomic_set(&io->count, 1); /* see dispatch_io() */ 453 io->client = client; 454 io->callback = fn; 455 io->context = context; 456 457 io->vma_invalidate_address = dp->vma_invalidate_address; 458 io->vma_invalidate_size = dp->vma_invalidate_size; 459 460 dispatch_io(rw, num_regions, where, dp, io, 0); 461 return 0; 462 } 463 464 static int dp_init(struct dm_io_request *io_req, struct dpages *dp, 465 unsigned long size) 466 { 467 /* Set up dpages based on memory type */ 468 469 dp->vma_invalidate_address = NULL; 470 dp->vma_invalidate_size = 0; 471 472 switch (io_req->mem.type) { 473 case DM_IO_PAGE_LIST: 474 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); 475 break; 476 477 case DM_IO_BIO: 478 bio_dp_init(dp, io_req->mem.ptr.bio); 479 break; 480 481 case DM_IO_VMA: 482 flush_kernel_vmap_range(io_req->mem.ptr.vma, size); 483 if ((io_req->bi_rw & RW_MASK) == READ) { 484 dp->vma_invalidate_address = io_req->mem.ptr.vma; 485 dp->vma_invalidate_size = size; 486 } 487 vm_dp_init(dp, io_req->mem.ptr.vma); 488 break; 489 490 case DM_IO_KMEM: 491 km_dp_init(dp, io_req->mem.ptr.addr); 492 break; 493 494 default: 495 return -EINVAL; 496 } 497 498 return 0; 499 } 500 501 /* 502 * New collapsed (a)synchronous interface. 503 * 504 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug 505 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw. 506 * If you fail to do one of these, the IO will be submitted to the disk after 507 * q->unplug_delay, which defaults to 3ms in blk-settings.c. 508 */ 509 int dm_io(struct dm_io_request *io_req, unsigned num_regions, 510 struct dm_io_region *where, unsigned long *sync_error_bits) 511 { 512 int r; 513 struct dpages dp; 514 515 r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT); 516 if (r) 517 return r; 518 519 if (!io_req->notify.fn) 520 return sync_io(io_req->client, num_regions, where, 521 io_req->bi_rw, &dp, sync_error_bits); 522 523 return async_io(io_req->client, num_regions, where, io_req->bi_rw, 524 &dp, io_req->notify.fn, io_req->notify.context); 525 } 526 EXPORT_SYMBOL(dm_io); 527 528 int __init dm_io_init(void) 529 { 530 _dm_io_cache = KMEM_CACHE(io, 0); 531 if (!_dm_io_cache) 532 return -ENOMEM; 533 534 return 0; 535 } 536 537 void dm_io_exit(void) 538 { 539 kmem_cache_destroy(_dm_io_cache); 540 _dm_io_cache = NULL; 541 } 542