/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

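/*
 * Illustrative sketch, not part of the original file: a round trip
 * through the encode/decode scheme above. With DM_IO_MAX_REGIONS == 64
 * on a 64-bit build, an aligned 'struct io' has its low six bits clear,
 * so a region index of 0..63 can be stored there losslessly. The
 * function name is hypothetical.
 */
static bool __maybe_unused io_region_roundtrip_ok(struct io *io, unsigned region)
{
	unsigned long packed = (unsigned long)io | region;
	struct io *unpacked_io = (struct io *)(packed & -(unsigned long)DM_IO_MAX_REGIONS);
	unsigned unpacked_region = packed & (DM_IO_MAX_REGIONS - 1);

	/* Both values survive the trip through a single pointer-sized word. */
	return unpacked_io == io && unpacked_region == region;
}
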
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

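/*
 * Illustrative sketch, not part of the original file: how a consumer
 * walks a dpages iterator, regardless of which *_dp_init() variant set
 * it up. get_page() yields the current page, the bytes available in it
 * and the starting offset; next_page() advances to the next page. The
 * function name is hypothetical.
 */
static void __maybe_unused dpages_walk_example(struct dpages *dp, unsigned long total)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (total) {
		dp->get_page(dp, &page, &len, &offset);
		len = min(len, total);
		/* ... hand (page, offset, len) to the actual consumer ... */
		total -= len;
		dp->next_page(dp);
	}
}
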
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

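/*
 * Illustrative sketch, not part of the original file: for a buffer that
 * starts mid-page, the first get_page() above returns a short span so
 * that every subsequent span starts on a page boundary. For example, a
 * kmalloc'd buffer at page_base + 512 first yields offset == 512 and
 * len == PAGE_SIZE - 512. The function name is hypothetical.
 */
static bool __maybe_unused km_dp_first_span_ok(void *data)
{
	struct dpages dp;
	struct page *page;
	unsigned long len;
	unsigned offset;

	km_dp_init(&dp, data);
	dp.get_page(&dp, &page, &len, &offset);

	return offset == (((unsigned long) data) & (PAGE_SIZE - 1)) &&
	       len == PAGE_SIZE - offset;
}
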
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

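/*
 * Illustrative sketch, not part of the original file: the alignment
 * trick used by sync_io() in isolation. Given a raw buffer of
 * sizeof(struct io) + __alignof__(struct io) - 1 bytes, PTR_ALIGN()
 * rounds up to the next alignment boundary, which is guaranteed to
 * leave room for the whole struct inside the buffer. The function name
 * is hypothetical.
 */
static struct io * __maybe_unused align_io_in_buffer(char *raw)
{
	return (struct io *)PTR_ALIGN(raw, __alignof__(struct io));
}
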
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}
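
/*
 * Illustrative usage sketch, not part of the original file: a
 * synchronous read of one region into a kernel buffer. Leaving
 * notify.fn NULL makes dm_io() take the sync_io() path and sleep until
 * the region completes. The function name is hypothetical; the caller
 * is assumed to own 'client' and 'bdev'.
 */
static int __maybe_unused dm_io_sync_read_example(struct dm_io_client *client,
						  struct block_device *bdev,
						  sector_t sector, sector_t count,
						  void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = sector,
		.count = count,
	};
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.notify.fn = NULL,
		.client = client,
	};

	/* Returns 0 on success, -EIO with error_bits set on failure. */
	return dm_io(&io_req, 1, &where, &error_bits);
}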