/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Do not report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 * (Note that the "exit" label jumps into the if-body: an errored
	 * clone bio still reports the bytes that completed successfully
	 * before the error.)
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
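
/*
 * PDU layout sketch (as configured in dm_mq_init_request_queue() below):
 * blk-mq allocates cmd_size extra bytes behind every struct request, so
 * the tio, and any target per-io data, live inline with the request:
 *
 *	+----------------+-------------------------+--------------------+
 *	| struct request | struct dm_rq_target_io  | per-io data (opt.) |
 *	+----------------+-------------------------+--------------------+
 *	                 ^ blk_mq_rq_to_pdu(rq)    ^ tio->info.ptr
 *
 * The optional per-io area exists only when the immutable target sets
 * per_io_data_size, in which case md->init_tio_pdu is true.
 */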

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, take a dm_get() reference before calling this function
 * and dm_put() it afterwards.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}
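
/*
 * Requeue flow sketch: blk_mq_requeue_request(rq, false) above only parks
 * the original request on the queue's requeue list (the false argument
 * means "don't kick the list yet"); the following
 * __dm_mq_kick_requeue_list() then schedules the actual re-dispatch,
 * delayed by delay_ms milliseconds when the target asked for a delayed
 * requeue.
 */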

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
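
/*
 * Completion fan-in sketch: every completion path funnels through
 * dm_complete_request(), which stashes the status in tio->error and lets
 * blk-mq bounce the request to dm_softirq_done() (wired up as .complete
 * in dm_mq_ops below). RQF_FAILED, set by dm_kill_unmapped_request(), is
 * what makes dm_softirq_done() pass mapped == false so the target's
 * rq_end_io() hook is skipped for requests that were never mapped.
 */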

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_SUBMITTED, DM_MAPIO_REMAPPED, DM_MAPIO_KILL :
 *	the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to fail the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
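
/*
 * Minimal clone_and_map_rq() sketch for reference, kept as a comment so it
 * is not compiled. This is a hypothetical target (dm-mpath is the in-tree
 * user of this hook); the function name and the way q is obtained are
 * illustrative assumptions, not part of this file:
 *
 *	static int example_clone_and_map_rq(struct dm_target *ti,
 *					    struct request *rq,
 *					    union map_info *map_context,
 *					    struct request **clone)
 *	{
 *		struct request_queue *q = ...;	// underlying device's queue
 *		struct request *c;
 *
 *		c = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
 *				    BLK_MQ_REQ_NOWAIT);
 *		if (IS_ERR(c))
 *			return DM_MAPIO_DELAY_REQUEUE;
 *		*clone = c;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */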

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
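
/*
 * Note on the DM_MAPIO_REQUEUE path above: returning BLK_STS_RESOURCE
 * from .queue_rq tells blk-mq to keep ownership of the request and
 * dispatch it again later, so everything dm_start_request() did (stats
 * accounting, the md reference) must be unwound first. The earlier
 * busy() bail-out returns BLK_STS_RESOURCE before dm_start_request(),
 * so it has nothing to undo.
 */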

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
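
/*
 * Usage sketch for the parameters above (assuming this file is built into
 * the usual dm_mod module; adjust the module name if your kernel packages
 * it differently):
 *
 *	modprobe dm_mod dm_mq_queue_depth=4096 dm_mq_nr_hw_queues=4
 *
 * or at runtime, since the parameters are S_IWUSR-writable:
 *
 *	echo 4096 > /sys/module/dm_mod/parameters/dm_mq_queue_depth
 *
 * Values are clamped when read back through __dm_get_module_param(),
 * e.g. dm_mq_nr_hw_queues to the range [1, 32].
 */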