/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;

	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;
	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		/* FIXME: not needed if no scsi_dh is attached */
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed)
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
do {									\
	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
									\
	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
		 dm_device_name(md),					\
		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
		 dm_noflush_suspending((m)->ti));			\
} while (0)

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
 * the same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static bool __must_push_back(struct multipath *m, unsigned long flags)
{
	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
		dm_noflush_suspending(m->ti));
}

/*
 * Following functions use READ_ONCE to get atomic access to
 * all m->flags to avoid taking spinlock
 */
static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags = READ_ONCE(m->flags);
	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
}

static bool must_push_back_bio(struct multipath *m)
{
	unsigned long flags = READ_ONCE(m->flags);
	return __must_push_back(m, flags);
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		if (q->mq_ops)
			return DM_MAPIO_REQUEUE;
		else
			return DM_MAPIO_DELAY_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;
	bool queue_io;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
	if (!pgpath || !queue_io)
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if ((pgpath && queue_io) ||
	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
		/* Queue for the daemon to resubmit */
		spin_lock_irqsave(&m->lock, flags);
		bio_list_add(&m->queued_bios, bio);
		spin_unlock_irqrestore(&m->lock, flags);

		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		else if (!queue_io)
			queue_work(kmultipathd, &m->process_queued_bios);

		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}

static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;

	/* Do we need to select a new pgpath? */
	/*
	 * FIXME: currently only switching path if no path (due to failure, etc)
	 * - which negates the point of using a path selector
	 */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath)
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			/* Queue for the daemon to resubmit */
			spin_lock_irqsave(&m->lock, flags);
			bio_list_add(&m->queued_bios, bio);
			spin_unlock_irqrestore(&m->lock, flags);
			queue_work(kmultipathd, &m->process_queued_bios);

			return ERR_PTR(-EAGAIN);
		}
		return NULL;
	}

	return pgpath;
}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath;

	if (!m->hw_handler_name)
		pgpath = __map_bio_fast(m, bio);
	else
		pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (must_push_back_bio(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			generic_make_request(bio);
			break;
		case 0:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
			    bool save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
		   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
		   (!save_old_value && queue_if_no_path));
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
	spin_unlock_irqrestore(&m->lock, flags);

	if (!queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
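/*
 * A purely illustrative table line for the layout above (not taken from any
 * particular setup): no feature args, no hardware handler, one priority
 * group using the round-robin selector with two paths and one per-path
 * selector arg (repeat count) each.  The device numbers are hypothetical:
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 */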
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
{
	struct request_queue *q = bdev_get_queue(bdev);
	const char *attached_handler_name;
	int r;

	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			/*
			 * Init fields that are only used when a scsi_dh is attached
			 */
			if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
				atomic_set(&m->pg_init_in_progress, 0);
				atomic_set(&m->pg_init_count, 0);
				m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
				init_waitqueue_head(&m->pg_init_wait);
			}
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			char b[BDEVNAME_SIZE];

			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
			       bdevname(bdev, b));
			goto retain;
		}
		if (r < 0) {
			*error = "error attaching hardware handler";
			return r;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				*error = "unable to set hardware handler parameters";
				return r;
			}
		}
	}

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct scsi_device *sdev;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
	if (sdev) {
		put_device(&sdev->sdev_gendev);
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;
bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else if (!strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		smp_mb__after_atomic();

		flush_workqueue(kmpath_handlerd);
		multipath_wait_for_pg_init_completion(m);

		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		smp_mb__after_atomic();
	}

	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
1245 */ 1246 static int fail_path(struct pgpath *pgpath) 1247 { 1248 unsigned long flags; 1249 struct multipath *m = pgpath->pg->m; 1250 1251 spin_lock_irqsave(&m->lock, flags); 1252 1253 if (!pgpath->is_active) 1254 goto out; 1255 1256 DMWARN("Failing path %s.", pgpath->path.dev->name); 1257 1258 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); 1259 pgpath->is_active = false; 1260 pgpath->fail_count++; 1261 1262 atomic_dec(&m->nr_valid_paths); 1263 1264 if (pgpath == m->current_pgpath) 1265 m->current_pgpath = NULL; 1266 1267 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, 1268 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); 1269 1270 schedule_work(&m->trigger_event); 1271 1272 out: 1273 spin_unlock_irqrestore(&m->lock, flags); 1274 1275 return 0; 1276 } 1277 1278 /* 1279 * Reinstate a previously-failed path 1280 */ 1281 static int reinstate_path(struct pgpath *pgpath) 1282 { 1283 int r = 0, run_queue = 0; 1284 unsigned long flags; 1285 struct multipath *m = pgpath->pg->m; 1286 unsigned nr_valid_paths; 1287 1288 spin_lock_irqsave(&m->lock, flags); 1289 1290 if (pgpath->is_active) 1291 goto out; 1292 1293 DMWARN("Reinstating path %s.", pgpath->path.dev->name); 1294 1295 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); 1296 if (r) 1297 goto out; 1298 1299 pgpath->is_active = true; 1300 1301 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths); 1302 if (nr_valid_paths == 1) { 1303 m->current_pgpath = NULL; 1304 run_queue = 1; 1305 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 1306 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 1307 atomic_inc(&m->pg_init_in_progress); 1308 } 1309 1310 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, 1311 pgpath->path.dev->name, nr_valid_paths); 1312 1313 schedule_work(&m->trigger_event); 1314 1315 out: 1316 spin_unlock_irqrestore(&m->lock, flags); 1317 if (run_queue) { 1318 dm_table_run_md_queue_async(m->ti->table); 1319 process_queued_io_list(m); 1320 } 1321 1322 return r; 1323 } 1324 1325 /* 1326 * Fail or reinstate all paths that match the provided struct dm_dev. 
1327 */ 1328 static int action_dev(struct multipath *m, struct dm_dev *dev, 1329 action_fn action) 1330 { 1331 int r = -EINVAL; 1332 struct pgpath *pgpath; 1333 struct priority_group *pg; 1334 1335 list_for_each_entry(pg, &m->priority_groups, list) { 1336 list_for_each_entry(pgpath, &pg->pgpaths, list) { 1337 if (pgpath->path.dev == dev) 1338 r = action(pgpath); 1339 } 1340 } 1341 1342 return r; 1343 } 1344 1345 /* 1346 * Temporarily try to avoid having to use the specified PG 1347 */ 1348 static void bypass_pg(struct multipath *m, struct priority_group *pg, 1349 bool bypassed) 1350 { 1351 unsigned long flags; 1352 1353 spin_lock_irqsave(&m->lock, flags); 1354 1355 pg->bypassed = bypassed; 1356 m->current_pgpath = NULL; 1357 m->current_pg = NULL; 1358 1359 spin_unlock_irqrestore(&m->lock, flags); 1360 1361 schedule_work(&m->trigger_event); 1362 } 1363 1364 /* 1365 * Switch to using the specified PG from the next I/O that gets mapped 1366 */ 1367 static int switch_pg_num(struct multipath *m, const char *pgstr) 1368 { 1369 struct priority_group *pg; 1370 unsigned pgnum; 1371 unsigned long flags; 1372 char dummy; 1373 1374 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum || 1375 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { 1376 DMWARN("invalid PG number supplied to switch_pg_num"); 1377 return -EINVAL; 1378 } 1379 1380 spin_lock_irqsave(&m->lock, flags); 1381 list_for_each_entry(pg, &m->priority_groups, list) { 1382 pg->bypassed = false; 1383 if (--pgnum) 1384 continue; 1385 1386 m->current_pgpath = NULL; 1387 m->current_pg = NULL; 1388 m->next_pg = pg; 1389 } 1390 spin_unlock_irqrestore(&m->lock, flags); 1391 1392 schedule_work(&m->trigger_event); 1393 return 0; 1394 } 1395 1396 /* 1397 * Set/clear bypassed status of a PG. 1398 * PGs are numbered upwards from 1 in the order they were declared. 1399 */ 1400 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed) 1401 { 1402 struct priority_group *pg; 1403 unsigned pgnum; 1404 char dummy; 1405 1406 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum || 1407 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { 1408 DMWARN("invalid PG number supplied to bypass_pg"); 1409 return -EINVAL; 1410 } 1411 1412 list_for_each_entry(pg, &m->priority_groups, list) { 1413 if (!--pgnum) 1414 break; 1415 } 1416 1417 bypass_pg(m, pg, bypassed); 1418 return 0; 1419 } 1420 1421 /* 1422 * Should we retry pg_init immediately? 
1423 */ 1424 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) 1425 { 1426 unsigned long flags; 1427 bool limit_reached = false; 1428 1429 spin_lock_irqsave(&m->lock, flags); 1430 1431 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries && 1432 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) 1433 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 1434 else 1435 limit_reached = true; 1436 1437 spin_unlock_irqrestore(&m->lock, flags); 1438 1439 return limit_reached; 1440 } 1441 1442 static void pg_init_done(void *data, int errors) 1443 { 1444 struct pgpath *pgpath = data; 1445 struct priority_group *pg = pgpath->pg; 1446 struct multipath *m = pg->m; 1447 unsigned long flags; 1448 bool delay_retry = false; 1449 1450 /* device or driver problems */ 1451 switch (errors) { 1452 case SCSI_DH_OK: 1453 break; 1454 case SCSI_DH_NOSYS: 1455 if (!m->hw_handler_name) { 1456 errors = 0; 1457 break; 1458 } 1459 DMERR("Could not failover the device: Handler scsi_dh_%s " 1460 "Error %d.", m->hw_handler_name, errors); 1461 /* 1462 * Fail path for now, so we do not ping pong 1463 */ 1464 fail_path(pgpath); 1465 break; 1466 case SCSI_DH_DEV_TEMP_BUSY: 1467 /* 1468 * Probably doing something like FW upgrade on the 1469 * controller so try the other pg. 1470 */ 1471 bypass_pg(m, pg, true); 1472 break; 1473 case SCSI_DH_RETRY: 1474 /* Wait before retrying. */ 1475 delay_retry = 1; 1476 /* fall through */ 1477 case SCSI_DH_IMM_RETRY: 1478 case SCSI_DH_RES_TEMP_UNAVAIL: 1479 if (pg_init_limit_reached(m, pgpath)) 1480 fail_path(pgpath); 1481 errors = 0; 1482 break; 1483 case SCSI_DH_DEV_OFFLINED: 1484 default: 1485 /* 1486 * We probably do not want to fail the path for a device 1487 * error, but this is what the old dm did. In future 1488 * patches we can do more advanced handling. 1489 */ 1490 fail_path(pgpath); 1491 } 1492 1493 spin_lock_irqsave(&m->lock, flags); 1494 if (errors) { 1495 if (pgpath == m->current_pgpath) { 1496 DMERR("Could not failover device. Error %d.", errors); 1497 m->current_pgpath = NULL; 1498 m->current_pg = NULL; 1499 } 1500 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) 1501 pg->bypassed = false; 1502 1503 if (atomic_dec_return(&m->pg_init_in_progress) > 0) 1504 /* Activations of other paths are still on going */ 1505 goto out; 1506 1507 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { 1508 if (delay_retry) 1509 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags); 1510 else 1511 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags); 1512 1513 if (__pg_init_all_paths(m)) 1514 goto out; 1515 } 1516 clear_bit(MPATHF_QUEUE_IO, &m->flags); 1517 1518 process_queued_io_list(m); 1519 1520 /* 1521 * Wake up any thread waiting to suspend. 
1522 */ 1523 wake_up(&m->pg_init_wait); 1524 1525 out: 1526 spin_unlock_irqrestore(&m->lock, flags); 1527 } 1528 1529 static void activate_or_offline_path(struct pgpath *pgpath) 1530 { 1531 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); 1532 1533 if (pgpath->is_active && !blk_queue_dying(q)) 1534 scsi_dh_activate(q, pg_init_done, pgpath); 1535 else 1536 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); 1537 } 1538 1539 static void activate_path_work(struct work_struct *work) 1540 { 1541 struct pgpath *pgpath = 1542 container_of(work, struct pgpath, activate_path.work); 1543 1544 activate_or_offline_path(pgpath); 1545 } 1546 1547 static int multipath_end_io(struct dm_target *ti, struct request *clone, 1548 blk_status_t error, union map_info *map_context) 1549 { 1550 struct dm_mpath_io *mpio = get_mpio(map_context); 1551 struct pgpath *pgpath = mpio->pgpath; 1552 int r = DM_ENDIO_DONE; 1553 1554 /* 1555 * We don't queue any clone request inside the multipath target 1556 * during end I/O handling, since those clone requests don't have 1557 * bio clones. If we queue them inside the multipath target, 1558 * we need to make bio clones, that requires memory allocation. 1559 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests 1560 * don't have bio clones.) 1561 * Instead of queueing the clone request here, we queue the original 1562 * request into dm core, which will remake a clone request and 1563 * clone bios for it and resubmit it later. 1564 */ 1565 if (error && blk_path_error(error)) { 1566 struct multipath *m = ti->private; 1567 1568 if (error == BLK_STS_RESOURCE) 1569 r = DM_ENDIO_DELAY_REQUEUE; 1570 else 1571 r = DM_ENDIO_REQUEUE; 1572 1573 if (pgpath) 1574 fail_path(pgpath); 1575 1576 if (atomic_read(&m->nr_valid_paths) == 0 && 1577 !must_push_back_rq(m)) { 1578 if (error == BLK_STS_IOERR) 1579 dm_report_EIO(m); 1580 /* complete with the original error */ 1581 r = DM_ENDIO_DONE; 1582 } 1583 } 1584 1585 if (pgpath) { 1586 struct path_selector *ps = &pgpath->pg->ps; 1587 1588 if (ps->type->end_io) 1589 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); 1590 } 1591 1592 return r; 1593 } 1594 1595 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, 1596 blk_status_t *error) 1597 { 1598 struct multipath *m = ti->private; 1599 struct dm_mpath_io *mpio = get_mpio_from_bio(clone); 1600 struct pgpath *pgpath = mpio->pgpath; 1601 unsigned long flags; 1602 int r = DM_ENDIO_DONE; 1603 1604 if (!*error || !blk_path_error(*error)) 1605 goto done; 1606 1607 if (pgpath) 1608 fail_path(pgpath); 1609 1610 if (atomic_read(&m->nr_valid_paths) == 0 && 1611 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1612 if (must_push_back_bio(m)) { 1613 r = DM_ENDIO_REQUEUE; 1614 } else { 1615 dm_report_EIO(m); 1616 *error = BLK_STS_IOERR; 1617 } 1618 goto done; 1619 } 1620 1621 spin_lock_irqsave(&m->lock, flags); 1622 bio_list_add(&m->queued_bios, clone); 1623 spin_unlock_irqrestore(&m->lock, flags); 1624 if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) 1625 queue_work(kmultipathd, &m->process_queued_bios); 1626 1627 r = DM_ENDIO_INCOMPLETE; 1628 done: 1629 if (pgpath) { 1630 struct path_selector *ps = &pgpath->pg->ps; 1631 1632 if (ps->type->end_io) 1633 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); 1634 } 1635 1636 return r; 1637 } 1638 1639 /* 1640 * Suspend can't complete until all the I/O is processed so if 1641 * the last path fails we must error any remaining I/O. 
1642 * Note that if the freeze_bdev fails while suspending, the 1643 * queue_if_no_path state is lost - userspace should reset it. 1644 */ 1645 static void multipath_presuspend(struct dm_target *ti) 1646 { 1647 struct multipath *m = ti->private; 1648 1649 queue_if_no_path(m, false, true); 1650 } 1651 1652 static void multipath_postsuspend(struct dm_target *ti) 1653 { 1654 struct multipath *m = ti->private; 1655 1656 mutex_lock(&m->work_mutex); 1657 flush_multipath_work(m); 1658 mutex_unlock(&m->work_mutex); 1659 } 1660 1661 /* 1662 * Restore the queue_if_no_path setting. 1663 */ 1664 static void multipath_resume(struct dm_target *ti) 1665 { 1666 struct multipath *m = ti->private; 1667 unsigned long flags; 1668 1669 spin_lock_irqsave(&m->lock, flags); 1670 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, 1671 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)); 1672 spin_unlock_irqrestore(&m->lock, flags); 1673 } 1674 1675 /* 1676 * Info output has the following format: 1677 * num_multipath_feature_args [multipath_feature_args]* 1678 * num_handler_status_args [handler_status_args]* 1679 * num_groups init_group_number 1680 * [A|D|E num_ps_status_args [ps_status_args]* 1681 * num_paths num_selector_args 1682 * [path_dev A|F fail_count [selector_args]* ]+ ]+ 1683 * 1684 * Table output has the following format (identical to the constructor string): 1685 * num_feature_args [features_args]* 1686 * num_handler_args hw_handler [hw_handler_args]* 1687 * num_groups init_group_number 1688 * [priority selector-name num_ps_args [ps_args]* 1689 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ 1690 */ 1691 static void multipath_status(struct dm_target *ti, status_type_t type, 1692 unsigned status_flags, char *result, unsigned maxlen) 1693 { 1694 int sz = 0; 1695 unsigned long flags; 1696 struct multipath *m = ti->private; 1697 struct priority_group *pg; 1698 struct pgpath *p; 1699 unsigned pg_num; 1700 char state; 1701 1702 spin_lock_irqsave(&m->lock, flags); 1703 1704 /* Features */ 1705 if (type == STATUSTYPE_INFO) 1706 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), 1707 atomic_read(&m->pg_init_count)); 1708 else { 1709 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + 1710 (m->pg_init_retries > 0) * 2 + 1711 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + 1712 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) + 1713 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2); 1714 1715 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1716 DMEMIT("queue_if_no_path "); 1717 if (m->pg_init_retries) 1718 DMEMIT("pg_init_retries %u ", m->pg_init_retries); 1719 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) 1720 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); 1721 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) 1722 DMEMIT("retain_attached_hw_handler "); 1723 if (m->queue_mode != DM_TYPE_REQUEST_BASED) { 1724 switch(m->queue_mode) { 1725 case DM_TYPE_BIO_BASED: 1726 DMEMIT("queue_mode bio "); 1727 break; 1728 case DM_TYPE_MQ_REQUEST_BASED: 1729 DMEMIT("queue_mode mq "); 1730 break; 1731 default: 1732 WARN_ON_ONCE(true); 1733 break; 1734 } 1735 } 1736 } 1737 1738 if (!m->hw_handler_name || type == STATUSTYPE_INFO) 1739 DMEMIT("0 "); 1740 else 1741 DMEMIT("1 %s ", m->hw_handler_name); 1742 1743 DMEMIT("%u ", m->nr_priority_groups); 1744 1745 if (m->next_pg) 1746 pg_num = m->next_pg->pg_num; 1747 else if (m->current_pg) 1748 pg_num = m->current_pg->pg_num; 1749 else 1750 pg_num = (m->nr_priority_groups ? 
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = READ_ONCE(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
1960 */ 1961 static int multipath_busy(struct dm_target *ti) 1962 { 1963 bool busy = false, has_active = false; 1964 struct multipath *m = ti->private; 1965 struct priority_group *pg, *next_pg; 1966 struct pgpath *pgpath; 1967 1968 /* pg_init in progress */ 1969 if (atomic_read(&m->pg_init_in_progress)) 1970 return true; 1971 1972 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */ 1973 if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1974 return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); 1975 1976 /* Guess which priority_group will be used at next mapping time */ 1977 pg = READ_ONCE(m->current_pg); 1978 next_pg = READ_ONCE(m->next_pg); 1979 if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) 1980 pg = next_pg; 1981 1982 if (!pg) { 1983 /* 1984 * We don't know which pg will be used at next mapping time. 1985 * We don't call choose_pgpath() here to avoid to trigger 1986 * pg_init just by busy checking. 1987 * So we don't know whether underlying devices we will be using 1988 * at next mapping time are busy or not. Just try mapping. 1989 */ 1990 return busy; 1991 } 1992 1993 /* 1994 * If there is one non-busy active path at least, the path selector 1995 * will be able to select it. So we consider such a pg as not busy. 1996 */ 1997 busy = true; 1998 list_for_each_entry(pgpath, &pg->pgpaths, list) { 1999 if (pgpath->is_active) { 2000 has_active = true; 2001 if (!pgpath_busy(pgpath)) { 2002 busy = false; 2003 break; 2004 } 2005 } 2006 } 2007 2008 if (!has_active) { 2009 /* 2010 * No active path in this pg, so this pg won't be used and 2011 * the current_pg will be changed at next mapping time. 2012 * We need to try mapping to determine it. 2013 */ 2014 busy = false; 2015 } 2016 2017 return busy; 2018 } 2019 2020 /*----------------------------------------------------------------- 2021 * Module setup 2022 *---------------------------------------------------------------*/ 2023 static struct target_type multipath_target = { 2024 .name = "multipath", 2025 .version = {1, 12, 0}, 2026 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, 2027 .module = THIS_MODULE, 2028 .ctr = multipath_ctr, 2029 .dtr = multipath_dtr, 2030 .clone_and_map_rq = multipath_clone_and_map, 2031 .release_clone_rq = multipath_release_clone, 2032 .rq_end_io = multipath_end_io, 2033 .map = multipath_map_bio, 2034 .end_io = multipath_end_io_bio, 2035 .presuspend = multipath_presuspend, 2036 .postsuspend = multipath_postsuspend, 2037 .resume = multipath_resume, 2038 .status = multipath_status, 2039 .message = multipath_message, 2040 .prepare_ioctl = multipath_prepare_ioctl, 2041 .iterate_devices = multipath_iterate_devices, 2042 .busy = multipath_busy, 2043 }; 2044 2045 static int __init dm_multipath_init(void) 2046 { 2047 int r; 2048 2049 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); 2050 if (!kmultipathd) { 2051 DMERR("failed to create workqueue kmpathd"); 2052 r = -ENOMEM; 2053 goto bad_alloc_kmultipathd; 2054 } 2055 2056 /* 2057 * A separate workqueue is used to handle the device handlers 2058 * to avoid overloading existing workqueue. Overloading the 2059 * old workqueue would also create a bottleneck in the 2060 * path of the storage hardware device activation. 
2061 */ 2062 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd", 2063 WQ_MEM_RECLAIM); 2064 if (!kmpath_handlerd) { 2065 DMERR("failed to create workqueue kmpath_handlerd"); 2066 r = -ENOMEM; 2067 goto bad_alloc_kmpath_handlerd; 2068 } 2069 2070 r = dm_register_target(&multipath_target); 2071 if (r < 0) { 2072 DMERR("request-based register failed %d", r); 2073 r = -EINVAL; 2074 goto bad_register_target; 2075 } 2076 2077 return 0; 2078 2079 bad_register_target: 2080 destroy_workqueue(kmpath_handlerd); 2081 bad_alloc_kmpath_handlerd: 2082 destroy_workqueue(kmultipathd); 2083 bad_alloc_kmultipathd: 2084 return r; 2085 } 2086 2087 static void __exit dm_multipath_exit(void) 2088 { 2089 destroy_workqueue(kmpath_handlerd); 2090 destroy_workqueue(kmultipathd); 2091 2092 dm_unregister_target(&multipath_target); 2093 } 2094 2095 module_init(dm_multipath_init); 2096 module_exit(dm_multipath_exit); 2097 2098 MODULE_DESCRIPTION(DM_NAME " multipath target"); 2099 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); 2100 MODULE_LICENSE("GPL"); 2101