/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct work_struct deactivate_path;
	struct work_struct activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	spinlock_t lock;

	const char *hw_handler_name;
	char *hw_handler_params;
	unsigned nr_priority_groups;
	struct list_head priority_groups;
	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io;		/* Must we queue all I/O? */
	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path;/* Saved state during suspension */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */

	struct work_struct process_queued_ios;
	struct list_head queued_ios;
	unsigned queue_size;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
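 * (One dm_mpath_io is allocated from mpio_pool for each cloned request
 * and stored in the request's map_info->ptr; see multipath_map() and
 * dispatch_queued_ios().)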
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static void deactivate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
		INIT_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static void deactivate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, deactivate_path);

	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}


/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;

	m->pg_init_count++;
	m->pg_init_required = 0;
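	/* Queue pg_init activation work for each active path in the current PG. */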
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes))
				return;
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  struct dm_mpath_io *mpio, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath?
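	 * Yes: when no path is currently selected, or when we are not
	 * queueing I/O and the selector's repeat_count for the current
	 * path has been used up.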
	 */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	struct dm_mpath_io *mpio;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);
		mpio = info->ptr;

		r = map_io(m, clone, mpio, 1);
		if (r < 0) {
			mempool_free(mpio, m->mpio_pool);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			mempool_free(mpio, m->mpio_pool);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
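/*
 * For illustration, a target argument string with no feature or hardware
 * handler args and a single priority group of two paths might look like
 * (device numbers and repeat counts are example values only):
 *
 *     0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. 0 feature args, 0 hw_handler args, 1 group, initial group 1, the
 * "round-robin" selector with 0 selector args, 2 paths and 1 per-path
 * selector arg (a repeat count) for each path.
 */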
struct param {
	unsigned min;
	unsigned max;
	char *error;
};

static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
	if (!str ||
	    (sscanf(str, "%u", v) != 1) ||
	    (*v < param->min) ||
	    (*v > param->max)) {
		*error = param->error;
		return -EINVAL;
	}

	return 0;
}

struct arg_set {
	unsigned argc;
	char **argv;
};

static char *shift(struct arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}

static void consume(struct arg_set *as, unsigned n)
{
	BUG_ON(as->argc < n);
	as->argc -= n;
	as->argv += n;
}

static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct param _params[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(shift(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = read_param(_params, shift(as), &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	if (ps_argc > as->argc) {
		dm_put_path_selector(pst);
		ti->error = "not enough arguments for path selector";
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	consume(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler,
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
						"handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct arg_set *as,
						   struct multipath *m)
{
	static struct param _params[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_params;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_params = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct arg_set path_args;

		if (as->argc < nr_params) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_params;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		consume(as, nr_params);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct param _params[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (read_param(_params, shift(as), &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (hw_argc > as->argc) {
		ti->error = "not enough arguments for hardware handler";
		return -EINVAL;
	}

	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
	request_module("scsi_dh_%s", m->hw_handler_name);
	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	consume(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *param_name;

	static struct param _params[] = {
		{0, 3, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
	};

	r = read_param(_params, shift(as), &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		param_name = shift(as);
		argc--;

		if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
		    (argc >= 1)) {
			r = read_param(_params + 1, shift(as),
				       &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target parameters */
	static struct param _params[] = {
		{1, 1024, "invalid number of priority groups"},
		{1, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
	if (r)
		goto bad;

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_requests = 1;

	return 0;

bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_scheduled_work();
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct dm_mpath_io *mpio;
	struct multipath *m = (struct multipath *) ti->private;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;
	memset(mpio, 0, sizeof(*mpio));

	map_context->ptr = mpio;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, mpio, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Take a path out of use.
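 * The path is marked inactive in its priority group's selector, counters
 * are updated, a DM_UEVENT_PATH_FAILED uevent is sent and a table event
 * is scheduled.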
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);
	queue_work(kmultipathd, &pgpath->deactivate_path);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
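 * Used by the message interface; the return code of the last matching
 * path's action is reported.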
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = 0;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
	case SCSI_DH_RETRY:
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
			 pg_init_done, pgpath);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (error == -EOPNOTSUPP)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
		r = -EIO;
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
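 * (multipath_presuspend() clears queue_if_no_path but saves the previous
 * value, which multipath_resume() restores.)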
1310 */ 1311 static void multipath_presuspend(struct dm_target *ti) 1312 { 1313 struct multipath *m = (struct multipath *) ti->private; 1314 1315 queue_if_no_path(m, 0, 1); 1316 } 1317 1318 static void multipath_postsuspend(struct dm_target *ti) 1319 { 1320 struct multipath *m = ti->private; 1321 1322 mutex_lock(&m->work_mutex); 1323 flush_multipath_work(m); 1324 mutex_unlock(&m->work_mutex); 1325 } 1326 1327 /* 1328 * Restore the queue_if_no_path setting. 1329 */ 1330 static void multipath_resume(struct dm_target *ti) 1331 { 1332 struct multipath *m = (struct multipath *) ti->private; 1333 unsigned long flags; 1334 1335 spin_lock_irqsave(&m->lock, flags); 1336 m->queue_if_no_path = m->saved_queue_if_no_path; 1337 spin_unlock_irqrestore(&m->lock, flags); 1338 } 1339 1340 /* 1341 * Info output has the following format: 1342 * num_multipath_feature_args [multipath_feature_args]* 1343 * num_handler_status_args [handler_status_args]* 1344 * num_groups init_group_number 1345 * [A|D|E num_ps_status_args [ps_status_args]* 1346 * num_paths num_selector_args 1347 * [path_dev A|F fail_count [selector_args]* ]+ ]+ 1348 * 1349 * Table output has the following format (identical to the constructor string): 1350 * num_feature_args [features_args]* 1351 * num_handler_args hw_handler [hw_handler_args]* 1352 * num_groups init_group_number 1353 * [priority selector-name num_ps_args [ps_args]* 1354 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ 1355 */ 1356 static int multipath_status(struct dm_target *ti, status_type_t type, 1357 char *result, unsigned int maxlen) 1358 { 1359 int sz = 0; 1360 unsigned long flags; 1361 struct multipath *m = (struct multipath *) ti->private; 1362 struct priority_group *pg; 1363 struct pgpath *p; 1364 unsigned pg_num; 1365 char state; 1366 1367 spin_lock_irqsave(&m->lock, flags); 1368 1369 /* Features */ 1370 if (type == STATUSTYPE_INFO) 1371 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); 1372 else { 1373 DMEMIT("%u ", m->queue_if_no_path + 1374 (m->pg_init_retries > 0) * 2); 1375 if (m->queue_if_no_path) 1376 DMEMIT("queue_if_no_path "); 1377 if (m->pg_init_retries) 1378 DMEMIT("pg_init_retries %u ", m->pg_init_retries); 1379 } 1380 1381 if (!m->hw_handler_name || type == STATUSTYPE_INFO) 1382 DMEMIT("0 "); 1383 else 1384 DMEMIT("1 %s ", m->hw_handler_name); 1385 1386 DMEMIT("%u ", m->nr_priority_groups); 1387 1388 if (m->next_pg) 1389 pg_num = m->next_pg->pg_num; 1390 else if (m->current_pg) 1391 pg_num = m->current_pg->pg_num; 1392 else 1393 pg_num = 1; 1394 1395 DMEMIT("%u ", pg_num); 1396 1397 switch (type) { 1398 case STATUSTYPE_INFO: 1399 list_for_each_entry(pg, &m->priority_groups, list) { 1400 if (pg->bypassed) 1401 state = 'D'; /* Disabled */ 1402 else if (pg == m->current_pg) 1403 state = 'A'; /* Currently Active */ 1404 else 1405 state = 'E'; /* Enabled */ 1406 1407 DMEMIT("%c ", state); 1408 1409 if (pg->ps.type->status) 1410 sz += pg->ps.type->status(&pg->ps, NULL, type, 1411 result + sz, 1412 maxlen - sz); 1413 else 1414 DMEMIT("0 "); 1415 1416 DMEMIT("%u %u ", pg->nr_pgpaths, 1417 pg->ps.type->info_args); 1418 1419 list_for_each_entry(p, &pg->pgpaths, list) { 1420 DMEMIT("%s %s %u ", p->path.dev->name, 1421 p->is_active ? 
"A" : "F", 1422 p->fail_count); 1423 if (pg->ps.type->status) 1424 sz += pg->ps.type->status(&pg->ps, 1425 &p->path, type, result + sz, 1426 maxlen - sz); 1427 } 1428 } 1429 break; 1430 1431 case STATUSTYPE_TABLE: 1432 list_for_each_entry(pg, &m->priority_groups, list) { 1433 DMEMIT("%s ", pg->ps.type->name); 1434 1435 if (pg->ps.type->status) 1436 sz += pg->ps.type->status(&pg->ps, NULL, type, 1437 result + sz, 1438 maxlen - sz); 1439 else 1440 DMEMIT("0 "); 1441 1442 DMEMIT("%u %u ", pg->nr_pgpaths, 1443 pg->ps.type->table_args); 1444 1445 list_for_each_entry(p, &pg->pgpaths, list) { 1446 DMEMIT("%s ", p->path.dev->name); 1447 if (pg->ps.type->status) 1448 sz += pg->ps.type->status(&pg->ps, 1449 &p->path, type, result + sz, 1450 maxlen - sz); 1451 } 1452 } 1453 break; 1454 } 1455 1456 spin_unlock_irqrestore(&m->lock, flags); 1457 1458 return 0; 1459 } 1460 1461 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) 1462 { 1463 int r = -EINVAL; 1464 struct dm_dev *dev; 1465 struct multipath *m = (struct multipath *) ti->private; 1466 action_fn action; 1467 1468 mutex_lock(&m->work_mutex); 1469 1470 if (dm_suspended(ti)) { 1471 r = -EBUSY; 1472 goto out; 1473 } 1474 1475 if (argc == 1) { 1476 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { 1477 r = queue_if_no_path(m, 1, 0); 1478 goto out; 1479 } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { 1480 r = queue_if_no_path(m, 0, 0); 1481 goto out; 1482 } 1483 } 1484 1485 if (argc != 2) { 1486 DMWARN("Unrecognised multipath message received."); 1487 goto out; 1488 } 1489 1490 if (!strnicmp(argv[0], MESG_STR("disable_group"))) { 1491 r = bypass_pg_num(m, argv[1], 1); 1492 goto out; 1493 } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { 1494 r = bypass_pg_num(m, argv[1], 0); 1495 goto out; 1496 } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { 1497 r = switch_pg_num(m, argv[1]); 1498 goto out; 1499 } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) 1500 action = reinstate_path; 1501 else if (!strnicmp(argv[0], MESG_STR("fail_path"))) 1502 action = fail_path; 1503 else { 1504 DMWARN("Unrecognised multipath message received."); 1505 goto out; 1506 } 1507 1508 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); 1509 if (r) { 1510 DMWARN("message: error getting device %s", 1511 argv[1]); 1512 goto out; 1513 } 1514 1515 r = action_dev(m, dev, action); 1516 1517 dm_put_device(ti, dev); 1518 1519 out: 1520 mutex_unlock(&m->work_mutex); 1521 return r; 1522 } 1523 1524 static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, 1525 unsigned long arg) 1526 { 1527 struct multipath *m = (struct multipath *) ti->private; 1528 struct block_device *bdev = NULL; 1529 fmode_t mode = 0; 1530 unsigned long flags; 1531 int r = 0; 1532 1533 spin_lock_irqsave(&m->lock, flags); 1534 1535 if (!m->current_pgpath) 1536 __choose_pgpath(m, 0); 1537 1538 if (m->current_pgpath) { 1539 bdev = m->current_pgpath->path.dev->bdev; 1540 mode = m->current_pgpath->path.dev->mode; 1541 } 1542 1543 if (m->queue_io) 1544 r = -EAGAIN; 1545 else if (!bdev) 1546 r = -EIO; 1547 1548 spin_unlock_irqrestore(&m->lock, flags); 1549 1550 return r ? 
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = create_workqueue("kmpathd");
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");