// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "2.0"

#define TPGS_SUPPORT_NONE		0x00
#define TPGS_SUPPORT_OPTIMIZED		0x01
#define TPGS_SUPPORT_NONOPTIMIZED	0x02
#define TPGS_SUPPORT_STANDBY		0x04
#define TPGS_SUPPORT_UNAVAILABLE	0x08
#define TPGS_SUPPORT_LBA_DEPENDENT	0x10
#define TPGS_SUPPORT_OFFLINE		0x40
#define TPGS_SUPPORT_TRANSITION		0x80
#define TPGS_SUPPORT_ALL		0xdf

#define RTPG_FMT_MASK			0x70
#define RTPG_FMT_EXT_HDR		0x10

#define TPGS_MODE_UNINITIALIZED		-1
#define TPGS_MODE_NONE			0x0
#define TPGS_MODE_IMPLICIT		0x1
#define TPGS_MODE_EXPLICIT		0x2

#define ALUA_RTPG_SIZE			128
#define ALUA_FAILOVER_TIMEOUT		60
#define ALUA_FAILOVER_RETRIES		5
#define ALUA_RTPG_DELAY_MSECS		5
#define ALUA_RTPG_RETRY_DELAY		2

/* device handler flags */
#define ALUA_OPTIMIZE_STPG		0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP	0x02
/* State machine flags */
#define ALUA_PG_RUN_RTPG		0x10
#define ALUA_PG_RUN_STPG		0x20
#define ALUA_PG_RUNNING			0x40

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");

static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;

struct alua_port_group {
	struct kref		kref;
	struct rcu_head		rcu;
	struct list_head	node;
	struct list_head	dh_list;
	unsigned char		device_id_str[256];
	int			device_id_len;
	int			group_id;
	int			tpgs;
	int			state;
	int			pref;
	int			valid_states;
	unsigned		flags; /* used for optimizing STPG */
	unsigned char		transition_tmo;
	unsigned long		expiry;
	unsigned long		interval;
	struct delayed_work	rtpg_work;
	spinlock_t		lock;
	struct list_head	rtpg_list;
	struct scsi_device	*rtpg_sdev;
};

struct alua_dh_data {
	struct list_head	node;
	struct alua_port_group __rcu *pg;
	int			group_id;
	spinlock_t		pg_lock;
	struct scsi_device	*sdev;
	int			init_error;
	struct mutex		init_mutex;
	bool			disabled;
};

struct alua_queue_data {
	struct list_head	entry;
	activate_complete	callback_fn;
	void			*callback_data;
};

#define ALUA_POLICY_SWITCH_CURRENT	0
#define ALUA_POLICY_SWITCH_ALL		1

static void alua_rtpg_work(struct work_struct *work);
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);

static void release_port_group(struct kref *kref)
{
	struct alua_port_group *pg;

	pg = container_of(kref, struct alua_port_group, kref);
	if (pg->rtpg_sdev)
		flush_delayed_work(&pg->rtpg_work);
	spin_lock(&port_group_lock);
	list_del(&pg->node);
	spin_unlock(&port_group_lock);
	kfree_rcu(pg, rcu);
}
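
/*
 * For reference, a rough sketch of the CDB built by submit_rtpg()
 * below, per SPC-4 (MAINTENANCE IN with the REPORT TARGET PORT
 * GROUPS service action):
 *
 *	byte 0     MAINTENANCE_IN opcode (0xa3)
 *	byte 1     service action REPORT TARGET PORT GROUPS, optionally
 *	           ORed with MI_EXT_HDR_PARAM_FMT to request the
 *	           extended-header parameter data format
 *	bytes 6-9  allocation length (big-endian)
 */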

/*
 * submit_rtpg - Issue a REPORT TARGET PORT GROUPS command
 * @sdev: sdev the command should be sent to
 */
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
		       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
	u8 cdb[MAX_COMMAND_SIZE];
	blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
			REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_IN;
	if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
		cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
	else
		cdb[1] = MI_REPORT_TARGET_PGS;
	put_unaligned_be32(bufflen, &cdb[6]);

	return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen,
				ALUA_FAILOVER_TIMEOUT * HZ,
				ALUA_FAILOVER_RETRIES, &exec_args);
}
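
/*
 * For reference, the SET TARGET PORT GROUPS request built by
 * submit_stpg() below (again a sketch, per SPC-4):
 *
 *	CDB byte 0	MAINTENANCE_OUT opcode (0xa4)
 *	CDB byte 1	service action SET TARGET PORT GROUPS
 *	CDB bytes 6-9	parameter list length (big-endian)
 *
 * The 8-byte parameter list carries a single descriptor:
 *
 *	byte 4		desired asymmetric access state
 *	bytes 6-7	target port group id (big-endian)
 */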

/*
 * submit_stpg - Issue a SET TARGET PORT GROUPS command
 *
 * Currently we're only setting the current target port group state
 * to 'active/optimized' and let the array firmware figure out
 * the states of the remaining groups.
 */
static int submit_stpg(struct scsi_device *sdev, int group_id,
		       struct scsi_sense_hdr *sshdr)
{
	u8 cdb[MAX_COMMAND_SIZE];
	unsigned char stpg_data[8];
	int stpg_len = 8;
	blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
			REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};

	/* Prepare the data buffer */
	memset(stpg_data, 0, stpg_len);
	stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
	put_unaligned_be16(group_id, &stpg_data[6]);

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_OUT;
	cdb[1] = MO_SET_TARGET_PGS;
	put_unaligned_be32(stpg_len, &cdb[6]);

	return scsi_execute_cmd(sdev, cdb, opf, stpg_data,
				stpg_len, ALUA_FAILOVER_TIMEOUT * HZ,
				ALUA_FAILOVER_RETRIES, &exec_args);
}

static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
						int group_id)
{
	struct alua_port_group *pg;

	if (!id_str || !id_size || !strlen(id_str))
		return NULL;

	list_for_each_entry(pg, &port_group_list, node) {
		if (pg->group_id != group_id)
			continue;
		if (!pg->device_id_len || pg->device_id_len != id_size)
			continue;
		if (strncmp(pg->device_id_str, id_str, id_size))
			continue;
		if (!kref_get_unless_zero(&pg->kref))
			continue;
		return pg;
	}

	return NULL;
}

/*
 * alua_alloc_pg - Allocate a new port_group structure
 * @sdev: scsi device
 * @group_id: port group id
 * @tpgs: target port group settings
 *
 * Allocate a new port_group structure for a given
 * device.
 */
static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
					     int group_id, int tpgs)
{
	struct alua_port_group *pg, *tmp_pg;

	pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
					    sizeof(pg->device_id_str));
	if (pg->device_id_len <= 0) {
		/*
		 * TPGS supported but no device identification found.
		 * Generate private device identification.
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No device descriptors found\n",
			    ALUA_DH_NAME);
		pg->device_id_str[0] = '\0';
		pg->device_id_len = 0;
	}
	pg->group_id = group_id;
	pg->tpgs = tpgs;
	pg->state = SCSI_ACCESS_STATE_OPTIMAL;
	pg->valid_states = TPGS_SUPPORT_ALL;
	if (optimize_stpg)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	kref_init(&pg->kref);
	INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
	INIT_LIST_HEAD(&pg->rtpg_list);
	INIT_LIST_HEAD(&pg->node);
	INIT_LIST_HEAD(&pg->dh_list);
	spin_lock_init(&pg->lock);

	spin_lock(&port_group_lock);
	tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
				  group_id);
	if (tmp_pg) {
		spin_unlock(&port_group_lock);
		kfree(pg);
		return tmp_pg;
	}

	list_add(&pg->node, &port_group_list);
	spin_unlock(&port_group_lock);

	return pg;
}
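
/*
 * The TPGS mode reported by scsi_device_tpgs() below is the two-bit
 * TPGS field from byte 5 of the standard INQUIRY data (SPC-4):
 * 0 = no ALUA, 1 = implicit only, 2 = explicit only, 3 = both.
 */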

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev)
{
	int tpgs = TPGS_MODE_NONE;

	/*
	 * ALUA support for non-disk devices is fraught with
	 * difficulties, so disable it for now.
	 */
	if (sdev->type != TYPE_DISK) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: disable for non-disk devices\n",
			    ALUA_DH_NAME);
		return tpgs;
	}

	tpgs = scsi_device_tpgs(sdev);
	switch (tpgs) {
	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev,
			    "%s: supports implicit and explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_EXPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_NONE:
		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
			    ALUA_DH_NAME);
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: unsupported TPGS setting %d\n",
			    ALUA_DH_NAME, tpgs);
		tpgs = TPGS_MODE_NONE;
		break;
	}

	return tpgs;
}

/*
 * alua_check_vpd - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identifiers.
 */
static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
			  int tpgs)
{
	int rel_port = -1, group_id;
	struct alua_port_group *pg, *old_pg = NULL;
	bool pg_updated = false;
	unsigned long flags;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0) {
		/*
		 * Internal error; TPGS supported but required
		 * VPD identification descriptors not present.
		 * Disable ALUA support.
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No target port descriptors found\n",
			    ALUA_DH_NAME);
		return SCSI_DH_DEV_UNSUPP;
	}

	pg = alua_alloc_pg(sdev, group_id, tpgs);
	if (IS_ERR(pg)) {
		if (PTR_ERR(pg) == -ENOMEM)
			return SCSI_DH_NOMEM;
		return SCSI_DH_DEV_UNSUPP;
	}
	if (pg->device_id_len)
		sdev_printk(KERN_INFO, sdev,
			    "%s: device %s port group %x rel port %x\n",
			    ALUA_DH_NAME, pg->device_id_str,
			    group_id, rel_port);
	else
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %x rel port %x\n",
			    ALUA_DH_NAME, group_id, rel_port);

	kref_get(&pg->kref);

	/* Check for existing port group references */
	spin_lock(&h->pg_lock);
	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	if (old_pg != pg) {
		/* port group has changed. Update to new port group */
		if (h->pg) {
			spin_lock_irqsave(&old_pg->lock, flags);
			list_del_rcu(&h->node);
			spin_unlock_irqrestore(&old_pg->lock, flags);
		}
		rcu_assign_pointer(h->pg, pg);
		pg_updated = true;
	}

	spin_lock_irqsave(&pg->lock, flags);
	if (pg_updated)
		list_add_rcu(&h->node, &pg->dh_list);
	spin_unlock_irqrestore(&pg->lock, flags);

	spin_unlock(&h->pg_lock);

	alua_rtpg_queue(pg, sdev, NULL, true);
	kref_put(&pg->kref, release_port_group);

	if (old_pg)
		kref_put(&old_pg->kref, release_port_group);

	return SCSI_DH_OK;
}

static char print_alua_state(unsigned char state)
{
	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return 'A';
	case SCSI_ACCESS_STATE_ACTIVE:
		return 'N';
	case SCSI_ACCESS_STATE_STANDBY:
		return 'S';
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		return 'U';
	case SCSI_ACCESS_STATE_LBA:
		return 'L';
	case SCSI_ACCESS_STATE_OFFLINE:
		return 'O';
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return 'T';
	default:
		return 'X';
	}
}

static void alua_handle_state_transition(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
	rcu_read_unlock();
	alua_check(sdev, false);
}
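
/*
 * Summary of the sense codes (KEY/ASC/ASCQ) evaluated below:
 *
 *	NOT READY	04/0a	ALUA state transition in progress
 *	UNIT ATTENTION	04/0a	ALUA state transition in progress
 *	UNIT ATTENTION	29/00	power on, reset, or bus device reset
 *	UNIT ATTENTION	29/04	device internal reset
 *	UNIT ATTENTION	2a/01	mode parameters changed
 *	UNIT ATTENTION	2a/06	ALUA state changed
 *	UNIT ATTENTION	2a/07	implicit ALUA state transition failed
 *	UNIT ATTENTION	3f/03	INQUIRY data changed
 *	UNIT ATTENTION	3f/0e	reported LUNs data changed
 */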

static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
					      struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
			/*
			 * Power On, Reset, or Bus Device Reset.
			 * Might have obscured a state transition,
			 * so schedule a recheck.
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
			/*
			 * Device internal reset
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
			/*
			 * Mode Parameters Changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
			/*
			 * ALUA state changed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
			/*
			 * Implicit ALUA state transition failed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
			/*
			 * Inquiry data has changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
			/*
			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
			 * when switching controllers on targets like
			 * Intel Multi-Flex. We can just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}

	return SCSI_RETURN_NOT_HANDLED;
}

/*
 * alua_tur - Send a TEST UNIT READY
 * @sdev: device to which the TEST UNIT READY command should be sent
 *
 * Send a TEST UNIT READY to @sdev to figure out the device state.
 * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
 * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
 */
static int alua_tur(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sense_hdr;
	int retval;

	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, &sense_hdr);
	if ((sense_hdr.sense_key == NOT_READY ||
	     sense_hdr.sense_key == UNIT_ATTENTION) &&
	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
		return SCSI_DH_RETRY;
	else if (retval)
		return SCSI_DH_IO;
	else
		return SCSI_DH_OK;
}
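
/*
 * Rough layout of the RTPG response parsed by alua_rtpg() below
 * (per SPC-4): bytes 0-3 hold the returned data length (total length
 * is that value plus 4), byte 4 carries the format in RTPG_FMT_MASK,
 * and with the extended header format byte 5 is the implicit
 * transition time in seconds. Each target port group descriptor
 * then contains:
 *
 *	byte 0		PREF bit (bit 7) and access state (bits 3:0)
 *	byte 1		supported access states bitmap
 *	bytes 2-3	target port group id (big-endian)
 *	byte 7		target port count
 *	bytes 8-...	four bytes per relative target port id
 */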

/*
 * alua_rtpg - Evaluate REPORT TARGET PORT GROUPS
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	struct scsi_sense_hdr sense_hdr;
	struct alua_port_group *tmp_pg;
	int len, k, off, bufflen = ALUA_RTPG_SIZE;
	int group_id_old, state_old, pref_old, valid_states_old;
	unsigned char *desc, *buff;
	unsigned err;
	int retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;
	bool transitioning_sense = false;

	group_id_old = pg->group_id;
	state_old = pg->state;
	pref_old = pg->pref;
	valid_states_old = pg->valid_states;

	if (!pg->expiry) {
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (pg->transition_tmo)
			transition_tmo = pg->transition_tmo * HZ;

		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
	}

	buff = kzalloc(bufflen, GFP_KERNEL);
	if (!buff)
		return SCSI_DH_DEV_TEMP_BUSY;

 retry:
	err = 0;
	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

	if (retval) {
		/*
		 * Some (broken) implementations have a habit of returning
		 * an error during things like firmware update etc.
		 * But if the target only supports active/optimized there's
		 * not much we can do; it's not that we can switch paths
		 * or anything.
		 * So ignore any errors to avoid spurious failures during
		 * path failover.
		 */
		if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: ignoring rtpg result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			return SCSI_DH_OK;
		}
		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
			if (host_byte(retval) == DID_NO_CONNECT)
				return SCSI_DH_RES_TEMP_UNAVAIL;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 * Note: some arrays return a sense key of ILLEGAL_REQUEST
		 * with ASC 00h if they don't support the extended header.
		 */
		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST) {
			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * If the array returns with 'ALUA state transition'
		 * sense code here it cannot return RTPG data during
		 * transition. So set the state to 'transitioning' directly.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
			transitioning_sense = true;
			goto skip_rtpg;
		}
		/*
		 * Retry if any other UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY &&
		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			kfree(buff);
			return err;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		kfree(buff);
		pg->expiry = 0;
		return SCSI_DH_IO;
	}

	len = get_unaligned_be32(&buff[0]) + 4;

	if (len > bufflen) {
		/* Resubmit with the correct length */
		kfree(buff);
		bufflen = len;
		buff = kmalloc(bufflen, GFP_KERNEL);
		if (!buff) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n", __func__);
			/* Temporary failure, bypass */
			pg->expiry = 0;
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = pg->transition_tmo;
	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
		pg->transition_tmo = buff[5];
	else
		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != pg->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, pg->transition_tmo);
		pg->expiry = jiffies + pg->transition_tmo * HZ;
	}

	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, desc += off) {
		u16 group_id = get_unaligned_be16(&desc[2]);

		spin_lock_irqsave(&port_group_lock, flags);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		spin_unlock_irqrestore(&port_group_lock, flags);
		if (tmp_pg) {
			if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
				if ((tmp_pg == pg) ||
				    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
					struct alua_dh_data *h;

					tmp_pg->state = desc[0] & 0x0f;
					tmp_pg->pref = desc[0] >> 7;
					rcu_read_lock();
					list_for_each_entry_rcu(h,
						&tmp_pg->dh_list, node) {
						if (!h->sdev)
							continue;
						h->sdev->access_state = desc[0];
					}
					rcu_read_unlock();
				}
				if (tmp_pg == pg)
					tmp_pg->valid_states = desc[1];
				spin_unlock_irqrestore(&tmp_pg->lock, flags);
			}
			kref_put(&tmp_pg->kref, release_port_group);
		}
		off = 8 + (desc[7] * 4);
	}

 skip_rtpg:
	spin_lock_irqsave(&pg->lock, flags);
	if (transitioning_sense)
		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;

	if (group_id_old != pg->group_id || state_old != pg->state ||
	    pref_old != pg->pref || valid_states_old != pg->valid_states)
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
			    ALUA_DH_NAME, pg->group_id,
			    print_alua_state(pg->state),
			    pg->pref ? "preferred" : "non-preferred",
			    pg->valid_states & TPGS_SUPPORT_TRANSITION ? 'T' : 't',
			    pg->valid_states & TPGS_SUPPORT_OFFLINE ? 'O' : 'o',
			    pg->valid_states & TPGS_SUPPORT_LBA_DEPENDENT ? 'L' : 'l',
			    pg->valid_states & TPGS_SUPPORT_UNAVAILABLE ? 'U' : 'u',
			    pg->valid_states & TPGS_SUPPORT_STANDBY ? 'S' : 's',
			    pg->valid_states & TPGS_SUPPORT_NONOPTIMIZED ? 'N' : 'n',
			    pg->valid_states & TPGS_SUPPORT_OPTIMIZED ? 'A' : 'a');

	switch (pg->state) {
	case SCSI_ACCESS_STATE_TRANSITIONING:
		if (time_before(jiffies, pg->expiry)) {
			/* State transition, retry */
			pg->interval = ALUA_RTPG_RETRY_DELAY;
			err = SCSI_DH_RETRY;
		} else {
			struct alua_dh_data *h;

			/* Transitioning time exceeded, set port to standby */
			err = SCSI_DH_IO;
			pg->state = SCSI_ACCESS_STATE_STANDBY;
			pg->expiry = 0;
			rcu_read_lock();
			list_for_each_entry_rcu(h, &pg->dh_list, node) {
				if (!h->sdev)
					continue;
				h->sdev->access_state =
					(pg->state & SCSI_ACCESS_STATE_MASK);
				if (pg->pref)
					h->sdev->access_state |=
						SCSI_ACCESS_STATE_PREFERRED;
			}
			rcu_read_unlock();
		}
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		/* Path unusable */
		err = SCSI_DH_DEV_OFFLINED;
		pg->expiry = 0;
		break;
	default:
		/* Usable path if active */
		err = SCSI_DH_OK;
		pg->expiry = 0;
		break;
	}
	spin_unlock_irqrestore(&pg->lock, flags);
	kfree(buff);
	return err;
}
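
/*
 * Note on ALUA_OPTIMIZE_STPG: when set (e.g. via the optimize_stpg
 * module parameter, which should also be writable at runtime through
 * /sys/module/scsi_dh_alua/parameters/optimize_stpg), alua_stpg()
 * accepts a non-preferred active/non-optimized path as-is instead of
 * issuing an STPG, provided the target also supports implicit ALUA.
 */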

/*
 * alua_stpg - Issue a SET TARGET PORT GROUPS command
 *
 * Issue a SET TARGET PORT GROUPS command and evaluate the
 * response. Returns SCSI_DH_RETRY per default to trigger
 * a re-evaluation of the target group state or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	int retval;
	struct scsi_sense_hdr sense_hdr;

	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
		/* Only implicit ALUA supported, retry */
		return SCSI_DH_RETRY;
	}
	switch (pg->state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return SCSI_DH_OK;
	case SCSI_ACCESS_STATE_ACTIVE:
		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
		    !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return SCSI_DH_OK;
		break;
	case SCSI_ACCESS_STATE_STANDBY:
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		return SCSI_DH_IO;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: stpg failed, unhandled TPGS state %d",
			    ALUA_DH_NAME, pg->state);
		return SCSI_DH_NOSYS;
	}
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		}
	}
	/* Retry RTPG */
	return SCSI_DH_RETRY;
}
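
/*
 * When an RTPG fails with DID_NO_CONNECT, alua_rtpg_work() uses
 * alua_rtpg_select_sdev() to mark the current rtpg_sdev as disabled
 * and pick another device in the same port group, so that the RTPG
 * can be retried on a healthy path. The disabled flag is cleared
 * again once an RTPG run completes.
 */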

/*
 * The caller must call scsi_device_put() on the returned pointer if it is not
 * NULL.
 */
static struct scsi_device * __must_check
alua_rtpg_select_sdev(struct alua_port_group *pg)
{
	struct alua_dh_data *h;
	struct scsi_device *sdev = NULL, *prev_sdev;

	lockdep_assert_held(&pg->lock);
	if (WARN_ON(!pg->rtpg_sdev))
		return NULL;

	/*
	 * RCU protection isn't necessary for dh_list here
	 * as we hold pg->lock, but for access to h->pg.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(h, &pg->dh_list, node) {
		if (!h->sdev)
			continue;
		if (h->sdev == pg->rtpg_sdev) {
			h->disabled = true;
			continue;
		}
		if (rcu_dereference(h->pg) == pg &&
		    !h->disabled &&
		    !scsi_device_get(h->sdev)) {
			sdev = h->sdev;
			break;
		}
	}
	rcu_read_unlock();

	if (!sdev) {
		pr_warn("%s: no device found for rtpg\n",
			(pg->device_id_len ?
			 (char *)pg->device_id_str : "(nameless PG)"));
		return NULL;
	}

	sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");

	prev_sdev = pg->rtpg_sdev;
	pg->rtpg_sdev = sdev;

	return prev_sdev;
}
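
/*
 * alua_rtpg_work() implements a small state machine on pg->flags:
 * ALUA_PG_RUN_RTPG and ALUA_PG_RUN_STPG request the respective
 * commands, ALUA_PG_RUNNING marks the worker as active, and
 * pg->interval (in seconds) delays the next requeue. All flag
 * updates happen under pg->lock; the lock is dropped while the
 * commands themselves are on the wire.
 */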

static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
		container_of(work, struct alua_port_group, rtpg_work.work);
	struct scsi_device *sdev, *prev_sdev = NULL;
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
	struct alua_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(&pg->lock, flags);
	sdev = pg->rtpg_sdev;
	if (!sdev) {
		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
		spin_unlock_irqrestore(&pg->lock, flags);
		kref_put(&pg->kref, release_port_group);
		return;
	}
	pg->flags |= ALUA_PG_RUNNING;
	if (pg->flags & ALUA_PG_RUN_RTPG) {
		int state = pg->state;

		pg->flags &= ~ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
			if (alua_tur(sdev) == SCSI_DH_RETRY) {
				spin_lock_irqsave(&pg->lock, flags);
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				if (!pg->interval)
					pg->interval = ALUA_RTPG_RETRY_DELAY;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;
			}
			/* Send RTPG on failure or if TUR indicates SUCCESS */
		}
		err = alua_rtpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);

		/* If RTPG failed on the current device, try using another */
		if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
		    (prev_sdev = alua_rtpg_select_sdev(pg)))
			err = SCSI_DH_IMM_RETRY;

		if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
		    pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUNNING;
			if (err == SCSI_DH_IMM_RETRY)
				pg->interval = 0;
			else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
				pg->interval = ALUA_RTPG_RETRY_DELAY;
			pg->flags |= ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			goto queue_rtpg;
		}
		if (err != SCSI_DH_OK)
			pg->flags &= ~ALUA_PG_RUN_STPG;
	}
	if (pg->flags & ALUA_PG_RUN_STPG) {
		pg->flags &= ~ALUA_PG_RUN_STPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		err = alua_stpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			pg->interval = 0;
			pg->flags &= ~ALUA_PG_RUNNING;
			spin_unlock_irqrestore(&pg->lock, flags);
			goto queue_rtpg;
		}
	}

	list_splice_init(&pg->rtpg_list, &qdata_list);
	/*
	 * We went through an RTPG, for good or bad.
	 * Re-enable all devices for the next attempt.
	 */
	list_for_each_entry(h, &pg->dh_list, node)
		h->disabled = false;
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

	if (prev_sdev)
		scsi_device_put(prev_sdev);

	list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
		list_del(&qdata->entry);
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	spin_lock_irqsave(&pg->lock, flags);
	pg->flags &= ~ALUA_PG_RUNNING;
	spin_unlock_irqrestore(&pg->lock, flags);
	scsi_device_put(sdev);
	kref_put(&pg->kref, release_port_group);
	return;

queue_rtpg:
	if (prev_sdev)
		scsi_device_put(prev_sdev);
	queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ);
}

/**
 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
 * @pg: ALUA port group associated with @sdev.
 * @sdev: SCSI device for which to submit an RTPG.
 * @qdata: Information about the callback to invoke after the RTPG.
 * @force: Whether or not to submit an RTPG if a work item that will submit an
 *	   RTPG already has been scheduled.
 *
 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
 * That function is responsible for calling @qdata->callback_fn().
 *
 * Context: may be called from atomic context (alua_check()) only if the caller
 *	    holds an sdev reference.
 */
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;

	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
		return false;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		struct alua_dh_data *h = sdev->handler_data;

		rcu_read_lock();
		if (h && rcu_dereference(h->pg) == pg) {
			pg->interval = 0;
			pg->flags |= ALUA_PG_RUN_RTPG;
			kref_get(&pg->kref);
			pg->rtpg_sdev = sdev;
			start_queue = 1;
		}
		rcu_read_unlock();
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			start_queue = 1;
		}
	}

	spin_unlock_irqrestore(&pg->lock, flags);

	if (start_queue) {
		if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
				       msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
			sdev = NULL;
		else
			kref_put(&pg->kref, release_port_group);
	}
	if (sdev)
		scsi_device_put(sdev);

	return true;
}
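
/*
 * Initialization call chain: alua_bus_attach() and alua_rescan() both
 * end up in alua_initialize() below, which evaluates the TPGS mode
 * (alua_check_tpgs()), resolves the port group from VPD page 0x83
 * (alua_check_vpd()) and thereby schedules an initial RTPG via
 * alua_rtpg_queue().
 */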

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	h->disabled = false;
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}

/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * @params: parameters in the following format
 *	"no_of_params\0param1\0param2\0param3\0...\0"
 *
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *	hardware_handler "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg = NULL;
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;
	unsigned long flags;

	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg) {
		rcu_read_unlock();
		return -ENXIO;
	}
	spin_lock_irqsave(&pg->lock, flags);
	if (optimize)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	else
		pg->flags &= ~ALUA_OPTIMIZE_STPG;
	spin_unlock_irqrestore(&pg->lock, flags);
	rcu_read_unlock();

	return result;
}
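
/*
 * Path activation is asynchronous: alua_activate() below only queues
 * an alua_queue_data entry for the port group worker; the completion
 * callback is invoked from alua_rtpg_work() once RTPG/STPG processing
 * has finished, with the resulting SCSI_DH_* status.
 */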

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We're currently only switching the port group to be activated and
 * let the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			 activate_complete fn, void *data)
{
	struct alua_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata;
	struct alua_port_group *pg;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata) {
		err = SCSI_DH_RES_TEMP_UNAVAIL;
		goto out;
	}
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	mutex_lock(&h->init_mutex);
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		kfree(qdata);
		err = h->init_error;
		mutex_unlock(&h->init_mutex);
		goto out;
	}
	rcu_read_unlock();
	mutex_unlock(&h->init_mutex);

	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
		fn = NULL;
	} else {
		kfree(qdata);
		err = SCSI_DH_DEV_OFFLINED;
	}
	kref_put(&pg->kref, release_port_group);
out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;
	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		state = pg->state;
	rcu_read_unlock();

	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
	case SCSI_ACCESS_STATE_ACTIVE:
	case SCSI_ACCESS_STATE_LBA:
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return BLK_STS_OK;
	default:
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}
}

static void alua_rescan(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;

	alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return SCSI_DH_NOMEM;
	spin_lock_init(&h->pg_lock);
	rcu_assign_pointer(h->pg, NULL);
	h->init_error = SCSI_DH_OK;
	h->sdev = sdev;
	INIT_LIST_HEAD(&h->node);

	mutex_init(&h->init_mutex);
	err = alua_initialize(sdev, h);
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev->handler_data = h;
	return SCSI_DH_OK;
failed:
	kfree(h);
	return err;
}
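
/*
 * Detach clears h->pg under h->pg_lock first, so that concurrent RCU
 * readers (e.g. alua_check() and alua_prep_fn()) no longer see the
 * port group; synchronize_rcu() then guarantees all such readers are
 * done before the handler data is freed.
 */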

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	spin_lock(&h->pg_lock);
	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, NULL);
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, release_port_group);
	}
	sdev->handler_data = NULL;
	synchronize_rcu();
	kfree(h);
}

static struct scsi_device_handler alua_dh = {
	.name = ALUA_DH_NAME,
	.module = THIS_MODULE,
	.attach = alua_bus_attach,
	.detach = alua_bus_detach,
	.prep_fn = alua_prep_fn,
	.check_sense = alua_check_sense,
	.activate = alua_activate,
	.rescan = alua_rescan,
	.set_params = alua_set_params,
};

static int __init alua_init(void)
{
	int r;

	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
	if (!kaluad_wq)
		return -ENOMEM;

	r = scsi_register_device_handler(&alua_dh);
	if (r != 0) {
		printk(KERN_ERR "%s: Failed to register scsi device handler",
		       ALUA_DH_NAME);
		destroy_workqueue(kaluad_wq);
	}
	return r;
}

static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
	destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);