1 /******************************************************************************* 2 * Filename: target_core_alua.c 3 * 4 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) 5 * 6 * (c) Copyright 2009-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/slab.h> 27 #include <linux/spinlock.h> 28 #include <linux/configfs.h> 29 #include <linux/export.h> 30 #include <linux/file.h> 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_cmnd.h> 33 #include <asm/unaligned.h> 34 35 #include <target/target_core_base.h> 36 #include <target/target_core_backend.h> 37 #include <target/target_core_fabric.h> 38 #include <target/target_core_configfs.h> 39 40 #include "target_core_internal.h" 41 #include "target_core_alua.h" 42 #include "target_core_ua.h" 43 44 static sense_reason_t core_alua_check_transition(int state, int *primary); 45 static int core_alua_set_tg_pt_secondary_state( 46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 47 struct se_port *port, int explicit, int offline); 48 49 static u16 alua_lu_gps_counter; 50 static u32 alua_lu_gps_count; 51 52 static DEFINE_SPINLOCK(lu_gps_lock); 53 static LIST_HEAD(lu_gps_list); 54 55 struct t10_alua_lu_gp *default_lu_gp; 56 57 /* 58 * REPORT_TARGET_PORT_GROUPS 59 * 60 * See spc4r17 section 6.27 61 */ 62 sense_reason_t 63 target_emulate_report_target_port_groups(struct se_cmd *cmd) 64 { 65 struct se_device *dev = cmd->se_dev; 66 struct se_port *port; 67 struct t10_alua_tg_pt_gp *tg_pt_gp; 68 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 69 unsigned char *buf; 70 u32 rd_len = 0, off; 71 int ext_hdr = (cmd->t_task_cdb[1] & 0x20); 72 73 /* 74 * Skip over RESERVED area to first Target port group descriptor 75 * depending on the PARAMETER DATA FORMAT type.. 76 */ 77 if (ext_hdr != 0) 78 off = 8; 79 else 80 off = 4; 81 82 if (cmd->data_length < off) { 83 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" 84 " small for %s header\n", cmd->data_length, 85 (ext_hdr) ? "extended" : "normal"); 86 return TCM_INVALID_CDB_FIELD; 87 } 88 buf = transport_kmap_data_sg(cmd); 89 if (!buf) 90 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 91 92 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 93 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 94 tg_pt_gp_list) { 95 /* 96 * Check if the Target port group and Target port descriptor list 97 * based on tg_pt_gp_members count will fit into the response payload. 98 * Otherwise, bump rd_len to let the initiator know we have exceeded 99 * the allocation length and the response is truncated. 100 */ 101 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > 102 cmd->data_length) { 103 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); 104 continue; 105 } 106 /* 107 * PREF: Preferred target port bit, determine if this 108 * bit should be set for port group. 109 */ 110 if (tg_pt_gp->tg_pt_gp_pref) 111 buf[off] = 0x80; 112 /* 113 * Set the ASYMMETRIC ACCESS State 114 */ 115 buf[off++] |= (atomic_read( 116 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); 117 /* 118 * Set supported ASYMMETRIC ACCESS State bits 119 */ 120 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states; 121 /* 122 * TARGET PORT GROUP 123 */ 124 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); 125 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); 126 127 off++; /* Skip over Reserved */ 128 /* 129 * STATUS CODE 130 */ 131 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); 132 /* 133 * Vendor Specific field 134 */ 135 buf[off++] = 0x00; 136 /* 137 * TARGET PORT COUNT 138 */ 139 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); 140 rd_len += 8; 141 142 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 143 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 144 tg_pt_gp_mem_list) { 145 port = tg_pt_gp_mem->tg_pt; 146 /* 147 * Start Target Port descriptor format 148 * 149 * See spc4r17 section 6.2.7 Table 247 150 */ 151 off += 2; /* Skip over Obsolete */ 152 /* 153 * Set RELATIVE TARGET PORT IDENTIFIER 154 */ 155 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 156 buf[off++] = (port->sep_rtpi & 0xff); 157 rd_len += 4; 158 } 159 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 160 } 161 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 162 /* 163 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 164 */ 165 put_unaligned_be32(rd_len, &buf[0]); 166 167 /* 168 * Fill in the Extended header parameter data format if requested 169 */ 170 if (ext_hdr != 0) { 171 buf[4] = 0x10; 172 /* 173 * Set the implicit transition time (in seconds) for the application 174 * client to use as a base for it's transition timeout value. 175 * 176 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN 177 * this CDB was received upon to determine this value individually 178 * for ALUA target port group. 179 */ 180 port = cmd->se_lun->lun_sep; 181 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 182 if (tg_pt_gp_mem) { 183 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 184 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 185 if (tg_pt_gp) 186 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; 187 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 188 } 189 } 190 transport_kunmap_data_sg(cmd); 191 192 target_complete_cmd(cmd, GOOD); 193 return 0; 194 } 195 196 /* 197 * SET_TARGET_PORT_GROUPS for explicit ALUA operation. 198 * 199 * See spc4r17 section 6.35 200 */ 201 sense_reason_t 202 target_emulate_set_target_port_groups(struct se_cmd *cmd) 203 { 204 struct se_device *dev = cmd->se_dev; 205 struct se_port *port, *l_port = cmd->se_lun->lun_sep; 206 struct se_node_acl *nacl = cmd->se_sess->se_node_acl; 207 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; 208 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; 209 unsigned char *buf; 210 unsigned char *ptr; 211 sense_reason_t rc = TCM_NO_SENSE; 212 u32 len = 4; /* Skip over RESERVED area in header */ 213 int alua_access_state, primary = 0; 214 u16 tg_pt_id, rtpi; 215 216 if (!l_port) 217 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 218 219 if (cmd->data_length < 4) { 220 pr_warn("SET TARGET PORT GROUPS parameter list length %u too" 221 " small\n", cmd->data_length); 222 return TCM_INVALID_PARAMETER_LIST; 223 } 224 225 buf = transport_kmap_data_sg(cmd); 226 if (!buf) 227 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 228 229 /* 230 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 231 * for the local tg_pt_gp. 232 */ 233 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 234 if (!l_tg_pt_gp_mem) { 235 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 236 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 237 goto out; 238 } 239 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 240 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; 241 if (!l_tg_pt_gp) { 242 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 243 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 244 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 245 goto out; 246 } 247 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 248 249 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { 250 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 251 " while TPGS_EXPLICIT_ALUA is disabled\n"); 252 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 253 goto out; 254 } 255 256 ptr = &buf[4]; /* Skip over RESERVED area in header */ 257 258 while (len < cmd->data_length) { 259 bool found = false; 260 alua_access_state = (ptr[0] & 0x0f); 261 /* 262 * Check the received ALUA access state, and determine if 263 * the state is a primary or secondary target port asymmetric 264 * access state. 265 */ 266 rc = core_alua_check_transition(alua_access_state, &primary); 267 if (rc) { 268 /* 269 * If the SET TARGET PORT GROUPS attempts to establish 270 * an invalid combination of target port asymmetric 271 * access states or attempts to establish an 272 * unsupported target port asymmetric access state, 273 * then the command shall be terminated with CHECK 274 * CONDITION status, with the sense key set to ILLEGAL 275 * REQUEST, and the additional sense code set to INVALID 276 * FIELD IN PARAMETER LIST. 277 */ 278 goto out; 279 } 280 281 /* 282 * If the ASYMMETRIC ACCESS STATE field (see table 267) 283 * specifies a primary target port asymmetric access state, 284 * then the TARGET PORT GROUP OR TARGET PORT field specifies 285 * a primary target port group for which the primary target 286 * port asymmetric access state shall be changed. If the 287 * ASYMMETRIC ACCESS STATE field specifies a secondary target 288 * port asymmetric access state, then the TARGET PORT GROUP OR 289 * TARGET PORT field specifies the relative target port 290 * identifier (see 3.1.120) of the target port for which the 291 * secondary target port asymmetric access state shall be 292 * changed. 293 */ 294 if (primary) { 295 tg_pt_id = get_unaligned_be16(ptr + 2); 296 /* 297 * Locate the matching target port group ID from 298 * the global tg_pt_gp list 299 */ 300 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 301 list_for_each_entry(tg_pt_gp, 302 &dev->t10_alua.tg_pt_gps_list, 303 tg_pt_gp_list) { 304 if (!tg_pt_gp->tg_pt_gp_valid_id) 305 continue; 306 307 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 308 continue; 309 310 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 311 smp_mb__after_atomic_inc(); 312 313 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 314 315 if (!core_alua_do_port_transition(tg_pt_gp, 316 dev, l_port, nacl, 317 alua_access_state, 1)) 318 found = true; 319 320 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 321 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 322 smp_mb__after_atomic_dec(); 323 break; 324 } 325 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 326 } else { 327 /* 328 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify 329 * the Target Port in question for the the incoming 330 * SET_TARGET_PORT_GROUPS op. 331 */ 332 rtpi = get_unaligned_be16(ptr + 2); 333 /* 334 * Locate the matching relative target port identifier 335 * for the struct se_device storage object. 336 */ 337 spin_lock(&dev->se_port_lock); 338 list_for_each_entry(port, &dev->dev_sep_list, 339 sep_list) { 340 if (port->sep_rtpi != rtpi) 341 continue; 342 343 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 344 345 spin_unlock(&dev->se_port_lock); 346 347 if (!core_alua_set_tg_pt_secondary_state( 348 tg_pt_gp_mem, port, 1, 1)) 349 found = true; 350 351 spin_lock(&dev->se_port_lock); 352 break; 353 } 354 spin_unlock(&dev->se_port_lock); 355 } 356 357 if (!found) { 358 rc = TCM_INVALID_PARAMETER_LIST; 359 goto out; 360 } 361 362 ptr += 4; 363 len += 4; 364 } 365 366 out: 367 transport_kunmap_data_sg(cmd); 368 if (!rc) 369 target_complete_cmd(cmd, GOOD); 370 return rc; 371 } 372 373 static inline int core_alua_state_nonoptimized( 374 struct se_cmd *cmd, 375 unsigned char *cdb, 376 int nonop_delay_msecs, 377 u8 *alua_ascq) 378 { 379 /* 380 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked 381 * later to determine if processing of this cmd needs to be 382 * temporarily delayed for the Active/NonOptimized primary access state. 383 */ 384 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 385 cmd->alua_nonop_delay = nonop_delay_msecs; 386 return 0; 387 } 388 389 static inline int core_alua_state_standby( 390 struct se_cmd *cmd, 391 unsigned char *cdb, 392 u8 *alua_ascq) 393 { 394 /* 395 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by 396 * spc4r17 section 5.9.2.4.4 397 */ 398 switch (cdb[0]) { 399 case INQUIRY: 400 case LOG_SELECT: 401 case LOG_SENSE: 402 case MODE_SELECT: 403 case MODE_SENSE: 404 case REPORT_LUNS: 405 case RECEIVE_DIAGNOSTIC: 406 case SEND_DIAGNOSTIC: 407 return 0; 408 case MAINTENANCE_IN: 409 switch (cdb[1] & 0x1f) { 410 case MI_REPORT_TARGET_PGS: 411 return 0; 412 default: 413 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 414 return 1; 415 } 416 case MAINTENANCE_OUT: 417 switch (cdb[1]) { 418 case MO_SET_TARGET_PGS: 419 return 0; 420 default: 421 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 422 return 1; 423 } 424 case REQUEST_SENSE: 425 case PERSISTENT_RESERVE_IN: 426 case PERSISTENT_RESERVE_OUT: 427 case READ_BUFFER: 428 case WRITE_BUFFER: 429 return 0; 430 default: 431 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 432 return 1; 433 } 434 435 return 0; 436 } 437 438 static inline int core_alua_state_unavailable( 439 struct se_cmd *cmd, 440 unsigned char *cdb, 441 u8 *alua_ascq) 442 { 443 /* 444 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by 445 * spc4r17 section 5.9.2.4.5 446 */ 447 switch (cdb[0]) { 448 case INQUIRY: 449 case REPORT_LUNS: 450 return 0; 451 case MAINTENANCE_IN: 452 switch (cdb[1] & 0x1f) { 453 case MI_REPORT_TARGET_PGS: 454 return 0; 455 default: 456 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 457 return 1; 458 } 459 case MAINTENANCE_OUT: 460 switch (cdb[1]) { 461 case MO_SET_TARGET_PGS: 462 return 0; 463 default: 464 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 465 return 1; 466 } 467 case REQUEST_SENSE: 468 case READ_BUFFER: 469 case WRITE_BUFFER: 470 return 0; 471 default: 472 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 473 return 1; 474 } 475 476 return 0; 477 } 478 479 static inline int core_alua_state_transition( 480 struct se_cmd *cmd, 481 unsigned char *cdb, 482 u8 *alua_ascq) 483 { 484 /* 485 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by 486 * spc4r17 section 5.9.2.5 487 */ 488 switch (cdb[0]) { 489 case INQUIRY: 490 case REPORT_LUNS: 491 return 0; 492 case MAINTENANCE_IN: 493 switch (cdb[1] & 0x1f) { 494 case MI_REPORT_TARGET_PGS: 495 return 0; 496 default: 497 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 498 return 1; 499 } 500 case REQUEST_SENSE: 501 case READ_BUFFER: 502 case WRITE_BUFFER: 503 return 0; 504 default: 505 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 506 return 1; 507 } 508 509 return 0; 510 } 511 512 /* 513 * return 1: Is used to signal LUN not accessible, and check condition/not ready 514 * return 0: Used to signal success 515 * return -1: Used to signal failure, and invalid cdb field 516 */ 517 sense_reason_t 518 target_alua_state_check(struct se_cmd *cmd) 519 { 520 struct se_device *dev = cmd->se_dev; 521 unsigned char *cdb = cmd->t_task_cdb; 522 struct se_lun *lun = cmd->se_lun; 523 struct se_port *port = lun->lun_sep; 524 struct t10_alua_tg_pt_gp *tg_pt_gp; 525 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 526 int out_alua_state, nonop_delay_msecs; 527 u8 alua_ascq; 528 int ret; 529 530 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 531 return 0; 532 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 533 return 0; 534 535 if (!port) 536 return 0; 537 /* 538 * First, check for a struct se_port specific secondary ALUA target port 539 * access state: OFFLINE 540 */ 541 if (atomic_read(&port->sep_tg_pt_secondary_offline)) { 542 pr_debug("ALUA: Got secondary offline status for local" 543 " target port\n"); 544 alua_ascq = ASCQ_04H_ALUA_OFFLINE; 545 ret = 1; 546 goto out; 547 } 548 /* 549 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the 550 * ALUA target port group, to obtain current ALUA access state. 551 * Otherwise look for the underlying struct se_device association with 552 * a ALUA logical unit group. 553 */ 554 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 555 if (!tg_pt_gp_mem) 556 return 0; 557 558 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 559 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 560 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 561 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 562 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 563 /* 564 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional 565 * statement so the compiler knows explicitly to check this case first. 566 * For the Optimized ALUA access state case, we want to process the 567 * incoming fabric cmd ASAP.. 568 */ 569 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) 570 return 0; 571 572 switch (out_alua_state) { 573 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 574 ret = core_alua_state_nonoptimized(cmd, cdb, 575 nonop_delay_msecs, &alua_ascq); 576 break; 577 case ALUA_ACCESS_STATE_STANDBY: 578 ret = core_alua_state_standby(cmd, cdb, &alua_ascq); 579 break; 580 case ALUA_ACCESS_STATE_UNAVAILABLE: 581 ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq); 582 break; 583 case ALUA_ACCESS_STATE_TRANSITION: 584 ret = core_alua_state_transition(cmd, cdb, &alua_ascq); 585 break; 586 /* 587 * OFFLINE is a secondary ALUA target port group access state, that is 588 * handled above with struct se_port->sep_tg_pt_secondary_offline=1 589 */ 590 case ALUA_ACCESS_STATE_OFFLINE: 591 default: 592 pr_err("Unknown ALUA access state: 0x%02x\n", 593 out_alua_state); 594 return TCM_INVALID_CDB_FIELD; 595 } 596 597 out: 598 if (ret > 0) { 599 /* 600 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; 601 * The ALUA additional sense code qualifier (ASCQ) is determined 602 * by the ALUA primary or secondary access state.. 603 */ 604 pr_debug("[%s]: ALUA TG Port not available, " 605 "SenseKey: NOT_READY, ASC/ASCQ: " 606 "0x04/0x%02x\n", 607 cmd->se_tfo->get_fabric_name(), alua_ascq); 608 609 cmd->scsi_asc = 0x04; 610 cmd->scsi_ascq = alua_ascq; 611 return TCM_CHECK_CONDITION_NOT_READY; 612 } 613 614 return 0; 615 } 616 617 /* 618 * Check implicit and explicit ALUA state change request. 619 */ 620 static sense_reason_t 621 core_alua_check_transition(int state, int *primary) 622 { 623 switch (state) { 624 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 625 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 626 case ALUA_ACCESS_STATE_STANDBY: 627 case ALUA_ACCESS_STATE_UNAVAILABLE: 628 /* 629 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 630 * defined as primary target port asymmetric access states. 631 */ 632 *primary = 1; 633 break; 634 case ALUA_ACCESS_STATE_OFFLINE: 635 /* 636 * OFFLINE state is defined as a secondary target port 637 * asymmetric access state. 638 */ 639 *primary = 0; 640 break; 641 default: 642 pr_err("Unknown ALUA access state: 0x%02x\n", state); 643 return TCM_INVALID_PARAMETER_LIST; 644 } 645 646 return 0; 647 } 648 649 static char *core_alua_dump_state(int state) 650 { 651 switch (state) { 652 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 653 return "Active/Optimized"; 654 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 655 return "Active/NonOptimized"; 656 case ALUA_ACCESS_STATE_STANDBY: 657 return "Standby"; 658 case ALUA_ACCESS_STATE_UNAVAILABLE: 659 return "Unavailable"; 660 case ALUA_ACCESS_STATE_OFFLINE: 661 return "Offline"; 662 default: 663 return "Unknown"; 664 } 665 666 return NULL; 667 } 668 669 char *core_alua_dump_status(int status) 670 { 671 switch (status) { 672 case ALUA_STATUS_NONE: 673 return "None"; 674 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: 675 return "Altered by Explicit STPG"; 676 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: 677 return "Altered by Implicit ALUA"; 678 default: 679 return "Unknown"; 680 } 681 682 return NULL; 683 } 684 685 /* 686 * Used by fabric modules to determine when we need to delay processing 687 * for the Active/NonOptimized paths.. 688 */ 689 int core_alua_check_nonop_delay( 690 struct se_cmd *cmd) 691 { 692 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) 693 return 0; 694 if (in_interrupt()) 695 return 0; 696 /* 697 * The ALUA Active/NonOptimized access state delay can be disabled 698 * in via configfs with a value of zero 699 */ 700 if (!cmd->alua_nonop_delay) 701 return 0; 702 /* 703 * struct se_cmd->alua_nonop_delay gets set by a target port group 704 * defined interval in core_alua_state_nonoptimized() 705 */ 706 msleep_interruptible(cmd->alua_nonop_delay); 707 return 0; 708 } 709 EXPORT_SYMBOL(core_alua_check_nonop_delay); 710 711 /* 712 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex 713 * 714 */ 715 static int core_alua_write_tpg_metadata( 716 const char *path, 717 unsigned char *md_buf, 718 u32 md_buf_len) 719 { 720 struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); 721 int ret; 722 723 if (IS_ERR(file)) { 724 pr_err("filp_open(%s) for ALUA metadata failed\n", path); 725 return -ENODEV; 726 } 727 ret = kernel_write(file, md_buf, md_buf_len, 0); 728 if (ret < 0) 729 pr_err("Error writing ALUA metadata file: %s\n", path); 730 fput(file); 731 return (ret < 0) ? -EIO : 0; 732 } 733 734 /* 735 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 736 */ 737 static int core_alua_update_tpg_primary_metadata( 738 struct t10_alua_tg_pt_gp *tg_pt_gp, 739 int primary_state, 740 unsigned char *md_buf) 741 { 742 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; 743 char path[ALUA_METADATA_PATH_LEN]; 744 int len; 745 746 memset(path, 0, ALUA_METADATA_PATH_LEN); 747 748 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, 749 "tg_pt_gp_id=%hu\n" 750 "alua_access_state=0x%02x\n" 751 "alua_access_status=0x%02x\n", 752 tg_pt_gp->tg_pt_gp_id, primary_state, 753 tg_pt_gp->tg_pt_gp_alua_access_status); 754 755 snprintf(path, ALUA_METADATA_PATH_LEN, 756 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 757 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 758 759 return core_alua_write_tpg_metadata(path, md_buf, len); 760 } 761 762 static int core_alua_do_transition_tg_pt( 763 struct t10_alua_tg_pt_gp *tg_pt_gp, 764 struct se_port *l_port, 765 struct se_node_acl *nacl, 766 unsigned char *md_buf, 767 int new_state, 768 int explicit) 769 { 770 struct se_dev_entry *se_deve; 771 struct se_lun_acl *lacl; 772 struct se_port *port; 773 struct t10_alua_tg_pt_gp_member *mem; 774 int old_state = 0; 775 /* 776 * Save the old primary ALUA access state, and set the current state 777 * to ALUA_ACCESS_STATE_TRANSITION. 778 */ 779 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 780 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 781 ALUA_ACCESS_STATE_TRANSITION); 782 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 783 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 784 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 785 /* 786 * Check for the optional ALUA primary state transition delay 787 */ 788 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 789 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 790 791 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 792 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, 793 tg_pt_gp_mem_list) { 794 port = mem->tg_pt; 795 /* 796 * After an implicit target port asymmetric access state 797 * change, a device server shall establish a unit attention 798 * condition for the initiator port associated with every I_T 799 * nexus with the additional sense code set to ASYMMETRIC 800 * ACCESS STATE CHANGED. 801 * 802 * After an explicit target port asymmetric access state 803 * change, a device server shall establish a unit attention 804 * condition with the additional sense code set to ASYMMETRIC 805 * ACCESS STATE CHANGED for the initiator port associated with 806 * every I_T nexus other than the I_T nexus on which the SET 807 * TARGET PORT GROUPS command 808 */ 809 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 810 smp_mb__after_atomic_inc(); 811 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 812 813 spin_lock_bh(&port->sep_alua_lock); 814 list_for_each_entry(se_deve, &port->sep_alua_list, 815 alua_port_list) { 816 lacl = se_deve->se_lun_acl; 817 /* 818 * se_deve->se_lun_acl pointer may be NULL for a 819 * entry created without explicit Node+MappedLUN ACLs 820 */ 821 if (!lacl) 822 continue; 823 824 if (explicit && 825 (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 826 (l_port != NULL) && (l_port == port)) 827 continue; 828 829 core_scsi3_ua_allocate(lacl->se_lun_nacl, 830 se_deve->mapped_lun, 0x2A, 831 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 832 } 833 spin_unlock_bh(&port->sep_alua_lock); 834 835 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 836 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 837 smp_mb__after_atomic_dec(); 838 } 839 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 840 /* 841 * Update the ALUA metadata buf that has been allocated in 842 * core_alua_do_port_transition(), this metadata will be written 843 * to struct file. 844 * 845 * Note that there is the case where we do not want to update the 846 * metadata when the saved metadata is being parsed in userspace 847 * when setting the existing port access state and access status. 848 * 849 * Also note that the failure to write out the ALUA metadata to 850 * struct file does NOT affect the actual ALUA transition. 851 */ 852 if (tg_pt_gp->tg_pt_gp_write_metadata) { 853 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); 854 core_alua_update_tpg_primary_metadata(tg_pt_gp, 855 new_state, md_buf); 856 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); 857 } 858 /* 859 * Set the current primary ALUA access state to the requested new state 860 */ 861 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 862 863 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 864 " from primary access state %s to %s\n", (explicit) ? "explicit" : 865 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 866 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 867 core_alua_dump_state(new_state)); 868 869 return 0; 870 } 871 872 int core_alua_do_port_transition( 873 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 874 struct se_device *l_dev, 875 struct se_port *l_port, 876 struct se_node_acl *l_nacl, 877 int new_state, 878 int explicit) 879 { 880 struct se_device *dev; 881 struct se_port *port; 882 struct se_node_acl *nacl; 883 struct t10_alua_lu_gp *lu_gp; 884 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 885 struct t10_alua_tg_pt_gp *tg_pt_gp; 886 unsigned char *md_buf; 887 int primary; 888 889 if (core_alua_check_transition(new_state, &primary) != 0) 890 return -EINVAL; 891 892 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); 893 if (!md_buf) { 894 pr_err("Unable to allocate buf for ALUA metadata\n"); 895 return -ENOMEM; 896 } 897 898 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 899 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 900 lu_gp = local_lu_gp_mem->lu_gp; 901 atomic_inc(&lu_gp->lu_gp_ref_cnt); 902 smp_mb__after_atomic_inc(); 903 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 904 /* 905 * For storage objects that are members of the 'default_lu_gp', 906 * we only do transition on the passed *l_tp_pt_gp, and not 907 * on all of the matching target port groups IDs in default_lu_gp. 908 */ 909 if (!lu_gp->lu_gp_id) { 910 /* 911 * core_alua_do_transition_tg_pt() will always return 912 * success. 913 */ 914 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 915 md_buf, new_state, explicit); 916 atomic_dec(&lu_gp->lu_gp_ref_cnt); 917 smp_mb__after_atomic_dec(); 918 kfree(md_buf); 919 return 0; 920 } 921 /* 922 * For all other LU groups aside from 'default_lu_gp', walk all of 923 * the associated storage objects looking for a matching target port 924 * group ID from the local target port group. 925 */ 926 spin_lock(&lu_gp->lu_gp_lock); 927 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 928 lu_gp_mem_list) { 929 930 dev = lu_gp_mem->lu_gp_mem_dev; 931 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 932 smp_mb__after_atomic_inc(); 933 spin_unlock(&lu_gp->lu_gp_lock); 934 935 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 936 list_for_each_entry(tg_pt_gp, 937 &dev->t10_alua.tg_pt_gps_list, 938 tg_pt_gp_list) { 939 940 if (!tg_pt_gp->tg_pt_gp_valid_id) 941 continue; 942 /* 943 * If the target behavior port asymmetric access state 944 * is changed for any target port group accessible via 945 * a logical unit within a LU group, the target port 946 * behavior group asymmetric access states for the same 947 * target port group accessible via other logical units 948 * in that LU group will also change. 949 */ 950 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 951 continue; 952 953 if (l_tg_pt_gp == tg_pt_gp) { 954 port = l_port; 955 nacl = l_nacl; 956 } else { 957 port = NULL; 958 nacl = NULL; 959 } 960 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 961 smp_mb__after_atomic_inc(); 962 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 963 /* 964 * core_alua_do_transition_tg_pt() will always return 965 * success. 966 */ 967 core_alua_do_transition_tg_pt(tg_pt_gp, port, 968 nacl, md_buf, new_state, explicit); 969 970 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 971 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 972 smp_mb__after_atomic_dec(); 973 } 974 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 975 976 spin_lock(&lu_gp->lu_gp_lock); 977 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 978 smp_mb__after_atomic_dec(); 979 } 980 spin_unlock(&lu_gp->lu_gp_lock); 981 982 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 983 " Group IDs: %hu %s transition to primary state: %s\n", 984 config_item_name(&lu_gp->lu_gp_group.cg_item), 985 l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", 986 core_alua_dump_state(new_state)); 987 988 atomic_dec(&lu_gp->lu_gp_ref_cnt); 989 smp_mb__after_atomic_dec(); 990 kfree(md_buf); 991 return 0; 992 } 993 994 /* 995 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held 996 */ 997 static int core_alua_update_tpg_secondary_metadata( 998 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 999 struct se_port *port, 1000 unsigned char *md_buf, 1001 u32 md_buf_len) 1002 { 1003 struct se_portal_group *se_tpg = port->sep_tpg; 1004 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 1005 int len; 1006 1007 memset(path, 0, ALUA_METADATA_PATH_LEN); 1008 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); 1009 1010 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", 1011 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg)); 1012 1013 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) 1014 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 1015 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 1016 1017 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" 1018 "alua_tg_pt_status=0x%02x\n", 1019 atomic_read(&port->sep_tg_pt_secondary_offline), 1020 port->sep_tg_pt_secondary_stat); 1021 1022 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", 1023 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1024 port->sep_lun->unpacked_lun); 1025 1026 return core_alua_write_tpg_metadata(path, md_buf, len); 1027 } 1028 1029 static int core_alua_set_tg_pt_secondary_state( 1030 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1031 struct se_port *port, 1032 int explicit, 1033 int offline) 1034 { 1035 struct t10_alua_tg_pt_gp *tg_pt_gp; 1036 unsigned char *md_buf; 1037 u32 md_buf_len; 1038 int trans_delay_msecs; 1039 1040 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1041 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1042 if (!tg_pt_gp) { 1043 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1044 pr_err("Unable to complete secondary state" 1045 " transition\n"); 1046 return -EINVAL; 1047 } 1048 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; 1049 /* 1050 * Set the secondary ALUA target port access state to OFFLINE 1051 * or release the previously secondary state for struct se_port 1052 */ 1053 if (offline) 1054 atomic_set(&port->sep_tg_pt_secondary_offline, 1); 1055 else 1056 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1057 1058 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 1059 port->sep_tg_pt_secondary_stat = (explicit) ? 1060 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1061 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1062 1063 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1064 " to secondary access state: %s\n", (explicit) ? "explicit" : 1065 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1066 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1067 1068 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1069 /* 1070 * Do the optional transition delay after we set the secondary 1071 * ALUA access state. 1072 */ 1073 if (trans_delay_msecs != 0) 1074 msleep_interruptible(trans_delay_msecs); 1075 /* 1076 * See if we need to update the ALUA fabric port metadata for 1077 * secondary state and status 1078 */ 1079 if (port->sep_tg_pt_secondary_write_md) { 1080 md_buf = kzalloc(md_buf_len, GFP_KERNEL); 1081 if (!md_buf) { 1082 pr_err("Unable to allocate md_buf for" 1083 " secondary ALUA access metadata\n"); 1084 return -ENOMEM; 1085 } 1086 mutex_lock(&port->sep_tg_pt_md_mutex); 1087 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, 1088 md_buf, md_buf_len); 1089 mutex_unlock(&port->sep_tg_pt_md_mutex); 1090 1091 kfree(md_buf); 1092 } 1093 1094 return 0; 1095 } 1096 1097 struct t10_alua_lu_gp * 1098 core_alua_allocate_lu_gp(const char *name, int def_group) 1099 { 1100 struct t10_alua_lu_gp *lu_gp; 1101 1102 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); 1103 if (!lu_gp) { 1104 pr_err("Unable to allocate struct t10_alua_lu_gp\n"); 1105 return ERR_PTR(-ENOMEM); 1106 } 1107 INIT_LIST_HEAD(&lu_gp->lu_gp_node); 1108 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); 1109 spin_lock_init(&lu_gp->lu_gp_lock); 1110 atomic_set(&lu_gp->lu_gp_ref_cnt, 0); 1111 1112 if (def_group) { 1113 lu_gp->lu_gp_id = alua_lu_gps_counter++; 1114 lu_gp->lu_gp_valid_id = 1; 1115 alua_lu_gps_count++; 1116 } 1117 1118 return lu_gp; 1119 } 1120 1121 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) 1122 { 1123 struct t10_alua_lu_gp *lu_gp_tmp; 1124 u16 lu_gp_id_tmp; 1125 /* 1126 * The lu_gp->lu_gp_id may only be set once.. 1127 */ 1128 if (lu_gp->lu_gp_valid_id) { 1129 pr_warn("ALUA LU Group already has a valid ID," 1130 " ignoring request\n"); 1131 return -EINVAL; 1132 } 1133 1134 spin_lock(&lu_gps_lock); 1135 if (alua_lu_gps_count == 0x0000ffff) { 1136 pr_err("Maximum ALUA alua_lu_gps_count:" 1137 " 0x0000ffff reached\n"); 1138 spin_unlock(&lu_gps_lock); 1139 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1140 return -ENOSPC; 1141 } 1142 again: 1143 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : 1144 alua_lu_gps_counter++; 1145 1146 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { 1147 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { 1148 if (!lu_gp_id) 1149 goto again; 1150 1151 pr_warn("ALUA Logical Unit Group ID: %hu" 1152 " already exists, ignoring request\n", 1153 lu_gp_id); 1154 spin_unlock(&lu_gps_lock); 1155 return -EINVAL; 1156 } 1157 } 1158 1159 lu_gp->lu_gp_id = lu_gp_id_tmp; 1160 lu_gp->lu_gp_valid_id = 1; 1161 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); 1162 alua_lu_gps_count++; 1163 spin_unlock(&lu_gps_lock); 1164 1165 return 0; 1166 } 1167 1168 static struct t10_alua_lu_gp_member * 1169 core_alua_allocate_lu_gp_mem(struct se_device *dev) 1170 { 1171 struct t10_alua_lu_gp_member *lu_gp_mem; 1172 1173 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); 1174 if (!lu_gp_mem) { 1175 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); 1176 return ERR_PTR(-ENOMEM); 1177 } 1178 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); 1179 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); 1180 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); 1181 1182 lu_gp_mem->lu_gp_mem_dev = dev; 1183 dev->dev_alua_lu_gp_mem = lu_gp_mem; 1184 1185 return lu_gp_mem; 1186 } 1187 1188 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) 1189 { 1190 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; 1191 /* 1192 * Once we have reached this point, config_item_put() has 1193 * already been called from target_core_alua_drop_lu_gp(). 1194 * 1195 * Here, we remove the *lu_gp from the global list so that 1196 * no associations can be made while we are releasing 1197 * struct t10_alua_lu_gp. 1198 */ 1199 spin_lock(&lu_gps_lock); 1200 list_del(&lu_gp->lu_gp_node); 1201 alua_lu_gps_count--; 1202 spin_unlock(&lu_gps_lock); 1203 /* 1204 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() 1205 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be 1206 * released with core_alua_put_lu_gp_from_name() 1207 */ 1208 while (atomic_read(&lu_gp->lu_gp_ref_cnt)) 1209 cpu_relax(); 1210 /* 1211 * Release reference to struct t10_alua_lu_gp * from all associated 1212 * struct se_device. 1213 */ 1214 spin_lock(&lu_gp->lu_gp_lock); 1215 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, 1216 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1217 if (lu_gp_mem->lu_gp_assoc) { 1218 list_del(&lu_gp_mem->lu_gp_mem_list); 1219 lu_gp->lu_gp_members--; 1220 lu_gp_mem->lu_gp_assoc = 0; 1221 } 1222 spin_unlock(&lu_gp->lu_gp_lock); 1223 /* 1224 * 1225 * lu_gp_mem is associated with a single 1226 * struct se_device->dev_alua_lu_gp_mem, and is released when 1227 * struct se_device is released via core_alua_free_lu_gp_mem(). 1228 * 1229 * If the passed lu_gp does NOT match the default_lu_gp, assume 1230 * we want to re-associate a given lu_gp_mem with default_lu_gp. 1231 */ 1232 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1233 if (lu_gp != default_lu_gp) 1234 __core_alua_attach_lu_gp_mem(lu_gp_mem, 1235 default_lu_gp); 1236 else 1237 lu_gp_mem->lu_gp = NULL; 1238 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1239 1240 spin_lock(&lu_gp->lu_gp_lock); 1241 } 1242 spin_unlock(&lu_gp->lu_gp_lock); 1243 1244 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1245 } 1246 1247 void core_alua_free_lu_gp_mem(struct se_device *dev) 1248 { 1249 struct t10_alua_lu_gp *lu_gp; 1250 struct t10_alua_lu_gp_member *lu_gp_mem; 1251 1252 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1253 if (!lu_gp_mem) 1254 return; 1255 1256 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) 1257 cpu_relax(); 1258 1259 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1260 lu_gp = lu_gp_mem->lu_gp; 1261 if (lu_gp) { 1262 spin_lock(&lu_gp->lu_gp_lock); 1263 if (lu_gp_mem->lu_gp_assoc) { 1264 list_del(&lu_gp_mem->lu_gp_mem_list); 1265 lu_gp->lu_gp_members--; 1266 lu_gp_mem->lu_gp_assoc = 0; 1267 } 1268 spin_unlock(&lu_gp->lu_gp_lock); 1269 lu_gp_mem->lu_gp = NULL; 1270 } 1271 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1272 1273 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); 1274 } 1275 1276 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) 1277 { 1278 struct t10_alua_lu_gp *lu_gp; 1279 struct config_item *ci; 1280 1281 spin_lock(&lu_gps_lock); 1282 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { 1283 if (!lu_gp->lu_gp_valid_id) 1284 continue; 1285 ci = &lu_gp->lu_gp_group.cg_item; 1286 if (!strcmp(config_item_name(ci), name)) { 1287 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1288 spin_unlock(&lu_gps_lock); 1289 return lu_gp; 1290 } 1291 } 1292 spin_unlock(&lu_gps_lock); 1293 1294 return NULL; 1295 } 1296 1297 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) 1298 { 1299 spin_lock(&lu_gps_lock); 1300 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1301 spin_unlock(&lu_gps_lock); 1302 } 1303 1304 /* 1305 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1306 */ 1307 void __core_alua_attach_lu_gp_mem( 1308 struct t10_alua_lu_gp_member *lu_gp_mem, 1309 struct t10_alua_lu_gp *lu_gp) 1310 { 1311 spin_lock(&lu_gp->lu_gp_lock); 1312 lu_gp_mem->lu_gp = lu_gp; 1313 lu_gp_mem->lu_gp_assoc = 1; 1314 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); 1315 lu_gp->lu_gp_members++; 1316 spin_unlock(&lu_gp->lu_gp_lock); 1317 } 1318 1319 /* 1320 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1321 */ 1322 void __core_alua_drop_lu_gp_mem( 1323 struct t10_alua_lu_gp_member *lu_gp_mem, 1324 struct t10_alua_lu_gp *lu_gp) 1325 { 1326 spin_lock(&lu_gp->lu_gp_lock); 1327 list_del(&lu_gp_mem->lu_gp_mem_list); 1328 lu_gp_mem->lu_gp = NULL; 1329 lu_gp_mem->lu_gp_assoc = 0; 1330 lu_gp->lu_gp_members--; 1331 spin_unlock(&lu_gp->lu_gp_lock); 1332 } 1333 1334 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, 1335 const char *name, int def_group) 1336 { 1337 struct t10_alua_tg_pt_gp *tg_pt_gp; 1338 1339 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); 1340 if (!tg_pt_gp) { 1341 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); 1342 return NULL; 1343 } 1344 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1345 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); 1346 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1347 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1348 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1349 tg_pt_gp->tg_pt_gp_dev = dev; 1350 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1351 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1352 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1353 /* 1354 * Enable both explicit and implicit ALUA support by default 1355 */ 1356 tg_pt_gp->tg_pt_gp_alua_access_type = 1357 TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; 1358 /* 1359 * Set the default Active/NonOptimized Delay in milliseconds 1360 */ 1361 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1362 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1363 tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; 1364 1365 /* 1366 * Enable all supported states 1367 */ 1368 tg_pt_gp->tg_pt_gp_alua_supported_states = 1369 ALUA_T_SUP | ALUA_O_SUP | 1370 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; 1371 1372 if (def_group) { 1373 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1374 tg_pt_gp->tg_pt_gp_id = 1375 dev->t10_alua.alua_tg_pt_gps_counter++; 1376 tg_pt_gp->tg_pt_gp_valid_id = 1; 1377 dev->t10_alua.alua_tg_pt_gps_count++; 1378 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1379 &dev->t10_alua.tg_pt_gps_list); 1380 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1381 } 1382 1383 return tg_pt_gp; 1384 } 1385 1386 int core_alua_set_tg_pt_gp_id( 1387 struct t10_alua_tg_pt_gp *tg_pt_gp, 1388 u16 tg_pt_gp_id) 1389 { 1390 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1391 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; 1392 u16 tg_pt_gp_id_tmp; 1393 1394 /* 1395 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1396 */ 1397 if (tg_pt_gp->tg_pt_gp_valid_id) { 1398 pr_warn("ALUA TG PT Group already has a valid ID," 1399 " ignoring request\n"); 1400 return -EINVAL; 1401 } 1402 1403 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1404 if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1405 pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1406 " 0x0000ffff reached\n"); 1407 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1408 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1409 return -ENOSPC; 1410 } 1411 again: 1412 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : 1413 dev->t10_alua.alua_tg_pt_gps_counter++; 1414 1415 list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list, 1416 tg_pt_gp_list) { 1417 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1418 if (!tg_pt_gp_id) 1419 goto again; 1420 1421 pr_err("ALUA Target Port Group ID: %hu already" 1422 " exists, ignoring request\n", tg_pt_gp_id); 1423 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1424 return -EINVAL; 1425 } 1426 } 1427 1428 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; 1429 tg_pt_gp->tg_pt_gp_valid_id = 1; 1430 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1431 &dev->t10_alua.tg_pt_gps_list); 1432 dev->t10_alua.alua_tg_pt_gps_count++; 1433 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1434 1435 return 0; 1436 } 1437 1438 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 1439 struct se_port *port) 1440 { 1441 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1442 1443 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, 1444 GFP_KERNEL); 1445 if (!tg_pt_gp_mem) { 1446 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1447 return ERR_PTR(-ENOMEM); 1448 } 1449 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1450 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1451 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); 1452 1453 tg_pt_gp_mem->tg_pt = port; 1454 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1455 1456 return tg_pt_gp_mem; 1457 } 1458 1459 void core_alua_free_tg_pt_gp( 1460 struct t10_alua_tg_pt_gp *tg_pt_gp) 1461 { 1462 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1463 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; 1464 1465 /* 1466 * Once we have reached this point, config_item_put() has already 1467 * been called from target_core_alua_drop_tg_pt_gp(). 1468 * 1469 * Here we remove *tg_pt_gp from the global list so that 1470 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS 1471 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1472 */ 1473 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1474 list_del(&tg_pt_gp->tg_pt_gp_list); 1475 dev->t10_alua.alua_tg_pt_gps_counter--; 1476 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1477 1478 /* 1479 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1480 * core_alua_get_tg_pt_gp_by_name() in 1481 * target_core_configfs.c:target_core_store_alua_tg_pt_gp() 1482 * to be released with core_alua_put_tg_pt_gp_from_name(). 1483 */ 1484 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) 1485 cpu_relax(); 1486 1487 /* 1488 * Release reference to struct t10_alua_tg_pt_gp from all associated 1489 * struct se_port. 1490 */ 1491 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1492 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, 1493 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { 1494 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1495 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1496 tg_pt_gp->tg_pt_gp_members--; 1497 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1498 } 1499 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1500 /* 1501 * tg_pt_gp_mem is associated with a single 1502 * se_port->sep_alua_tg_pt_gp_mem, and is released via 1503 * core_alua_free_tg_pt_gp_mem(). 1504 * 1505 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1506 * assume we want to re-associate a given tg_pt_gp_mem with 1507 * default_tg_pt_gp. 1508 */ 1509 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1510 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { 1511 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1512 dev->t10_alua.default_tg_pt_gp); 1513 } else 1514 tg_pt_gp_mem->tg_pt_gp = NULL; 1515 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1516 1517 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1518 } 1519 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1520 1521 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1522 } 1523 1524 void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1525 { 1526 struct t10_alua_tg_pt_gp *tg_pt_gp; 1527 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1528 1529 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1530 if (!tg_pt_gp_mem) 1531 return; 1532 1533 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) 1534 cpu_relax(); 1535 1536 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1537 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1538 if (tg_pt_gp) { 1539 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1540 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1541 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1542 tg_pt_gp->tg_pt_gp_members--; 1543 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1544 } 1545 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1546 tg_pt_gp_mem->tg_pt_gp = NULL; 1547 } 1548 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1549 1550 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); 1551 } 1552 1553 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1554 struct se_device *dev, const char *name) 1555 { 1556 struct t10_alua_tg_pt_gp *tg_pt_gp; 1557 struct config_item *ci; 1558 1559 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1560 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1561 tg_pt_gp_list) { 1562 if (!tg_pt_gp->tg_pt_gp_valid_id) 1563 continue; 1564 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1565 if (!strcmp(config_item_name(ci), name)) { 1566 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1567 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1568 return tg_pt_gp; 1569 } 1570 } 1571 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1572 1573 return NULL; 1574 } 1575 1576 static void core_alua_put_tg_pt_gp_from_name( 1577 struct t10_alua_tg_pt_gp *tg_pt_gp) 1578 { 1579 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1580 1581 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1582 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1583 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1584 } 1585 1586 /* 1587 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1588 */ 1589 void __core_alua_attach_tg_pt_gp_mem( 1590 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1591 struct t10_alua_tg_pt_gp *tg_pt_gp) 1592 { 1593 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1594 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; 1595 tg_pt_gp_mem->tg_pt_gp_assoc = 1; 1596 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, 1597 &tg_pt_gp->tg_pt_gp_mem_list); 1598 tg_pt_gp->tg_pt_gp_members++; 1599 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1600 } 1601 1602 /* 1603 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1604 */ 1605 static void __core_alua_drop_tg_pt_gp_mem( 1606 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1607 struct t10_alua_tg_pt_gp *tg_pt_gp) 1608 { 1609 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1610 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1611 tg_pt_gp_mem->tg_pt_gp = NULL; 1612 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1613 tg_pt_gp->tg_pt_gp_members--; 1614 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1615 } 1616 1617 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) 1618 { 1619 struct config_item *tg_pt_ci; 1620 struct t10_alua_tg_pt_gp *tg_pt_gp; 1621 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1622 ssize_t len = 0; 1623 1624 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1625 if (!tg_pt_gp_mem) 1626 return len; 1627 1628 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1629 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1630 if (tg_pt_gp) { 1631 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1632 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" 1633 " %hu\nTG Port Primary Access State: %s\nTG Port " 1634 "Primary Access Status: %s\nTG Port Secondary Access" 1635 " State: %s\nTG Port Secondary Access Status: %s\n", 1636 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1637 core_alua_dump_state(atomic_read( 1638 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1639 core_alua_dump_status( 1640 tg_pt_gp->tg_pt_gp_alua_access_status), 1641 (atomic_read(&port->sep_tg_pt_secondary_offline)) ? 1642 "Offline" : "None", 1643 core_alua_dump_status(port->sep_tg_pt_secondary_stat)); 1644 } 1645 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1646 1647 return len; 1648 } 1649 1650 ssize_t core_alua_store_tg_pt_gp_info( 1651 struct se_port *port, 1652 const char *page, 1653 size_t count) 1654 { 1655 struct se_portal_group *tpg; 1656 struct se_lun *lun; 1657 struct se_device *dev = port->sep_lun->lun_se_dev; 1658 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1659 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1660 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1661 int move = 0; 1662 1663 tpg = port->sep_tpg; 1664 lun = port->sep_lun; 1665 1666 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1667 if (!tg_pt_gp_mem) 1668 return 0; 1669 1670 if (count > TG_PT_GROUP_NAME_BUF) { 1671 pr_err("ALUA Target Port Group alias too large!\n"); 1672 return -EINVAL; 1673 } 1674 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 1675 memcpy(buf, page, count); 1676 /* 1677 * Any ALUA target port group alias besides "NULL" means we will be 1678 * making a new group association. 1679 */ 1680 if (strcmp(strstrip(buf), "NULL")) { 1681 /* 1682 * core_alua_get_tg_pt_gp_by_name() will increment reference to 1683 * struct t10_alua_tg_pt_gp. This reference is released with 1684 * core_alua_put_tg_pt_gp_from_name() below. 1685 */ 1686 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, 1687 strstrip(buf)); 1688 if (!tg_pt_gp_new) 1689 return -ENODEV; 1690 } 1691 1692 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1693 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1694 if (tg_pt_gp) { 1695 /* 1696 * Clearing an existing tg_pt_gp association, and replacing 1697 * with the default_tg_pt_gp. 1698 */ 1699 if (!tg_pt_gp_new) { 1700 pr_debug("Target_Core_ConfigFS: Moving" 1701 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1702 " alua/%s, ID: %hu back to" 1703 " default_tg_pt_gp\n", 1704 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1705 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1706 config_item_name(&lun->lun_group.cg_item), 1707 config_item_name( 1708 &tg_pt_gp->tg_pt_gp_group.cg_item), 1709 tg_pt_gp->tg_pt_gp_id); 1710 1711 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1712 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1713 dev->t10_alua.default_tg_pt_gp); 1714 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1715 1716 return count; 1717 } 1718 /* 1719 * Removing existing association of tg_pt_gp_mem with tg_pt_gp 1720 */ 1721 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1722 move = 1; 1723 } 1724 /* 1725 * Associate tg_pt_gp_mem with tg_pt_gp_new. 1726 */ 1727 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); 1728 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1729 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1730 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 1731 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1732 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1733 config_item_name(&lun->lun_group.cg_item), 1734 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), 1735 tg_pt_gp_new->tg_pt_gp_id); 1736 1737 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1738 return count; 1739 } 1740 1741 ssize_t core_alua_show_access_type( 1742 struct t10_alua_tg_pt_gp *tg_pt_gp, 1743 char *page) 1744 { 1745 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && 1746 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) 1747 return sprintf(page, "Implicit and Explicit\n"); 1748 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) 1749 return sprintf(page, "Implicit\n"); 1750 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) 1751 return sprintf(page, "Explicit\n"); 1752 else 1753 return sprintf(page, "None\n"); 1754 } 1755 1756 ssize_t core_alua_store_access_type( 1757 struct t10_alua_tg_pt_gp *tg_pt_gp, 1758 const char *page, 1759 size_t count) 1760 { 1761 unsigned long tmp; 1762 int ret; 1763 1764 ret = kstrtoul(page, 0, &tmp); 1765 if (ret < 0) { 1766 pr_err("Unable to extract alua_access_type\n"); 1767 return ret; 1768 } 1769 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1770 pr_err("Illegal value for alua_access_type:" 1771 " %lu\n", tmp); 1772 return -EINVAL; 1773 } 1774 if (tmp == 3) 1775 tg_pt_gp->tg_pt_gp_alua_access_type = 1776 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; 1777 else if (tmp == 2) 1778 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; 1779 else if (tmp == 1) 1780 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; 1781 else 1782 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 1783 1784 return count; 1785 } 1786 1787 ssize_t core_alua_show_nonop_delay_msecs( 1788 struct t10_alua_tg_pt_gp *tg_pt_gp, 1789 char *page) 1790 { 1791 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); 1792 } 1793 1794 ssize_t core_alua_store_nonop_delay_msecs( 1795 struct t10_alua_tg_pt_gp *tg_pt_gp, 1796 const char *page, 1797 size_t count) 1798 { 1799 unsigned long tmp; 1800 int ret; 1801 1802 ret = kstrtoul(page, 0, &tmp); 1803 if (ret < 0) { 1804 pr_err("Unable to extract nonop_delay_msecs\n"); 1805 return ret; 1806 } 1807 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1808 pr_err("Passed nonop_delay_msecs: %lu, exceeds" 1809 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 1810 ALUA_MAX_NONOP_DELAY_MSECS); 1811 return -EINVAL; 1812 } 1813 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; 1814 1815 return count; 1816 } 1817 1818 ssize_t core_alua_show_trans_delay_msecs( 1819 struct t10_alua_tg_pt_gp *tg_pt_gp, 1820 char *page) 1821 { 1822 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1823 } 1824 1825 ssize_t core_alua_store_trans_delay_msecs( 1826 struct t10_alua_tg_pt_gp *tg_pt_gp, 1827 const char *page, 1828 size_t count) 1829 { 1830 unsigned long tmp; 1831 int ret; 1832 1833 ret = kstrtoul(page, 0, &tmp); 1834 if (ret < 0) { 1835 pr_err("Unable to extract trans_delay_msecs\n"); 1836 return ret; 1837 } 1838 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1839 pr_err("Passed trans_delay_msecs: %lu, exceeds" 1840 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 1841 ALUA_MAX_TRANS_DELAY_MSECS); 1842 return -EINVAL; 1843 } 1844 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; 1845 1846 return count; 1847 } 1848 1849 ssize_t core_alua_show_implicit_trans_secs( 1850 struct t10_alua_tg_pt_gp *tg_pt_gp, 1851 char *page) 1852 { 1853 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); 1854 } 1855 1856 ssize_t core_alua_store_implicit_trans_secs( 1857 struct t10_alua_tg_pt_gp *tg_pt_gp, 1858 const char *page, 1859 size_t count) 1860 { 1861 unsigned long tmp; 1862 int ret; 1863 1864 ret = kstrtoul(page, 0, &tmp); 1865 if (ret < 0) { 1866 pr_err("Unable to extract implicit_trans_secs\n"); 1867 return ret; 1868 } 1869 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { 1870 pr_err("Passed implicit_trans_secs: %lu, exceeds" 1871 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, 1872 ALUA_MAX_IMPLICIT_TRANS_SECS); 1873 return -EINVAL; 1874 } 1875 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp; 1876 1877 return count; 1878 } 1879 1880 ssize_t core_alua_show_preferred_bit( 1881 struct t10_alua_tg_pt_gp *tg_pt_gp, 1882 char *page) 1883 { 1884 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); 1885 } 1886 1887 ssize_t core_alua_store_preferred_bit( 1888 struct t10_alua_tg_pt_gp *tg_pt_gp, 1889 const char *page, 1890 size_t count) 1891 { 1892 unsigned long tmp; 1893 int ret; 1894 1895 ret = kstrtoul(page, 0, &tmp); 1896 if (ret < 0) { 1897 pr_err("Unable to extract preferred ALUA value\n"); 1898 return ret; 1899 } 1900 if ((tmp != 0) && (tmp != 1)) { 1901 pr_err("Illegal value for preferred ALUA: %lu\n", tmp); 1902 return -EINVAL; 1903 } 1904 tg_pt_gp->tg_pt_gp_pref = (int)tmp; 1905 1906 return count; 1907 } 1908 1909 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 1910 { 1911 if (!lun->lun_sep) 1912 return -ENODEV; 1913 1914 return sprintf(page, "%d\n", 1915 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); 1916 } 1917 1918 ssize_t core_alua_store_offline_bit( 1919 struct se_lun *lun, 1920 const char *page, 1921 size_t count) 1922 { 1923 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1924 unsigned long tmp; 1925 int ret; 1926 1927 if (!lun->lun_sep) 1928 return -ENODEV; 1929 1930 ret = kstrtoul(page, 0, &tmp); 1931 if (ret < 0) { 1932 pr_err("Unable to extract alua_tg_pt_offline value\n"); 1933 return ret; 1934 } 1935 if ((tmp != 0) && (tmp != 1)) { 1936 pr_err("Illegal value for alua_tg_pt_offline: %lu\n", 1937 tmp); 1938 return -EINVAL; 1939 } 1940 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 1941 if (!tg_pt_gp_mem) { 1942 pr_err("Unable to locate *tg_pt_gp_mem\n"); 1943 return -EINVAL; 1944 } 1945 1946 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, 1947 lun->lun_sep, 0, (int)tmp); 1948 if (ret < 0) 1949 return -EINVAL; 1950 1951 return count; 1952 } 1953 1954 ssize_t core_alua_show_secondary_status( 1955 struct se_lun *lun, 1956 char *page) 1957 { 1958 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); 1959 } 1960 1961 ssize_t core_alua_store_secondary_status( 1962 struct se_lun *lun, 1963 const char *page, 1964 size_t count) 1965 { 1966 unsigned long tmp; 1967 int ret; 1968 1969 ret = kstrtoul(page, 0, &tmp); 1970 if (ret < 0) { 1971 pr_err("Unable to extract alua_tg_pt_status\n"); 1972 return ret; 1973 } 1974 if ((tmp != ALUA_STATUS_NONE) && 1975 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 1976 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { 1977 pr_err("Illegal value for alua_tg_pt_status: %lu\n", 1978 tmp); 1979 return -EINVAL; 1980 } 1981 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; 1982 1983 return count; 1984 } 1985 1986 ssize_t core_alua_show_secondary_write_metadata( 1987 struct se_lun *lun, 1988 char *page) 1989 { 1990 return sprintf(page, "%d\n", 1991 lun->lun_sep->sep_tg_pt_secondary_write_md); 1992 } 1993 1994 ssize_t core_alua_store_secondary_write_metadata( 1995 struct se_lun *lun, 1996 const char *page, 1997 size_t count) 1998 { 1999 unsigned long tmp; 2000 int ret; 2001 2002 ret = kstrtoul(page, 0, &tmp); 2003 if (ret < 0) { 2004 pr_err("Unable to extract alua_tg_pt_write_md\n"); 2005 return ret; 2006 } 2007 if ((tmp != 0) && (tmp != 1)) { 2008 pr_err("Illegal value for alua_tg_pt_write_md:" 2009 " %lu\n", tmp); 2010 return -EINVAL; 2011 } 2012 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; 2013 2014 return count; 2015 } 2016 2017 int core_setup_alua(struct se_device *dev) 2018 { 2019 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && 2020 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 2021 struct t10_alua_lu_gp_member *lu_gp_mem; 2022 2023 /* 2024 * Associate this struct se_device with the default ALUA 2025 * LUN Group. 2026 */ 2027 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); 2028 if (IS_ERR(lu_gp_mem)) 2029 return PTR_ERR(lu_gp_mem); 2030 2031 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2032 __core_alua_attach_lu_gp_mem(lu_gp_mem, 2033 default_lu_gp); 2034 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2035 2036 pr_debug("%s: Adding to default ALUA LU Group:" 2037 " core/alua/lu_gps/default_lu_gp\n", 2038 dev->transport->name); 2039 } 2040 2041 return 0; 2042 } 2043