/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
						 int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
		struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 20;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN payload.
	 */
	put_unaligned_be16(rd_len, &buf[2]);

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
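
/*
 * For reference, the response payload built by
 * target_emulate_report_target_port_groups() below has the following
 * layout (a summary of spc4r17 section 6.27; offsets are from the start
 * of the DataIN buffer):
 *
 *   byte 0..3   RETURN DATA LENGTH
 *   byte 4..7   present only for the extended header format (PARAMETER
 *               DATA FORMAT = 01h): format/flags in byte 4, IMPLICIT
 *               TRANSITION TIME in byte 5
 *
 * followed by one eight byte descriptor per target port group:
 *
 *   byte 0      PREF (bit 7) | ASYMMETRIC ACCESS STATE (bits 3:0)
 *   byte 1      supported asymmetric access state bits
 *   byte 2..3   TARGET PORT GROUP identifier
 *   byte 4      reserved
 *   byte 5      STATUS CODE
 *   byte 6      vendor specific
 *   byte 7      TARGET PORT COUNT
 *
 * and one four byte entry per target port in the group (two obsolete
 * bytes plus the RELATIVE TARGET PORT IDENTIFIER).
 */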

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_lun *lun;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type.
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		return TCM_INVALID_CDB_FIELD;
	}
	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
			buf[off++] = (lun->lun_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN payload.
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for the ALUA target port group.
		 */
		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
		if (tg_pt_gp)
			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
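
/*
 * For reference, the SET TARGET PORT GROUPS parameter list parsed by
 * target_emulate_set_target_port_groups() below consists of a four byte
 * reserved header followed by any number of four byte descriptors:
 *
 *   byte 0      ASYMMETRIC ACCESS STATE (bits 3:0)
 *   byte 1      reserved
 *   byte 2..3   TARGET PORT GROUP identifier for a primary state change,
 *               or RELATIVE TARGET PORT IDENTIFIER for a secondary
 *               (OFFLINE) state change
 */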

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *l_lun = cmd->se_lun;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	unsigned char *buf;
	unsigned char *ptr;
	sense_reason_t rc = TCM_NO_SENSE;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, valid_states;
	u16 tg_pt_id, rtpi;

	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	spin_lock(&l_lun->lun_tg_pt_gp_lock);
	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
		pr_err("Unable to access l_lun->tg_pt_gp\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}

	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
			" while TPGS_EXPLICIT_ALUA is disabled\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	spin_unlock(&l_lun->lun_tg_pt_gp_lock);

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		bool found = false;
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, valid_states,
						&primary, 1);
		if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			goto out;
		}

		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

				if (!core_alua_do_port_transition(tg_pt_gp,
						dev, l_lun, nacl,
						alua_access_state, 1))
					found = true;

				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
				break;
			}
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		} else {
			struct se_lun *lun;

			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(lun, &dev->dev_sep_list,
					lun_dev_link) {
				if (lun->lun_rtpi != rtpi)
					continue;

				// XXX: racy unlock
				spin_unlock(&dev->se_port_lock);

				if (!core_alua_set_tg_pt_secondary_state(
						lun, 1, 1))
					found = true;

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
		}

		if (!found) {
			rc = TCM_INVALID_PARAMETER_LIST;
			goto out;
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, GOOD);
	return rc;
}
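
/*
 * When one of the state checks below rejects a command, the failure is
 * reported to the initiator as CHECK CONDITION with sense key NOT READY
 * and ASC 0x04 (LOGICAL UNIT NOT ACCESSIBLE); the ASCQ passed to
 * set_ascq() identifies which ALUA state caused the rejection.
 */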

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
	/*
	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
	 * The ALUA additional sense code qualifier (ASCQ) is determined
	 * by the ALUA primary or secondary access state.
	 */
	pr_debug("[%s]: ALUA TG Port not available, "
		"SenseKey: NOT_READY, ASC/ASCQ: "
		"0x04/0x%02x\n",
		cmd->se_tfo->get_fabric_name(), alua_ascq);

	cmd->scsi_asc = 0x04;
	cmd->scsi_ascq = alua_ascq;
}

static inline void core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
}
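
/*
 * A worked example of the segment lookup done below, using made up
 * numbers: with lba_map_segment_size = 256 and lba_map_segment_multiplier
 * = 4, the device repeats over a window of 4 * 256 = 1024 LBAs.  For LBA
 * 1300, do_div() yields 1300 % 1024 = 276, which falls in the map entry
 * covering first_lba 256 .. 256 + 255, so that entry's per-group states
 * decide whether this command may run.  Without a multiplier the LBA is
 * compared directly against each entry's [first_lba, last_lba] range.
 */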

static inline int core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				u64 tmp = lba;
				start_lba = do_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					lba += segment_size;
					cur_map = map;
					break;
				}
			} else {
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					lba = last_lba + 1;
					cur_map = map;
					break;
				}
			}
		}
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id !=
			    tg_pt_gp->tg_pt_gp_id)
				continue;
			switch (map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
				return 1;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
				return 1;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case READ_CAPACITY:
		return 0;
	case SERVICE_ACTION_IN_16:
		switch (cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
		return 1;
	}

	return 0;
}

/*
 * Check the ALUA state for the LUN a command was received on and decide
 * whether it may be processed: returns 0 when the command is allowed,
 * TCM_CHECK_CONDITION_NOT_READY when the current primary or secondary
 * access state forbids it, and TCM_INVALID_CDB_FIELD for an unknown state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_lun *lun = cmd->se_lun;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int out_alua_state, nonop_delay_msecs;

	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
		return 0;
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
		return 0;

	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
		return TCM_CHECK_CONDITION_NOT_READY;
	}

	if (!lun->lun_tg_pt_gp)
		return 0;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

	// XXX: keeps using tg_pt_gp without reference after unlock
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP.
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (core_alua_state_standby(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (core_alua_state_unavailable(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		if (core_alua_state_transition(cmd, cdb))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
			return TCM_CHECK_CONDITION_NOT_READY;
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}
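
/*
 * The 'valid' argument below is the group's tg_pt_gp_alua_supported_states
 * bitmask (ALUA_AO_SUP, ALUA_AN_SUP, ALUA_S_SUP, ALUA_U_SUP, ALUA_LBD_SUP,
 * ALUA_O_SUP, ALUA_T_SUP), i.e. the same bits reported to initiators in
 * the supported-states byte of each REPORT TARGET PORT GROUPS descriptor.
 * A requested state whose bit is clear is rejected here.
 */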

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
	/*
	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
	 * defined as primary target port asymmetric access states.
	 */
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		if (!(valid & ALUA_AO_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		if (!(valid & ALUA_AN_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (!(valid & ALUA_S_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (!(valid & ALUA_U_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		if (!(valid & ALUA_O_SUP))
			goto not_supported;
		*primary = 0;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		if (!(valid & ALUA_T_SUP) || explicit)
			/*
			 * Transitioning is set internally and by the tcmu
			 * daemon, and cannot be selected through a STPG.
			 */
			goto not_supported;
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return TCM_INVALID_PARAMETER_LIST;
	}

	return 0;

not_supported:
	pr_err("ALUA access state %s not supported\n",
	       core_alua_dump_state(state));
	return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	case ALUA_ACCESS_STATE_TRANSITION:
		return "Transitioning";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
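
/*
 * ALUA state and status survive target restarts via small key=value files
 * written below db_root (by default /var/target; an assumption here, as
 * the path is configurable).  A primary metadata file written by
 * core_alua_update_tpg_primary_metadata() looks like:
 *
 *   tg_pt_gp_id=16
 *   alua_access_state=0x00
 *   alua_access_status=0x01
 *
 * Failure to write the file is logged but never fails the transition
 * itself.
 */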

static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	int ret;

	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
		return -ENODEV;
	}
	ret = kernel_write(file, md_buf, md_buf_len, 0);
	if (ret < 0)
		pr_err("Error writing ALUA metadata file: %s\n", path);
	fput(file);
	return (ret < 0) ? -EIO : 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len, rc;

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id,
			tg_pt_gp->tg_pt_gp_alua_access_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/tpgs_%s/%s", db_root,
			&wwn->unit_serial[0],
			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);
	return rc;
}

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;
	struct se_lun_acl *lacl;

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		if (!percpu_ref_tryget_live(&lun->lun_ref))
			continue;
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
			lacl = rcu_dereference_check(se_deve->se_lun_acl,
					lockdep_is_held(&lun->lun_deve_lock));

			/*
			 * spc4r37 p.242:
			 * After an explicit target port asymmetric access
			 * state change, a device server shall establish a
			 * unit attention condition with the additional sense
			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
			 * the initiator port associated with every I_T nexus
			 * other than the I_T nexus on which the SET TARGET
			 * PORT GROUPS command was received.
			 */
			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
				continue;

			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
				continue;

			core_scsi3_ua_allocate(se_deve, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock(&lun->lun_deve_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		percpu_ref_put(&lun->lun_ref);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
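
/*
 * A primary state change below runs in a fixed sequence: save the old
 * state and publish ALUA_ACCESS_STATE_TRANSITION, queue unit attentions,
 * optionally sleep for tg_pt_gp_trans_delay_msecs, publish the requested
 * state, write the primary metadata if enabled, then queue unit attentions
 * again so initiators re-read the final state, all under
 * tg_pt_gp_transition_mutex.
 */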

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	int prev_state;

	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	/* Nothing to be done here */
	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return -EAGAIN;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	core_alua_queue_state_change_ua(tg_pt_gp);

	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;

	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
	}

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id,
		core_alua_dump_state(prev_state),
		core_alua_dump_state(new_state));

	core_alua_queue_state_change_ua(tg_pt_gp);

	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	return 0;
}
"explicit" : 1080 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1081 tg_pt_gp->tg_pt_gp_id, 1082 core_alua_dump_state(prev_state), 1083 core_alua_dump_state(new_state)); 1084 1085 core_alua_queue_state_change_ua(tg_pt_gp); 1086 1087 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1088 return 0; 1089 } 1090 1091 int core_alua_do_port_transition( 1092 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 1093 struct se_device *l_dev, 1094 struct se_lun *l_lun, 1095 struct se_node_acl *l_nacl, 1096 int new_state, 1097 int explicit) 1098 { 1099 struct se_device *dev; 1100 struct t10_alua_lu_gp *lu_gp; 1101 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 1102 struct t10_alua_tg_pt_gp *tg_pt_gp; 1103 int primary, valid_states, rc = 0; 1104 1105 if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 1106 return -ENODEV; 1107 1108 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 1109 if (core_alua_check_transition(new_state, valid_states, &primary, 1110 explicit) != 0) 1111 return -EINVAL; 1112 1113 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 1114 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1115 lu_gp = local_lu_gp_mem->lu_gp; 1116 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1117 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1118 /* 1119 * For storage objects that are members of the 'default_lu_gp', 1120 * we only do transition on the passed *l_tp_pt_gp, and not 1121 * on all of the matching target port groups IDs in default_lu_gp. 1122 */ 1123 if (!lu_gp->lu_gp_id) { 1124 /* 1125 * core_alua_do_transition_tg_pt() will always return 1126 * success. 1127 */ 1128 l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1129 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1130 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1131 new_state, explicit); 1132 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); 1133 return rc; 1134 } 1135 /* 1136 * For all other LU groups aside from 'default_lu_gp', walk all of 1137 * the associated storage objects looking for a matching target port 1138 * group ID from the local target port group. 1139 */ 1140 spin_lock(&lu_gp->lu_gp_lock); 1141 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 1142 lu_gp_mem_list) { 1143 1144 dev = lu_gp_mem->lu_gp_mem_dev; 1145 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); 1146 spin_unlock(&lu_gp->lu_gp_lock); 1147 1148 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1149 list_for_each_entry(tg_pt_gp, 1150 &dev->t10_alua.tg_pt_gps_list, 1151 tg_pt_gp_list) { 1152 1153 if (!tg_pt_gp->tg_pt_gp_valid_id) 1154 continue; 1155 /* 1156 * If the target behavior port asymmetric access state 1157 * is changed for any target port group accessible via 1158 * a logical unit within a LU group, the target port 1159 * behavior group asymmetric access states for the same 1160 * target port group accessible via other logical units 1161 * in that LU group will also change. 1162 */ 1163 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 1164 continue; 1165 1166 if (l_tg_pt_gp == tg_pt_gp) { 1167 tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1168 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1169 } else { 1170 tg_pt_gp->tg_pt_gp_alua_lun = NULL; 1171 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1172 } 1173 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 1174 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1175 /* 1176 * core_alua_do_transition_tg_pt() will always return 1177 * success. 

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
	struct se_portal_group *se_tpg = lun->lun_tpg;
	unsigned char *md_buf;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len, rc;

	mutex_lock(&lun->lun_tg_pt_md_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		rc = -ENOMEM;
		goto out_unlock;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&lun->lun_tg_pt_secondary_offline),
			lun->lun_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
			db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			lun->unpacked_lun);

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);

out_unlock:
	mutex_unlock(&lun->lun_tg_pt_md_mutex);
	return rc;
}

static int core_alua_set_tg_pt_secondary_state(
	struct se_lun *lun,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&lun->lun_tg_pt_gp_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_lun
	 */
	if (offline)
		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
	else
		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

	lun->lun_tg_pt_secondary_stat = (explicit) ?
			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&lun->lun_tg_pt_gp_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (lun->lun_tg_pt_secondary_write_md)
		core_alua_update_tpg_secondary_metadata(lun);

	return 0;
}
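
/*
 * A minimal sketch of how a caller might build a referrals map with the
 * helpers below; the segment numbers and tg_pt_gp_id are made up:
 *
 *	LIST_HEAD(lba_list);
 *	struct t10_alua_lba_map *map;
 *
 *	map = core_alua_allocate_lba_map(&lba_list, 0, 511);
 *	if (!IS_ERR(map)) {
 *		core_alua_allocate_lba_map_mem(map, tg_pt_gp_id,
 *				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *		core_alua_set_lba_map(dev, &lba_list, 512, 0);
 *	}
 *
 * core_alua_set_lba_map() splices the new list into the device, flips
 * ALUA_LBD_SUP on every valid target port group and frees the old map.
 */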
"OFFLINE" : "ONLINE"); 1284 1285 spin_unlock(&lun->lun_tg_pt_gp_lock); 1286 /* 1287 * Do the optional transition delay after we set the secondary 1288 * ALUA access state. 1289 */ 1290 if (trans_delay_msecs != 0) 1291 msleep_interruptible(trans_delay_msecs); 1292 /* 1293 * See if we need to update the ALUA fabric port metadata for 1294 * secondary state and status 1295 */ 1296 if (lun->lun_tg_pt_secondary_write_md) 1297 core_alua_update_tpg_secondary_metadata(lun); 1298 1299 return 0; 1300 } 1301 1302 struct t10_alua_lba_map * 1303 core_alua_allocate_lba_map(struct list_head *list, 1304 u64 first_lba, u64 last_lba) 1305 { 1306 struct t10_alua_lba_map *lba_map; 1307 1308 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); 1309 if (!lba_map) { 1310 pr_err("Unable to allocate struct t10_alua_lba_map\n"); 1311 return ERR_PTR(-ENOMEM); 1312 } 1313 INIT_LIST_HEAD(&lba_map->lba_map_mem_list); 1314 lba_map->lba_map_first_lba = first_lba; 1315 lba_map->lba_map_last_lba = last_lba; 1316 1317 list_add_tail(&lba_map->lba_map_list, list); 1318 return lba_map; 1319 } 1320 1321 int 1322 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, 1323 int pg_id, int state) 1324 { 1325 struct t10_alua_lba_map_member *lba_map_mem; 1326 1327 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, 1328 lba_map_mem_list) { 1329 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { 1330 pr_err("Duplicate pg_id %d in lba_map\n", pg_id); 1331 return -EINVAL; 1332 } 1333 } 1334 1335 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); 1336 if (!lba_map_mem) { 1337 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); 1338 return -ENOMEM; 1339 } 1340 lba_map_mem->lba_map_mem_alua_state = state; 1341 lba_map_mem->lba_map_mem_alua_pg_id = pg_id; 1342 1343 list_add_tail(&lba_map_mem->lba_map_mem_list, 1344 &lba_map->lba_map_mem_list); 1345 return 0; 1346 } 1347 1348 void 1349 core_alua_free_lba_map(struct list_head *lba_list) 1350 { 1351 struct t10_alua_lba_map *lba_map, *lba_map_tmp; 1352 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; 1353 1354 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, 1355 lba_map_list) { 1356 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, 1357 &lba_map->lba_map_mem_list, 1358 lba_map_mem_list) { 1359 list_del(&lba_map_mem->lba_map_mem_list); 1360 kmem_cache_free(t10_alua_lba_map_mem_cache, 1361 lba_map_mem); 1362 } 1363 list_del(&lba_map->lba_map_list); 1364 kmem_cache_free(t10_alua_lba_map_cache, lba_map); 1365 } 1366 } 1367 1368 void 1369 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, 1370 int segment_size, int segment_mult) 1371 { 1372 struct list_head old_lba_map_list; 1373 struct t10_alua_tg_pt_gp *tg_pt_gp; 1374 int activate = 0, supported; 1375 1376 INIT_LIST_HEAD(&old_lba_map_list); 1377 spin_lock(&dev->t10_alua.lba_map_lock); 1378 dev->t10_alua.lba_map_segment_size = segment_size; 1379 dev->t10_alua.lba_map_segment_multiplier = segment_mult; 1380 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); 1381 if (lba_map_list) { 1382 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); 1383 activate = 1; 1384 } 1385 spin_unlock(&dev->t10_alua.lba_map_lock); 1386 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1387 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1388 tg_pt_gp_list) { 1389 1390 if (!tg_pt_gp->tg_pt_gp_valid_id) 1391 continue; 1392 supported = tg_pt_gp->tg_pt_gp_alua_supported_states; 1393 if (activate) 1394 supported |= ALUA_LBD_SUP; 1395 
		else
			supported &= ~ALUA_LBD_SUP;
		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	core_alua_free_lba_map(&old_lba_map_list);
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once.
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_dev = dev;
	tg_pt_gp->tg_pt_gp_alua_access_state =
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
	    ALUA_T_SUP | ALUA_O_SUP |
	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;

	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_lun *lun, *next;

	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	dev->t10_alua.alua_tg_pt_gps_count--;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();

	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(lun, next,
			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
		list_del_init(&lun->lun_tg_pt_gp_link);
		tg_pt_gp->tg_pt_gp_members--;

		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
			__target_attach_tg_pt_gp(lun,
					dev->t10_alua.default_tg_pt_gp);
		} else
			lun->lun_tg_pt_gp = NULL;
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
		struct se_device *dev, const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;

	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	lun->lun_tg_pt_gp = tg_pt_gp;
	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_lock(&lun->lun_deve_lock);
	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
		core_scsi3_ua_allocate(se_deve, 0x3f,
				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
	spin_unlock(&lun->lun_deve_lock);
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&lun->lun_tg_pt_gp_lock);
	__target_attach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}

static void __target_detach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del_init(&lun->lun_tg_pt_gp_link);
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	lun->lun_tg_pt_gp = NULL;
}

void target_detach_tg_pt_gp(struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp)
		__target_detach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}

ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	ssize_t len = 0;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(
				tg_pt_gp->tg_pt_gp_alua_access_state),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
			"Offline" : "None",
			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
	}
	spin_unlock(&lun->lun_tg_pt_gp_lock);

	return len;
}
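
/*
 * An illustrative configfs interaction with the store handler below
 * (paths are an assumption based on the usual LIO layout under
 * /sys/kernel/config/target):
 *
 *   # associate a LUN with an existing tg_pt_gp by alias
 *   echo some_group > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 *   # move the LUN back to the device's default_tg_pt_gp
 *   echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 */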
" State: %s\nTG Port Secondary Access Status: %s\n", 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1901 core_alua_dump_state( 1902 tg_pt_gp->tg_pt_gp_alua_access_state), 1903 core_alua_dump_status( 1904 tg_pt_gp->tg_pt_gp_alua_access_status), 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1906 "Offline" : "None", 1907 core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); 1908 } 1909 spin_unlock(&lun->lun_tg_pt_gp_lock); 1910 1911 return len; 1912 } 1913 1914 ssize_t core_alua_store_tg_pt_gp_info( 1915 struct se_lun *lun, 1916 const char *page, 1917 size_t count) 1918 { 1919 struct se_portal_group *tpg = lun->lun_tpg; 1920 /* 1921 * rcu_dereference_raw protected by se_lun->lun_group symlink 1922 * reference to se_device->dev_group. 1923 */ 1924 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 1925 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1926 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1927 int move = 0; 1928 1929 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || 1930 (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 1931 return -ENODEV; 1932 1933 if (count > TG_PT_GROUP_NAME_BUF) { 1934 pr_err("ALUA Target Port Group alias too large!\n"); 1935 return -EINVAL; 1936 } 1937 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 1938 memcpy(buf, page, count); 1939 /* 1940 * Any ALUA target port group alias besides "NULL" means we will be 1941 * making a new group association. 1942 */ 1943 if (strcmp(strstrip(buf), "NULL")) { 1944 /* 1945 * core_alua_get_tg_pt_gp_by_name() will increment reference to 1946 * struct t10_alua_tg_pt_gp. This reference is released with 1947 * core_alua_put_tg_pt_gp_from_name() below. 1948 */ 1949 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, 1950 strstrip(buf)); 1951 if (!tg_pt_gp_new) 1952 return -ENODEV; 1953 } 1954 1955 spin_lock(&lun->lun_tg_pt_gp_lock); 1956 tg_pt_gp = lun->lun_tg_pt_gp; 1957 if (tg_pt_gp) { 1958 /* 1959 * Clearing an existing tg_pt_gp association, and replacing 1960 * with the default_tg_pt_gp. 1961 */ 1962 if (!tg_pt_gp_new) { 1963 pr_debug("Target_Core_ConfigFS: Moving" 1964 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1965 " alua/%s, ID: %hu back to" 1966 " default_tg_pt_gp\n", 1967 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1968 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1969 config_item_name(&lun->lun_group.cg_item), 1970 config_item_name( 1971 &tg_pt_gp->tg_pt_gp_group.cg_item), 1972 tg_pt_gp->tg_pt_gp_id); 1973 1974 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1975 __target_attach_tg_pt_gp(lun, 1976 dev->t10_alua.default_tg_pt_gp); 1977 spin_unlock(&lun->lun_tg_pt_gp_lock); 1978 1979 return count; 1980 } 1981 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1982 move = 1; 1983 } 1984 1985 __target_attach_tg_pt_gp(lun, tg_pt_gp_new); 1986 spin_unlock(&lun->lun_tg_pt_gp_lock); 1987 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1988 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}
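/*
 * implicit_trans_secs is advertised to initiators as the implicit
 * transition time in the extended header of REPORT TARGET PORT GROUPS,
 * giving the application client a base value for its transition timeout.
 */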
ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implicit_trans_secs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
		pr_err("Passed implicit_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_tg_pt_secondary_offline));
}
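/*
 * Writing 1/0 here does not flip the offline flag directly; the
 * transition is driven through core_alua_set_tg_pt_secondary_state(),
 * which updates the port's secondary ALUA state and status.
 */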
ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	unsigned long tmp;
	int ret;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return ret;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

/*
 * Set up target core ALUA emulation for a new struct se_device, unless
 * the backend handles ALUA itself (TRANSPORT_FLAG_PASSTHROUGH_ALUA) or
 * the HBA is for internal use only.
 */
int core_setup_alua(struct se_device *dev)
{
	if (!(dev->transport->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		struct t10_alua_lu_gp_member *lu_gp_mem;

		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	}

	return 0;
}