// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
						 int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
		struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 20;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	put_unaligned_be16(rd_len, &buf[2]);

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
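
/*
 * For reference, the referral descriptor layout produced by the loop above,
 * derived from the buffer offsets used there (field names per sbc3r35; the
 * zeroed kmap buffer supplies the reserved bytes):
 *
 *   Byte  0 -  2 : Reserved
 *   Byte  3      : number of target port group descriptors (pg_num)
 *   Byte  4 - 11 : FIRST USER DATA SEGMENT LBA
 *   Byte 12 - 19 : LAST USER DATA SEGMENT LBA
 *   then one 4-byte target port group descriptor per map member:
 *     Byte 0     : ASYMMETRIC ACCESS STATE (bits 3:0)
 *     Byte 1     : Reserved
 *     Byte 2 - 3 : TARGET PORT GROUP
 */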

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_lun *lun;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type.
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		return TCM_INVALID_CDB_FIELD;
	}
	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
		off += 2;

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
			off += 2;
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for the ALUA target port group.
		 */
		rcu_read_lock();
		tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
		if (tg_pt_gp)
			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
		rcu_read_unlock();
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
	return 0;
}
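
/*
 * For reference, the target port group descriptor layout emitted above,
 * derived from the buffer offsets used in the loop (see spc4r17 section
 * 6.27 for the authoritative format):
 *
 *   Byte 0     : PREF (bit 7) | ASYMMETRIC ACCESS STATE (bits 3:0)
 *   Byte 1     : supported ALUA access state bits
 *   Byte 2 - 3 : TARGET PORT GROUP identifier
 *   Byte 4     : Reserved
 *   Byte 5     : STATUS CODE
 *   Byte 6     : Vendor specific
 *   Byte 7     : TARGET PORT COUNT
 *   then one 4-byte target port descriptor per associated se_lun:
 *     Byte 0 - 1 : Obsolete
 *     Byte 2 - 3 : RELATIVE TARGET PORT IDENTIFIER
 */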

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *l_lun = cmd->se_lun;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	unsigned char *buf;
	unsigned char *ptr;
	sense_reason_t rc = TCM_NO_SENSE;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, valid_states;
	u16 tg_pt_id, rtpi;

	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	rcu_read_lock();
	l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
	if (!l_tg_pt_gp) {
		rcu_read_unlock();
		pr_err("Unable to access l_lun->tg_pt_gp\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}

	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		rcu_read_unlock();
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
				" while TPGS_EXPLICIT_ALUA is disabled\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	rcu_read_unlock();

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		bool found = false;
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, valid_states,
						&primary, 1);
		if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			goto out;
		}

		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

				if (!core_alua_do_port_transition(tg_pt_gp,
						dev, l_lun, nacl,
						alua_access_state, 1))
					found = true;

				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
				break;
			}
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		} else {
			struct se_lun *lun;

			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(lun, &dev->dev_sep_list,
							lun_dev_link) {
				if (lun->lun_rtpi != rtpi)
					continue;

				// XXX: racy unlock
				spin_unlock(&dev->se_port_lock);

				if (!core_alua_set_tg_pt_secondary_state(
						lun, 1, 1))
					found = true;

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
		}

		if (!found) {
			rc = TCM_INVALID_PARAMETER_LIST;
			goto out;
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return rc;
}
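
/*
 * For reference, the SET TARGET PORT GROUPS parameter list walked above,
 * derived from how ptr and len advance through the payload (see spc4r17
 * section 6.35):
 *
 *   Byte 0 - 3 : Reserved header
 *   then one 4-byte set target port groups descriptor per entry:
 *     Byte 0     : ASYMMETRIC ACCESS STATE (bits 3:0)
 *     Byte 1     : Reserved
 *     Byte 2 - 3 : TARGET PORT GROUP for a primary state, or RELATIVE
 *                  TARGET PORT IDENTIFIER for a secondary state (Offline)
 */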

static inline void core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline sense_reason_t core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				u64 tmp = lba;
				start_lba = do_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					lba += segment_size;
					cur_map = map;
					break;
				}
			} else {
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					lba = last_lba + 1;
					cur_map = map;
					break;
				}
			}
		}
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
				continue;
			switch (map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				return TCM_ALUA_TG_PT_STANDBY;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				return TCM_ALUA_TG_PT_UNAVAILABLE;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}
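
/*
 * Worked example for the segment_mult branch above, with illustrative
 * numbers (not from the original source): segment_size = 256 and
 * segment_mult = 4 give a stripe period of 1024 LBAs, so for lba = 1300
 * do_div() yields start_lba = 1300 % 1024 = 276.  That falls inside the
 * map whose lba_map_first_lba is 256 (range 256..511), and the walk then
 * continues at lba + segment_size = 1556 for the next iteration.
 */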

static inline sense_reason_t core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case READ_CAPACITY:
		return 0;
	case SERVICE_ACTION_IN_16:
		switch (cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_TG_PT_STANDBY;
	}
}

static inline sense_reason_t core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_TG_PT_UNAVAILABLE;
	}
}

static inline sense_reason_t core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_STATE_TRANSITION;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_STATE_TRANSITION;
	}
}
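
/*
 * Summary of the three filters above (compiled from the switch statements;
 * everything else is rejected with the state-specific sense reason):
 *
 *   Standby:      INQUIRY, LOG SELECT/SENSE, MODE SELECT/SENSE, REPORT LUNS,
 *                 RECEIVE/SEND DIAGNOSTIC, READ CAPACITY(10/16), REPORT/SET
 *                 TARGET PORT GROUPS, REQUEST SENSE, PERSISTENT RESERVE
 *                 IN/OUT, READ/WRITE BUFFER
 *   Unavailable:  INQUIRY, REPORT LUNS, REPORT/SET TARGET PORT GROUPS,
 *                 REQUEST SENSE, READ/WRITE BUFFER
 *   Transition:   INQUIRY, REPORT LUNS, REPORT TARGET PORT GROUPS (but not
 *                 SET), REQUEST SENSE, READ/WRITE BUFFER
 */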

/*
 * Returns TCM_NO_SENSE (0) when the command may proceed in the current
 * primary/secondary ALUA access state, a TCM_ALUA_* sense_reason_t when
 * the LUN is not currently accessible, or TCM_INVALID_CDB_FIELD for an
 * unknown access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_lun *lun = cmd->se_lun;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int out_alua_state, nonop_delay_msecs;
	u16 tg_pt_gp_id;
	sense_reason_t rc = TCM_NO_SENSE;

	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
		return 0;
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
		return 0;

	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		return TCM_ALUA_OFFLINE;
	}
	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (!tg_pt_gp) {
		rcu_read_unlock();
		return 0;
	}

	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
	rcu_read_unlock();
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP.
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		rc = core_alua_state_standby(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		rc = core_alua_state_unavailable(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		rc = core_alua_state_transition(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
			out_alua_state);
		rc = TCM_INVALID_CDB_FIELD;
	}

	if (rc && rc != TCM_INVALID_CDB_FIELD) {
		pr_debug("[%s]: ALUA TG Port not available, "
			"SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
			cmd->se_tfo->fabric_name, rc);
	}

	return rc;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
	/*
	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
	 * defined as primary target port asymmetric access states.
	 */
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		if (!(valid & ALUA_AO_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		if (!(valid & ALUA_AN_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (!(valid & ALUA_S_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (!(valid & ALUA_U_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		if (!(valid & ALUA_O_SUP))
			goto not_supported;
		*primary = 0;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		if (!(valid & ALUA_T_SUP) || explicit)
			/*
			 * Transitioning is set internally and by tcmu daemon,
			 * and cannot be selected through a STPG.
			 */
			goto not_supported;
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return TCM_INVALID_PARAMETER_LIST;
	}

	return 0;

not_supported:
	pr_err("ALUA access state %s not supported\n",
	       core_alua_dump_state(state));
	return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	case ALUA_ACCESS_STATE_TRANSITION:
		return "Transitioning";
	default:
		return "Unknown";
	}
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	loff_t pos = 0;
	int ret;

	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
		return -ENODEV;
	}
	ret = kernel_write(file, md_buf, md_buf_len, &pos);
	if (ret < 0)
		pr_err("Error writing ALUA metadata file: %s\n", path);
	fput(file);
	return (ret < 0) ? -EIO : 0;
}

static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char *path;
	int len, rc;

	lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id,
			tg_pt_gp->tg_pt_gp_alua_access_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	rc = -ENOMEM;
	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
			&wwn->unit_serial[0],
			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
	if (path) {
		rc = core_alua_write_tpg_metadata(path, md_buf, len);
		kfree(path);
	}
	kfree(md_buf);
	return rc;
}
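
/*
 * Illustrative example with hypothetical values (db_root defaults to
 * "/var/target"): for a unit serial of "abcd1234" and a group named
 * "my_tg_pt_gp", the function above writes
 * /var/target/alua/tpgs_abcd1234/my_tg_pt_gp containing:
 *
 *   tg_pt_gp_id=1
 *   alua_access_state=0x00
 *   alua_access_status=0x01
 */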

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;
	struct se_lun_acl *lacl;

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		if (!percpu_ref_tryget_live(&lun->lun_ref))
			continue;
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
			lacl = se_deve->se_lun_acl;

			/*
			 * spc4r37 p.242:
			 * After an explicit target port asymmetric access
			 * state change, a device server shall establish a
			 * unit attention condition with the additional sense
			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
			 * the initiator port associated with every I_T nexus
			 * other than the I_T nexus on which the SET TARGET
			 * PORT GROUPS command was received.
			 */
			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
				continue;

			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
				continue;

			core_scsi3_ua_allocate(se_deve, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock(&lun->lun_deve_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		percpu_ref_put(&lun->lun_ref);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	int prev_state;

	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	/* Nothing to be done here */
	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return -EAGAIN;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	core_alua_queue_state_change_ua(tg_pt_gp);

	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;

	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
	}

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		 " from primary access state %s to %s\n", (explicit) ? "explicit" :
		 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		 tg_pt_gp->tg_pt_gp_id,
		 core_alua_dump_state(prev_state),
		 core_alua_dump_state(new_state));

	core_alua_queue_state_change_ua(tg_pt_gp);

	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	return 0;
}
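
/*
 * Summary of the sequence above: the group is first moved to the
 * intermediate Transitioning state and a unit attention is queued so
 * initiators re-read the target port groups, then after the optional
 * trans_delay_msecs the final state is installed, metadata is optionally
 * flushed to the filesystem, and a second unit attention is queued for
 * the completed change.
 */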
"explicit" : 1039 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1040 tg_pt_gp->tg_pt_gp_id, 1041 core_alua_dump_state(prev_state), 1042 core_alua_dump_state(new_state)); 1043 1044 core_alua_queue_state_change_ua(tg_pt_gp); 1045 1046 mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); 1047 return 0; 1048 } 1049 1050 int core_alua_do_port_transition( 1051 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 1052 struct se_device *l_dev, 1053 struct se_lun *l_lun, 1054 struct se_node_acl *l_nacl, 1055 int new_state, 1056 int explicit) 1057 { 1058 struct se_device *dev; 1059 struct t10_alua_lu_gp *lu_gp; 1060 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 1061 struct t10_alua_tg_pt_gp *tg_pt_gp; 1062 int primary, valid_states, rc = 0; 1063 1064 if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) 1065 return -ENODEV; 1066 1067 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 1068 if (core_alua_check_transition(new_state, valid_states, &primary, 1069 explicit) != 0) 1070 return -EINVAL; 1071 1072 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 1073 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1074 lu_gp = local_lu_gp_mem->lu_gp; 1075 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1076 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1077 /* 1078 * For storage objects that are members of the 'default_lu_gp', 1079 * we only do transition on the passed *l_tp_pt_gp, and not 1080 * on all of the matching target port groups IDs in default_lu_gp. 1081 */ 1082 if (!lu_gp->lu_gp_id) { 1083 /* 1084 * core_alua_do_transition_tg_pt() will always return 1085 * success. 1086 */ 1087 l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1088 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1089 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1090 new_state, explicit); 1091 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); 1092 return rc; 1093 } 1094 /* 1095 * For all other LU groups aside from 'default_lu_gp', walk all of 1096 * the associated storage objects looking for a matching target port 1097 * group ID from the local target port group. 1098 */ 1099 spin_lock(&lu_gp->lu_gp_lock); 1100 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 1101 lu_gp_mem_list) { 1102 1103 dev = lu_gp_mem->lu_gp_mem_dev; 1104 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); 1105 spin_unlock(&lu_gp->lu_gp_lock); 1106 1107 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1108 list_for_each_entry(tg_pt_gp, 1109 &dev->t10_alua.tg_pt_gps_list, 1110 tg_pt_gp_list) { 1111 1112 if (!tg_pt_gp->tg_pt_gp_valid_id) 1113 continue; 1114 /* 1115 * If the target behavior port asymmetric access state 1116 * is changed for any target port group accessible via 1117 * a logical unit within a LU group, the target port 1118 * behavior group asymmetric access states for the same 1119 * target port group accessible via other logical units 1120 * in that LU group will also change. 1121 */ 1122 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 1123 continue; 1124 1125 if (l_tg_pt_gp == tg_pt_gp) { 1126 tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1127 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1128 } else { 1129 tg_pt_gp->tg_pt_gp_alua_lun = NULL; 1130 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1131 } 1132 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); 1133 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1134 /* 1135 * core_alua_do_transition_tg_pt() will always return 1136 * success. 

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
	struct se_portal_group *se_tpg = lun->lun_tpg;
	unsigned char *md_buf;
	char *path;
	int len, rc;

	mutex_lock(&lun->lun_tg_pt_md_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		rc = -ENOMEM;
		goto out_unlock;
	}

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&lun->lun_tg_pt_secondary_offline),
			lun->lun_tg_pt_secondary_stat);

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				lun->unpacked_lun);
	} else {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				lun->unpacked_lun);
	}
	if (!path) {
		rc = -ENOMEM;
		goto out_free;
	}

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(path);
out_free:
	kfree(md_buf);
out_unlock:
	mutex_unlock(&lun->lun_tg_pt_md_mutex);
	return rc;
}

static int core_alua_set_tg_pt_secondary_state(
	struct se_lun *lun,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (!tg_pt_gp) {
		rcu_read_unlock();
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previous secondary state for struct se_lun
	 */
	if (offline)
		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
	else
		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

	lun->lun_tg_pt_secondary_stat = (explicit) ?
			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	rcu_read_unlock();
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (lun->lun_tg_pt_secondary_write_md)
		core_alua_update_tpg_secondary_metadata(lun);

	return 0;
}
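
/*
 * Illustrative example with hypothetical fabric values: for an iSCSI portal
 * group with tag 1 and the default db_root of "/var/target", the function
 * above writes e.g. /var/target/alua/iscsi/<target IQN>+1/lun_0 containing:
 *
 *   alua_tg_pt_offline=1
 *   alua_tg_pt_status=0x02
 */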

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
			   u64 first_lba, u64 last_lba)
{
	struct t10_alua_lba_map *lba_map;

	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
	if (!lba_map) {
		pr_err("Unable to allocate struct t10_alua_lba_map\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
	lba_map->lba_map_first_lba = first_lba;
	lba_map->lba_map_last_lba = last_lba;

	list_add_tail(&lba_map->lba_map_list, list);
	return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
			       int pg_id, int state)
{
	struct t10_alua_lba_map_member *lba_map_mem;

	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
			    lba_map_mem_list) {
		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
			return -EINVAL;
		}
	}

	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
	if (!lba_map_mem) {
		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
		return -ENOMEM;
	}
	lba_map_mem->lba_map_mem_alua_state = state;
	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

	list_add_tail(&lba_map_mem->lba_map_mem_list,
		      &lba_map->lba_map_mem_list);
	return 0;
}

void
core_alua_free_lba_map(struct list_head *lba_list)
{
	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
				 lba_map_list) {
		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
					 &lba_map->lba_map_mem_list,
					 lba_map_mem_list) {
			list_del(&lba_map_mem->lba_map_mem_list);
			kmem_cache_free(t10_alua_lba_map_mem_cache,
					lba_map_mem);
		}
		list_del(&lba_map->lba_map_list);
		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
	}
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
		      int segment_size, int segment_mult)
{
	struct list_head old_lba_map_list;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int activate = 0, supported;

	INIT_LIST_HEAD(&old_lba_map_list);
	spin_lock(&dev->t10_alua.lba_map_lock);
	dev->t10_alua.lba_map_segment_size = segment_size;
	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
	if (lba_map_list) {
		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
		activate = 1;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			    tg_pt_gp_list) {

		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
		if (activate)
			supported |= ALUA_LBD_SUP;
		else
			supported &= ~ALUA_LBD_SUP;
		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
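	core_alua_free_lba_map(&old_lba_map_list);
}

/*
 * A minimal sketch of how the LBA map helpers above compose (illustrative
 * only; assumes two valid target port group IDs 1 and 2, and omits the
 * error handling a real caller needs):
 *
 *	LIST_HEAD(lba_map_list);
 *	struct t10_alua_lba_map *map;
 *
 *	map = core_alua_allocate_lba_map(&lba_map_list, 0, 1023);
 *	core_alua_allocate_lba_map_mem(map, 1,
 *				       ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *	core_alua_allocate_lba_map_mem(map, 2, ALUA_ACCESS_STATE_STANDBY);
 *	core_alua_set_lba_map(dev, &lba_map_list, 1024, 0);
 */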

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once.
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_dev = dev;
	tg_pt_gp->tg_pt_gp_alua_access_state =
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
	    ALUA_T_SUP | ALUA_O_SUP |
	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;

	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}
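
/*
 * Note on the ID selection above: a caller-supplied non-zero tg_pt_gp_id is
 * used verbatim and fails with -EINVAL on collision, while an id of zero
 * requests automatic allocation from the per-device counter, with the
 * "goto again" retry skipping over IDs that are already taken.
 */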

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_lun *lun, *next;

	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		list_del(&tg_pt_gp->tg_pt_gp_list);
		dev->t10_alua.alua_tg_pt_gps_count--;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();

	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(lun, next,
			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
		list_del_init(&lun->lun_tg_pt_gp_link);
		tg_pt_gp->tg_pt_gp_members--;

		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
			__target_attach_tg_pt_gp(lun,
					dev->t10_alua.default_tg_pt_gp);
		} else
			rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	synchronize_rcu();
	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
		struct se_device *dev, const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;

	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_lock(&lun->lun_deve_lock);
	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
		core_scsi3_ua_allocate(se_deve, 0x3f,
				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
	spin_unlock(&lun->lun_deve_lock);
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&lun->lun_tg_pt_gp_lock);
	__target_attach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	synchronize_rcu();
}

static void __target_detach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del_init(&lun->lun_tg_pt_gp_link);
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_detach_tg_pt_gp(struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
				lockdep_is_held(&lun->lun_tg_pt_gp_lock));
	if (tg_pt_gp) {
		__target_detach_tg_pt_gp(lun, tg_pt_gp);
		rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
	}
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	synchronize_rcu();
}

static void target_swap_tg_pt_gp(struct se_lun *lun,
				 struct t10_alua_tg_pt_gp *old_tg_pt_gp,
				 struct t10_alua_tg_pt_gp *new_tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	if (old_tg_pt_gp)
		__target_detach_tg_pt_gp(lun, old_tg_pt_gp);
	__target_attach_tg_pt_gp(lun, new_tg_pt_gp);
}

ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	ssize_t len = 0;

	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(
				tg_pt_gp->tg_pt_gp_alua_access_state),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
			"Offline" : "None",
			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
	}
	rcu_read_unlock();

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	if (count > TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
				lockdep_is_held(&lun->lun_tg_pt_gp_lock));
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			target_swap_tg_pt_gp(lun, tg_pt_gp,
					dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&lun->lun_tg_pt_gp_lock);

			goto sync_rcu;
		}
		move = 1;
	}

	target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
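		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
sync_rcu:
	synchronize_rcu();
	return count;
}

/*
 * Illustrative configfs usage for the store handler above (hypothetical
 * iSCSI paths under /sys/kernel/config/target): associate a LUN with an
 * existing target port group, then restore the default association:
 *
 *   echo some_tg_pt_gp > .../$IQN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 *   echo NULL > .../$IQN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 */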
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type: %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}
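
/*
 * Example usage (illustrative paths): the access type and delay attributes
 * above are exposed per target port group, e.g. something like:
 *
 *	cd /sys/kernel/config/target/core/$HBA/$DEV/alua/$TG_PT_GP
 *	echo 3 > alua_access_type	# 0=None, 1=Implicit, 2=Explicit,
 *					# 3=Implicit and Explicit
 *	echo 100 > trans_delay_msecs	# bounded by ALUA_MAX_TRANS_DELAY_MSECS
 */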
ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implicit_trans_secs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
		pr_err("Passed implicit_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}
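
/*
 * Example usage (illustrative): setting the preferred bit on a group is
 * what causes the PREF bit to be reported for it in REPORT TARGET PORT
 * GROUPS descriptors, e.g.:
 *
 *	echo 1 > .../alua/$TG_PT_GP/preferred
 */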
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	unsigned long tmp;
	int ret;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return ret;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md: %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

int core_setup_alua(struct se_device *dev)
{
	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		struct t10_alua_lu_gp_member *lu_gp_mem;

		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	}

	return 0;
}
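
/*
 * Example usage (illustrative): the per-LUN secondary ALUA state handled
 * above is driven through fabric-side configfs attributes, e.g.:
 *
 *	echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# go Offline
 *	echo 0 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# back online
 *	cat .../tpgt_1/lun/lun_0/alua_tg_pt_status
 *
 * core_setup_alua() itself is invoked by the core when a backend device is
 * configured; passthrough and internal-use devices skip ALUA emulation
 * entirely, as the flag checks above show.
 */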