1 /******************************************************************************* 2 * Filename: target_core_alua.c 3 * 4 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA) 5 * 6 * Copyright (c) 2009-2010 Rising Tide Systems 7 * Copyright (c) 2009-2010 Linux-iSCSI.org 8 * 9 * Nicholas A. Bellinger <nab@kernel.org> 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2 of the License, or 14 * (at your option) any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 24 * 25 ******************************************************************************/ 26 27 #include <linux/version.h> 28 #include <linux/slab.h> 29 #include <linux/spinlock.h> 30 #include <linux/configfs.h> 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_cmnd.h> 33 34 #include <target/target_core_base.h> 35 #include <target/target_core_device.h> 36 #include <target/target_core_transport.h> 37 #include <target/target_core_fabric_ops.h> 38 #include <target/target_core_configfs.h> 39 40 #include "target_core_alua.h" 41 #include "target_core_hba.h" 42 #include "target_core_ua.h" 43 44 static int core_alua_check_transition(int state, int *primary); 45 static int core_alua_set_tg_pt_secondary_state( 46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 47 struct se_port *port, int explict, int offline); 48 49 /* 50 * REPORT_TARGET_PORT_GROUPS 51 * 52 * See spc4r17 section 6.27 53 */ 54 int core_emulate_report_target_port_groups(struct se_cmd *cmd) 55 { 56 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; 57 struct se_port *port; 58 struct t10_alua_tg_pt_gp *tg_pt_gp; 59 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 60 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; 61 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first 62 Target port group descriptor */ 63 64 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 65 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, 66 tg_pt_gp_list) { 67 /* 68 * PREF: Preferred target port bit, determine if this 69 * bit should be set for port group. 70 */ 71 if (tg_pt_gp->tg_pt_gp_pref) 72 buf[off] = 0x80; 73 /* 74 * Set the ASYMMETRIC ACCESS State 75 */ 76 buf[off++] |= (atomic_read( 77 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); 78 /* 79 * Set supported ASYMMETRIC ACCESS State bits 80 */ 81 buf[off] = 0x80; /* T_SUP */ 82 buf[off] |= 0x40; /* O_SUP */ 83 buf[off] |= 0x8; /* U_SUP */ 84 buf[off] |= 0x4; /* S_SUP */ 85 buf[off] |= 0x2; /* AN_SUP */ 86 buf[off++] |= 0x1; /* AO_SUP */ 87 /* 88 * TARGET PORT GROUP 89 */ 90 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); 91 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); 92 93 off++; /* Skip over Reserved */ 94 /* 95 * STATUS CODE 96 */ 97 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); 98 /* 99 * Vendor Specific field 100 */ 101 buf[off++] = 0x00; 102 /* 103 * TARGET PORT COUNT 104 */ 105 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); 106 rd_len += 8; 107 108 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 109 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 110 tg_pt_gp_mem_list) { 111 port = tg_pt_gp_mem->tg_pt; 112 /* 113 * Start Target Port descriptor format 114 * 115 * See spc4r17 section 6.2.7 Table 247 116 */ 117 off += 2; /* Skip over Obsolete */ 118 /* 119 * Set RELATIVE TARGET PORT IDENTIFIER 120 */ 121 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 122 buf[off++] = (port->sep_rtpi & 0xff); 123 rd_len += 4; 124 } 125 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 126 } 127 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 128 /* 129 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 130 */ 131 buf[0] = ((rd_len >> 24) & 0xff); 132 buf[1] = ((rd_len >> 16) & 0xff); 133 buf[2] = ((rd_len >> 8) & 0xff); 134 buf[3] = (rd_len & 0xff); 135 136 return 0; 137 } 138 139 /* 140 * SET_TARGET_PORT_GROUPS for explict ALUA operation. 141 * 142 * See spc4r17 section 6.35 143 */ 144 int core_emulate_set_target_port_groups(struct se_cmd *cmd) 145 { 146 struct se_device *dev = SE_DEV(cmd); 147 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; 148 struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep; 149 struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl; 150 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; 151 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; 152 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; 153 unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ 154 u32 len = 4; /* Skip over RESERVED area in header */ 155 int alua_access_state, primary = 0, rc; 156 u16 tg_pt_id, rtpi; 157 158 if (!(l_port)) 159 return PYX_TRANSPORT_LU_COMM_FAILURE; 160 /* 161 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed 162 * for the local tg_pt_gp. 163 */ 164 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 165 if (!(l_tg_pt_gp_mem)) { 166 printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 167 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 168 } 169 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 170 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; 171 if (!(l_tg_pt_gp)) { 172 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 173 printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 174 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 175 } 176 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); 177 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 178 179 if (!(rc)) { 180 printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" 181 " while TPGS_EXPLICT_ALUA is disabled\n"); 182 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 183 } 184 185 while (len < cmd->data_length) { 186 alua_access_state = (ptr[0] & 0x0f); 187 /* 188 * Check the received ALUA access state, and determine if 189 * the state is a primary or secondary target port asymmetric 190 * access state. 191 */ 192 rc = core_alua_check_transition(alua_access_state, &primary); 193 if (rc != 0) { 194 /* 195 * If the SET TARGET PORT GROUPS attempts to establish 196 * an invalid combination of target port asymmetric 197 * access states or attempts to establish an 198 * unsupported target port asymmetric access state, 199 * then the command shall be terminated with CHECK 200 * CONDITION status, with the sense key set to ILLEGAL 201 * REQUEST, and the additional sense code set to INVALID 202 * FIELD IN PARAMETER LIST. 203 */ 204 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 205 } 206 rc = -1; 207 /* 208 * If the ASYMMETRIC ACCESS STATE field (see table 267) 209 * specifies a primary target port asymmetric access state, 210 * then the TARGET PORT GROUP OR TARGET PORT field specifies 211 * a primary target port group for which the primary target 212 * port asymmetric access state shall be changed. If the 213 * ASYMMETRIC ACCESS STATE field specifies a secondary target 214 * port asymmetric access state, then the TARGET PORT GROUP OR 215 * TARGET PORT field specifies the relative target port 216 * identifier (see 3.1.120) of the target port for which the 217 * secondary target port asymmetric access state shall be 218 * changed. 219 */ 220 if (primary) { 221 tg_pt_id = ((ptr[2] << 8) & 0xff); 222 tg_pt_id |= (ptr[3] & 0xff); 223 /* 224 * Locate the matching target port group ID from 225 * the global tg_pt_gp list 226 */ 227 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 228 list_for_each_entry(tg_pt_gp, 229 &T10_ALUA(su_dev)->tg_pt_gps_list, 230 tg_pt_gp_list) { 231 if (!(tg_pt_gp->tg_pt_gp_valid_id)) 232 continue; 233 234 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 235 continue; 236 237 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 238 smp_mb__after_atomic_inc(); 239 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 240 241 rc = core_alua_do_port_transition(tg_pt_gp, 242 dev, l_port, nacl, 243 alua_access_state, 1); 244 245 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 246 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 247 smp_mb__after_atomic_dec(); 248 break; 249 } 250 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 251 /* 252 * If not matching target port group ID can be located 253 * throw an exception with ASCQ: INVALID_PARAMETER_LIST 254 */ 255 if (rc != 0) 256 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 257 } else { 258 /* 259 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify 260 * the Target Port in question for the the incoming 261 * SET_TARGET_PORT_GROUPS op. 262 */ 263 rtpi = ((ptr[2] << 8) & 0xff); 264 rtpi |= (ptr[3] & 0xff); 265 /* 266 * Locate the matching relative target port identifer 267 * for the struct se_device storage object. 268 */ 269 spin_lock(&dev->se_port_lock); 270 list_for_each_entry(port, &dev->dev_sep_list, 271 sep_list) { 272 if (port->sep_rtpi != rtpi) 273 continue; 274 275 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 276 spin_unlock(&dev->se_port_lock); 277 278 rc = core_alua_set_tg_pt_secondary_state( 279 tg_pt_gp_mem, port, 1, 1); 280 281 spin_lock(&dev->se_port_lock); 282 break; 283 } 284 spin_unlock(&dev->se_port_lock); 285 /* 286 * If not matching relative target port identifier can 287 * be located, throw an exception with ASCQ: 288 * INVALID_PARAMETER_LIST 289 */ 290 if (rc != 0) 291 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 292 } 293 294 ptr += 4; 295 len += 4; 296 } 297 298 return 0; 299 } 300 301 static inline int core_alua_state_nonoptimized( 302 struct se_cmd *cmd, 303 unsigned char *cdb, 304 int nonop_delay_msecs, 305 u8 *alua_ascq) 306 { 307 /* 308 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked 309 * later to determine if processing of this cmd needs to be 310 * temporarily delayed for the Active/NonOptimized primary access state. 311 */ 312 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 313 cmd->alua_nonop_delay = nonop_delay_msecs; 314 return 0; 315 } 316 317 static inline int core_alua_state_standby( 318 struct se_cmd *cmd, 319 unsigned char *cdb, 320 u8 *alua_ascq) 321 { 322 /* 323 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by 324 * spc4r17 section 5.9.2.4.4 325 */ 326 switch (cdb[0]) { 327 case INQUIRY: 328 case LOG_SELECT: 329 case LOG_SENSE: 330 case MODE_SELECT: 331 case MODE_SENSE: 332 case REPORT_LUNS: 333 case RECEIVE_DIAGNOSTIC: 334 case SEND_DIAGNOSTIC: 335 case MAINTENANCE_IN: 336 switch (cdb[1]) { 337 case MI_REPORT_TARGET_PGS: 338 return 0; 339 default: 340 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 341 return 1; 342 } 343 case MAINTENANCE_OUT: 344 switch (cdb[1]) { 345 case MO_SET_TARGET_PGS: 346 return 0; 347 default: 348 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 349 return 1; 350 } 351 case REQUEST_SENSE: 352 case PERSISTENT_RESERVE_IN: 353 case PERSISTENT_RESERVE_OUT: 354 case READ_BUFFER: 355 case WRITE_BUFFER: 356 return 0; 357 default: 358 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 359 return 1; 360 } 361 362 return 0; 363 } 364 365 static inline int core_alua_state_unavailable( 366 struct se_cmd *cmd, 367 unsigned char *cdb, 368 u8 *alua_ascq) 369 { 370 /* 371 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by 372 * spc4r17 section 5.9.2.4.5 373 */ 374 switch (cdb[0]) { 375 case INQUIRY: 376 case REPORT_LUNS: 377 case MAINTENANCE_IN: 378 switch (cdb[1]) { 379 case MI_REPORT_TARGET_PGS: 380 return 0; 381 default: 382 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 383 return 1; 384 } 385 case MAINTENANCE_OUT: 386 switch (cdb[1]) { 387 case MO_SET_TARGET_PGS: 388 return 0; 389 default: 390 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 391 return 1; 392 } 393 case REQUEST_SENSE: 394 case READ_BUFFER: 395 case WRITE_BUFFER: 396 return 0; 397 default: 398 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 399 return 1; 400 } 401 402 return 0; 403 } 404 405 static inline int core_alua_state_transition( 406 struct se_cmd *cmd, 407 unsigned char *cdb, 408 u8 *alua_ascq) 409 { 410 /* 411 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by 412 * spc4r17 section 5.9.2.5 413 */ 414 switch (cdb[0]) { 415 case INQUIRY: 416 case REPORT_LUNS: 417 case MAINTENANCE_IN: 418 switch (cdb[1]) { 419 case MI_REPORT_TARGET_PGS: 420 return 0; 421 default: 422 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 423 return 1; 424 } 425 case REQUEST_SENSE: 426 case READ_BUFFER: 427 case WRITE_BUFFER: 428 return 0; 429 default: 430 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 431 return 1; 432 } 433 434 return 0; 435 } 436 437 /* 438 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED 439 * in transport_cmd_sequencer(). This function is assigned to 440 * struct t10_alua *->state_check() in core_setup_alua() 441 */ 442 static int core_alua_state_check_nop( 443 struct se_cmd *cmd, 444 unsigned char *cdb, 445 u8 *alua_ascq) 446 { 447 return 0; 448 } 449 450 /* 451 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer(). 452 * This function is assigned to struct t10_alua *->state_check() in 453 * core_setup_alua() 454 * 455 * Also, this function can return three different return codes to 456 * signal transport_generic_cmd_sequencer() 457 * 458 * return 1: Is used to signal LUN not accecsable, and check condition/not ready 459 * return 0: Used to signal success 460 * reutrn -1: Used to signal failure, and invalid cdb field 461 */ 462 static int core_alua_state_check( 463 struct se_cmd *cmd, 464 unsigned char *cdb, 465 u8 *alua_ascq) 466 { 467 struct se_lun *lun = SE_LUN(cmd); 468 struct se_port *port = lun->lun_sep; 469 struct t10_alua_tg_pt_gp *tg_pt_gp; 470 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 471 int out_alua_state, nonop_delay_msecs; 472 473 if (!(port)) 474 return 0; 475 /* 476 * First, check for a struct se_port specific secondary ALUA target port 477 * access state: OFFLINE 478 */ 479 if (atomic_read(&port->sep_tg_pt_secondary_offline)) { 480 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 481 printk(KERN_INFO "ALUA: Got secondary offline status for local" 482 " target port\n"); 483 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 484 return 1; 485 } 486 /* 487 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the 488 * ALUA target port group, to obtain current ALUA access state. 489 * Otherwise look for the underlying struct se_device association with 490 * a ALUA logical unit group. 491 */ 492 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 493 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 494 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 495 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 496 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 497 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 498 /* 499 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a seperate conditional 500 * statement so the complier knows explictly to check this case first. 501 * For the Optimized ALUA access state case, we want to process the 502 * incoming fabric cmd ASAP.. 503 */ 504 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) 505 return 0; 506 507 switch (out_alua_state) { 508 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 509 return core_alua_state_nonoptimized(cmd, cdb, 510 nonop_delay_msecs, alua_ascq); 511 case ALUA_ACCESS_STATE_STANDBY: 512 return core_alua_state_standby(cmd, cdb, alua_ascq); 513 case ALUA_ACCESS_STATE_UNAVAILABLE: 514 return core_alua_state_unavailable(cmd, cdb, alua_ascq); 515 case ALUA_ACCESS_STATE_TRANSITION: 516 return core_alua_state_transition(cmd, cdb, alua_ascq); 517 /* 518 * OFFLINE is a secondary ALUA target port group access state, that is 519 * handled above with struct se_port->sep_tg_pt_secondary_offline=1 520 */ 521 case ALUA_ACCESS_STATE_OFFLINE: 522 default: 523 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", 524 out_alua_state); 525 return -1; 526 } 527 528 return 0; 529 } 530 531 /* 532 * Check implict and explict ALUA state change request. 533 */ 534 static int core_alua_check_transition(int state, int *primary) 535 { 536 switch (state) { 537 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 538 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 539 case ALUA_ACCESS_STATE_STANDBY: 540 case ALUA_ACCESS_STATE_UNAVAILABLE: 541 /* 542 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 543 * defined as primary target port asymmetric access states. 544 */ 545 *primary = 1; 546 break; 547 case ALUA_ACCESS_STATE_OFFLINE: 548 /* 549 * OFFLINE state is defined as a secondary target port 550 * asymmetric access state. 551 */ 552 *primary = 0; 553 break; 554 default: 555 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); 556 return -1; 557 } 558 559 return 0; 560 } 561 562 static char *core_alua_dump_state(int state) 563 { 564 switch (state) { 565 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 566 return "Active/Optimized"; 567 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 568 return "Active/NonOptimized"; 569 case ALUA_ACCESS_STATE_STANDBY: 570 return "Standby"; 571 case ALUA_ACCESS_STATE_UNAVAILABLE: 572 return "Unavailable"; 573 case ALUA_ACCESS_STATE_OFFLINE: 574 return "Offline"; 575 default: 576 return "Unknown"; 577 } 578 579 return NULL; 580 } 581 582 char *core_alua_dump_status(int status) 583 { 584 switch (status) { 585 case ALUA_STATUS_NONE: 586 return "None"; 587 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: 588 return "Altered by Explict STPG"; 589 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: 590 return "Altered by Implict ALUA"; 591 default: 592 return "Unknown"; 593 } 594 595 return NULL; 596 } 597 598 /* 599 * Used by fabric modules to determine when we need to delay processing 600 * for the Active/NonOptimized paths.. 601 */ 602 int core_alua_check_nonop_delay( 603 struct se_cmd *cmd) 604 { 605 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) 606 return 0; 607 if (in_interrupt()) 608 return 0; 609 /* 610 * The ALUA Active/NonOptimized access state delay can be disabled 611 * in via configfs with a value of zero 612 */ 613 if (!(cmd->alua_nonop_delay)) 614 return 0; 615 /* 616 * struct se_cmd->alua_nonop_delay gets set by a target port group 617 * defined interval in core_alua_state_nonoptimized() 618 */ 619 msleep_interruptible(cmd->alua_nonop_delay); 620 return 0; 621 } 622 EXPORT_SYMBOL(core_alua_check_nonop_delay); 623 624 /* 625 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex 626 * 627 */ 628 static int core_alua_write_tpg_metadata( 629 const char *path, 630 unsigned char *md_buf, 631 u32 md_buf_len) 632 { 633 mm_segment_t old_fs; 634 struct file *file; 635 struct iovec iov[1]; 636 int flags = O_RDWR | O_CREAT | O_TRUNC, ret; 637 638 memset(iov, 0, sizeof(struct iovec)); 639 640 file = filp_open(path, flags, 0600); 641 if (IS_ERR(file) || !file || !file->f_dentry) { 642 printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n", 643 path); 644 return -ENODEV; 645 } 646 647 iov[0].iov_base = &md_buf[0]; 648 iov[0].iov_len = md_buf_len; 649 650 old_fs = get_fs(); 651 set_fs(get_ds()); 652 ret = vfs_writev(file, &iov[0], 1, &file->f_pos); 653 set_fs(old_fs); 654 655 if (ret < 0) { 656 printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path); 657 filp_close(file, NULL); 658 return -EIO; 659 } 660 filp_close(file, NULL); 661 662 return 0; 663 } 664 665 /* 666 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 667 */ 668 static int core_alua_update_tpg_primary_metadata( 669 struct t10_alua_tg_pt_gp *tg_pt_gp, 670 int primary_state, 671 unsigned char *md_buf) 672 { 673 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 674 struct t10_wwn *wwn = &su_dev->t10_wwn; 675 char path[ALUA_METADATA_PATH_LEN]; 676 int len; 677 678 memset(path, 0, ALUA_METADATA_PATH_LEN); 679 680 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, 681 "tg_pt_gp_id=%hu\n" 682 "alua_access_state=0x%02x\n" 683 "alua_access_status=0x%02x\n", 684 tg_pt_gp->tg_pt_gp_id, primary_state, 685 tg_pt_gp->tg_pt_gp_alua_access_status); 686 687 snprintf(path, ALUA_METADATA_PATH_LEN, 688 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 689 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 690 691 return core_alua_write_tpg_metadata(path, md_buf, len); 692 } 693 694 static int core_alua_do_transition_tg_pt( 695 struct t10_alua_tg_pt_gp *tg_pt_gp, 696 struct se_port *l_port, 697 struct se_node_acl *nacl, 698 unsigned char *md_buf, 699 int new_state, 700 int explict) 701 { 702 struct se_dev_entry *se_deve; 703 struct se_lun_acl *lacl; 704 struct se_port *port; 705 struct t10_alua_tg_pt_gp_member *mem; 706 int old_state = 0; 707 /* 708 * Save the old primary ALUA access state, and set the current state 709 * to ALUA_ACCESS_STATE_TRANSITION. 710 */ 711 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 712 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 713 ALUA_ACCESS_STATE_TRANSITION); 714 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? 715 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 716 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 717 /* 718 * Check for the optional ALUA primary state transition delay 719 */ 720 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 721 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 722 723 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 724 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, 725 tg_pt_gp_mem_list) { 726 port = mem->tg_pt; 727 /* 728 * After an implicit target port asymmetric access state 729 * change, a device server shall establish a unit attention 730 * condition for the initiator port associated with every I_T 731 * nexus with the additional sense code set to ASYMMETRIC 732 * ACCESS STATE CHAGED. 733 * 734 * After an explicit target port asymmetric access state 735 * change, a device server shall establish a unit attention 736 * condition with the additional sense code set to ASYMMETRIC 737 * ACCESS STATE CHANGED for the initiator port associated with 738 * every I_T nexus other than the I_T nexus on which the SET 739 * TARGET PORT GROUPS command 740 */ 741 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 742 smp_mb__after_atomic_inc(); 743 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 744 745 spin_lock_bh(&port->sep_alua_lock); 746 list_for_each_entry(se_deve, &port->sep_alua_list, 747 alua_port_list) { 748 lacl = se_deve->se_lun_acl; 749 /* 750 * se_deve->se_lun_acl pointer may be NULL for a 751 * entry created without explict Node+MappedLUN ACLs 752 */ 753 if (!(lacl)) 754 continue; 755 756 if (explict && 757 (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 758 (l_port != NULL) && (l_port == port)) 759 continue; 760 761 core_scsi3_ua_allocate(lacl->se_lun_nacl, 762 se_deve->mapped_lun, 0x2A, 763 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 764 } 765 spin_unlock_bh(&port->sep_alua_lock); 766 767 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 768 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 769 smp_mb__after_atomic_dec(); 770 } 771 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 772 /* 773 * Update the ALUA metadata buf that has been allocated in 774 * core_alua_do_port_transition(), this metadata will be written 775 * to struct file. 776 * 777 * Note that there is the case where we do not want to update the 778 * metadata when the saved metadata is being parsed in userspace 779 * when setting the existing port access state and access status. 780 * 781 * Also note that the failure to write out the ALUA metadata to 782 * struct file does NOT affect the actual ALUA transition. 783 */ 784 if (tg_pt_gp->tg_pt_gp_write_metadata) { 785 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); 786 core_alua_update_tpg_primary_metadata(tg_pt_gp, 787 new_state, md_buf); 788 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); 789 } 790 /* 791 * Set the current primary ALUA access state to the requested new state 792 */ 793 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 794 795 printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" 796 " from primary access state %s to %s\n", (explict) ? "explict" : 797 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 798 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 799 core_alua_dump_state(new_state)); 800 801 return 0; 802 } 803 804 int core_alua_do_port_transition( 805 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 806 struct se_device *l_dev, 807 struct se_port *l_port, 808 struct se_node_acl *l_nacl, 809 int new_state, 810 int explict) 811 { 812 struct se_device *dev; 813 struct se_port *port; 814 struct se_subsystem_dev *su_dev; 815 struct se_node_acl *nacl; 816 struct t10_alua_lu_gp *lu_gp; 817 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 818 struct t10_alua_tg_pt_gp *tg_pt_gp; 819 unsigned char *md_buf; 820 int primary; 821 822 if (core_alua_check_transition(new_state, &primary) != 0) 823 return -EINVAL; 824 825 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); 826 if (!(md_buf)) { 827 printk("Unable to allocate buf for ALUA metadata\n"); 828 return -ENOMEM; 829 } 830 831 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 832 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 833 lu_gp = local_lu_gp_mem->lu_gp; 834 atomic_inc(&lu_gp->lu_gp_ref_cnt); 835 smp_mb__after_atomic_inc(); 836 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 837 /* 838 * For storage objects that are members of the 'default_lu_gp', 839 * we only do transition on the passed *l_tp_pt_gp, and not 840 * on all of the matching target port groups IDs in default_lu_gp. 841 */ 842 if (!(lu_gp->lu_gp_id)) { 843 /* 844 * core_alua_do_transition_tg_pt() will always return 845 * success. 846 */ 847 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 848 md_buf, new_state, explict); 849 atomic_dec(&lu_gp->lu_gp_ref_cnt); 850 smp_mb__after_atomic_dec(); 851 kfree(md_buf); 852 return 0; 853 } 854 /* 855 * For all other LU groups aside from 'default_lu_gp', walk all of 856 * the associated storage objects looking for a matching target port 857 * group ID from the local target port group. 858 */ 859 spin_lock(&lu_gp->lu_gp_lock); 860 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 861 lu_gp_mem_list) { 862 863 dev = lu_gp_mem->lu_gp_mem_dev; 864 su_dev = dev->se_sub_dev; 865 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 866 smp_mb__after_atomic_inc(); 867 spin_unlock(&lu_gp->lu_gp_lock); 868 869 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 870 list_for_each_entry(tg_pt_gp, 871 &T10_ALUA(su_dev)->tg_pt_gps_list, 872 tg_pt_gp_list) { 873 874 if (!(tg_pt_gp->tg_pt_gp_valid_id)) 875 continue; 876 /* 877 * If the target behavior port asymmetric access state 878 * is changed for any target port group accessiable via 879 * a logical unit within a LU group, the target port 880 * behavior group asymmetric access states for the same 881 * target port group accessible via other logical units 882 * in that LU group will also change. 883 */ 884 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 885 continue; 886 887 if (l_tg_pt_gp == tg_pt_gp) { 888 port = l_port; 889 nacl = l_nacl; 890 } else { 891 port = NULL; 892 nacl = NULL; 893 } 894 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 895 smp_mb__after_atomic_inc(); 896 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 897 /* 898 * core_alua_do_transition_tg_pt() will always return 899 * success. 900 */ 901 core_alua_do_transition_tg_pt(tg_pt_gp, port, 902 nacl, md_buf, new_state, explict); 903 904 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 905 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 906 smp_mb__after_atomic_dec(); 907 } 908 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 909 910 spin_lock(&lu_gp->lu_gp_lock); 911 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 912 smp_mb__after_atomic_dec(); 913 } 914 spin_unlock(&lu_gp->lu_gp_lock); 915 916 printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" 917 " Group IDs: %hu %s transition to primary state: %s\n", 918 config_item_name(&lu_gp->lu_gp_group.cg_item), 919 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", 920 core_alua_dump_state(new_state)); 921 922 atomic_dec(&lu_gp->lu_gp_ref_cnt); 923 smp_mb__after_atomic_dec(); 924 kfree(md_buf); 925 return 0; 926 } 927 928 /* 929 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held 930 */ 931 static int core_alua_update_tpg_secondary_metadata( 932 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 933 struct se_port *port, 934 unsigned char *md_buf, 935 u32 md_buf_len) 936 { 937 struct se_portal_group *se_tpg = port->sep_tpg; 938 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 939 int len; 940 941 memset(path, 0, ALUA_METADATA_PATH_LEN); 942 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); 943 944 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", 945 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)); 946 947 if (TPG_TFO(se_tpg)->tpg_get_tag != NULL) 948 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 949 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); 950 951 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" 952 "alua_tg_pt_status=0x%02x\n", 953 atomic_read(&port->sep_tg_pt_secondary_offline), 954 port->sep_tg_pt_secondary_stat); 955 956 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", 957 TPG_TFO(se_tpg)->get_fabric_name(), wwn, 958 port->sep_lun->unpacked_lun); 959 960 return core_alua_write_tpg_metadata(path, md_buf, len); 961 } 962 963 static int core_alua_set_tg_pt_secondary_state( 964 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 965 struct se_port *port, 966 int explict, 967 int offline) 968 { 969 struct t10_alua_tg_pt_gp *tg_pt_gp; 970 unsigned char *md_buf; 971 u32 md_buf_len; 972 int trans_delay_msecs; 973 974 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 975 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 976 if (!(tg_pt_gp)) { 977 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 978 printk(KERN_ERR "Unable to complete secondary state" 979 " transition\n"); 980 return -1; 981 } 982 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; 983 /* 984 * Set the secondary ALUA target port access state to OFFLINE 985 * or release the previously secondary state for struct se_port 986 */ 987 if (offline) 988 atomic_set(&port->sep_tg_pt_secondary_offline, 1); 989 else 990 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 991 992 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 993 port->sep_tg_pt_secondary_stat = (explict) ? 994 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 995 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 996 997 printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" 998 " to secondary access state: %s\n", (explict) ? "explict" : 999 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1000 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1001 1002 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1003 /* 1004 * Do the optional transition delay after we set the secondary 1005 * ALUA access state. 1006 */ 1007 if (trans_delay_msecs != 0) 1008 msleep_interruptible(trans_delay_msecs); 1009 /* 1010 * See if we need to update the ALUA fabric port metadata for 1011 * secondary state and status 1012 */ 1013 if (port->sep_tg_pt_secondary_write_md) { 1014 md_buf = kzalloc(md_buf_len, GFP_KERNEL); 1015 if (!(md_buf)) { 1016 printk(KERN_ERR "Unable to allocate md_buf for" 1017 " secondary ALUA access metadata\n"); 1018 return -1; 1019 } 1020 mutex_lock(&port->sep_tg_pt_md_mutex); 1021 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, 1022 md_buf, md_buf_len); 1023 mutex_unlock(&port->sep_tg_pt_md_mutex); 1024 1025 kfree(md_buf); 1026 } 1027 1028 return 0; 1029 } 1030 1031 struct t10_alua_lu_gp * 1032 core_alua_allocate_lu_gp(const char *name, int def_group) 1033 { 1034 struct t10_alua_lu_gp *lu_gp; 1035 1036 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); 1037 if (!(lu_gp)) { 1038 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); 1039 return ERR_PTR(-ENOMEM);; 1040 } 1041 INIT_LIST_HEAD(&lu_gp->lu_gp_list); 1042 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); 1043 spin_lock_init(&lu_gp->lu_gp_lock); 1044 atomic_set(&lu_gp->lu_gp_ref_cnt, 0); 1045 1046 if (def_group) { 1047 lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;; 1048 lu_gp->lu_gp_valid_id = 1; 1049 se_global->alua_lu_gps_count++; 1050 } 1051 1052 return lu_gp; 1053 } 1054 1055 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) 1056 { 1057 struct t10_alua_lu_gp *lu_gp_tmp; 1058 u16 lu_gp_id_tmp; 1059 /* 1060 * The lu_gp->lu_gp_id may only be set once.. 1061 */ 1062 if (lu_gp->lu_gp_valid_id) { 1063 printk(KERN_WARNING "ALUA LU Group already has a valid ID," 1064 " ignoring request\n"); 1065 return -1; 1066 } 1067 1068 spin_lock(&se_global->lu_gps_lock); 1069 if (se_global->alua_lu_gps_count == 0x0000ffff) { 1070 printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:" 1071 " 0x0000ffff reached\n"); 1072 spin_unlock(&se_global->lu_gps_lock); 1073 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1074 return -1; 1075 } 1076 again: 1077 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : 1078 se_global->alua_lu_gps_counter++; 1079 1080 list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) { 1081 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { 1082 if (!(lu_gp_id)) 1083 goto again; 1084 1085 printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" 1086 " already exists, ignoring request\n", 1087 lu_gp_id); 1088 spin_unlock(&se_global->lu_gps_lock); 1089 return -1; 1090 } 1091 } 1092 1093 lu_gp->lu_gp_id = lu_gp_id_tmp; 1094 lu_gp->lu_gp_valid_id = 1; 1095 list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list); 1096 se_global->alua_lu_gps_count++; 1097 spin_unlock(&se_global->lu_gps_lock); 1098 1099 return 0; 1100 } 1101 1102 static struct t10_alua_lu_gp_member * 1103 core_alua_allocate_lu_gp_mem(struct se_device *dev) 1104 { 1105 struct t10_alua_lu_gp_member *lu_gp_mem; 1106 1107 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); 1108 if (!(lu_gp_mem)) { 1109 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n"); 1110 return ERR_PTR(-ENOMEM); 1111 } 1112 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); 1113 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); 1114 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); 1115 1116 lu_gp_mem->lu_gp_mem_dev = dev; 1117 dev->dev_alua_lu_gp_mem = lu_gp_mem; 1118 1119 return lu_gp_mem; 1120 } 1121 1122 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) 1123 { 1124 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; 1125 /* 1126 * Once we have reached this point, config_item_put() has 1127 * already been called from target_core_alua_drop_lu_gp(). 1128 * 1129 * Here, we remove the *lu_gp from the global list so that 1130 * no associations can be made while we are releasing 1131 * struct t10_alua_lu_gp. 1132 */ 1133 spin_lock(&se_global->lu_gps_lock); 1134 atomic_set(&lu_gp->lu_gp_shutdown, 1); 1135 list_del(&lu_gp->lu_gp_list); 1136 se_global->alua_lu_gps_count--; 1137 spin_unlock(&se_global->lu_gps_lock); 1138 /* 1139 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() 1140 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be 1141 * released with core_alua_put_lu_gp_from_name() 1142 */ 1143 while (atomic_read(&lu_gp->lu_gp_ref_cnt)) 1144 cpu_relax(); 1145 /* 1146 * Release reference to struct t10_alua_lu_gp * from all associated 1147 * struct se_device. 1148 */ 1149 spin_lock(&lu_gp->lu_gp_lock); 1150 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, 1151 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1152 if (lu_gp_mem->lu_gp_assoc) { 1153 list_del(&lu_gp_mem->lu_gp_mem_list); 1154 lu_gp->lu_gp_members--; 1155 lu_gp_mem->lu_gp_assoc = 0; 1156 } 1157 spin_unlock(&lu_gp->lu_gp_lock); 1158 /* 1159 * 1160 * lu_gp_mem is assoicated with a single 1161 * struct se_device->dev_alua_lu_gp_mem, and is released when 1162 * struct se_device is released via core_alua_free_lu_gp_mem(). 1163 * 1164 * If the passed lu_gp does NOT match the default_lu_gp, assume 1165 * we want to re-assocate a given lu_gp_mem with default_lu_gp. 1166 */ 1167 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1168 if (lu_gp != se_global->default_lu_gp) 1169 __core_alua_attach_lu_gp_mem(lu_gp_mem, 1170 se_global->default_lu_gp); 1171 else 1172 lu_gp_mem->lu_gp = NULL; 1173 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1174 1175 spin_lock(&lu_gp->lu_gp_lock); 1176 } 1177 spin_unlock(&lu_gp->lu_gp_lock); 1178 1179 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1180 } 1181 1182 void core_alua_free_lu_gp_mem(struct se_device *dev) 1183 { 1184 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1185 struct t10_alua *alua = T10_ALUA(su_dev); 1186 struct t10_alua_lu_gp *lu_gp; 1187 struct t10_alua_lu_gp_member *lu_gp_mem; 1188 1189 if (alua->alua_type != SPC3_ALUA_EMULATED) 1190 return; 1191 1192 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1193 if (!(lu_gp_mem)) 1194 return; 1195 1196 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) 1197 cpu_relax(); 1198 1199 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1200 lu_gp = lu_gp_mem->lu_gp; 1201 if ((lu_gp)) { 1202 spin_lock(&lu_gp->lu_gp_lock); 1203 if (lu_gp_mem->lu_gp_assoc) { 1204 list_del(&lu_gp_mem->lu_gp_mem_list); 1205 lu_gp->lu_gp_members--; 1206 lu_gp_mem->lu_gp_assoc = 0; 1207 } 1208 spin_unlock(&lu_gp->lu_gp_lock); 1209 lu_gp_mem->lu_gp = NULL; 1210 } 1211 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1212 1213 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); 1214 } 1215 1216 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) 1217 { 1218 struct t10_alua_lu_gp *lu_gp; 1219 struct config_item *ci; 1220 1221 spin_lock(&se_global->lu_gps_lock); 1222 list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) { 1223 if (!(lu_gp->lu_gp_valid_id)) 1224 continue; 1225 ci = &lu_gp->lu_gp_group.cg_item; 1226 if (!(strcmp(config_item_name(ci), name))) { 1227 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1228 spin_unlock(&se_global->lu_gps_lock); 1229 return lu_gp; 1230 } 1231 } 1232 spin_unlock(&se_global->lu_gps_lock); 1233 1234 return NULL; 1235 } 1236 1237 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) 1238 { 1239 spin_lock(&se_global->lu_gps_lock); 1240 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1241 spin_unlock(&se_global->lu_gps_lock); 1242 } 1243 1244 /* 1245 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1246 */ 1247 void __core_alua_attach_lu_gp_mem( 1248 struct t10_alua_lu_gp_member *lu_gp_mem, 1249 struct t10_alua_lu_gp *lu_gp) 1250 { 1251 spin_lock(&lu_gp->lu_gp_lock); 1252 lu_gp_mem->lu_gp = lu_gp; 1253 lu_gp_mem->lu_gp_assoc = 1; 1254 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); 1255 lu_gp->lu_gp_members++; 1256 spin_unlock(&lu_gp->lu_gp_lock); 1257 } 1258 1259 /* 1260 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1261 */ 1262 void __core_alua_drop_lu_gp_mem( 1263 struct t10_alua_lu_gp_member *lu_gp_mem, 1264 struct t10_alua_lu_gp *lu_gp) 1265 { 1266 spin_lock(&lu_gp->lu_gp_lock); 1267 list_del(&lu_gp_mem->lu_gp_mem_list); 1268 lu_gp_mem->lu_gp = NULL; 1269 lu_gp_mem->lu_gp_assoc = 0; 1270 lu_gp->lu_gp_members--; 1271 spin_unlock(&lu_gp->lu_gp_lock); 1272 } 1273 1274 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( 1275 struct se_subsystem_dev *su_dev, 1276 const char *name, 1277 int def_group) 1278 { 1279 struct t10_alua_tg_pt_gp *tg_pt_gp; 1280 1281 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); 1282 if (!(tg_pt_gp)) { 1283 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); 1284 return NULL; 1285 } 1286 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1287 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); 1288 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1289 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1290 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1291 tg_pt_gp->tg_pt_gp_su_dev = su_dev; 1292 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1293 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1294 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); 1295 /* 1296 * Enable both explict and implict ALUA support by default 1297 */ 1298 tg_pt_gp->tg_pt_gp_alua_access_type = 1299 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; 1300 /* 1301 * Set the default Active/NonOptimized Delay in milliseconds 1302 */ 1303 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1304 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1305 1306 if (def_group) { 1307 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1308 tg_pt_gp->tg_pt_gp_id = 1309 T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; 1310 tg_pt_gp->tg_pt_gp_valid_id = 1; 1311 T10_ALUA(su_dev)->alua_tg_pt_gps_count++; 1312 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1313 &T10_ALUA(su_dev)->tg_pt_gps_list); 1314 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1315 } 1316 1317 return tg_pt_gp; 1318 } 1319 1320 int core_alua_set_tg_pt_gp_id( 1321 struct t10_alua_tg_pt_gp *tg_pt_gp, 1322 u16 tg_pt_gp_id) 1323 { 1324 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1325 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; 1326 u16 tg_pt_gp_id_tmp; 1327 /* 1328 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1329 */ 1330 if (tg_pt_gp->tg_pt_gp_valid_id) { 1331 printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," 1332 " ignoring request\n"); 1333 return -1; 1334 } 1335 1336 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1337 if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) { 1338 printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" 1339 " 0x0000ffff reached\n"); 1340 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1341 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1342 return -1; 1343 } 1344 again: 1345 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : 1346 T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; 1347 1348 list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list, 1349 tg_pt_gp_list) { 1350 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1351 if (!(tg_pt_gp_id)) 1352 goto again; 1353 1354 printk(KERN_ERR "ALUA Target Port Group ID: %hu already" 1355 " exists, ignoring request\n", tg_pt_gp_id); 1356 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1357 return -1; 1358 } 1359 } 1360 1361 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; 1362 tg_pt_gp->tg_pt_gp_valid_id = 1; 1363 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1364 &T10_ALUA(su_dev)->tg_pt_gps_list); 1365 T10_ALUA(su_dev)->alua_tg_pt_gps_count++; 1366 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1367 1368 return 0; 1369 } 1370 1371 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 1372 struct se_port *port) 1373 { 1374 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1375 1376 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, 1377 GFP_KERNEL); 1378 if (!(tg_pt_gp_mem)) { 1379 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1380 return ERR_PTR(-ENOMEM); 1381 } 1382 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1383 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1384 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); 1385 1386 tg_pt_gp_mem->tg_pt = port; 1387 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1388 atomic_set(&port->sep_tg_pt_gp_active, 1); 1389 1390 return tg_pt_gp_mem; 1391 } 1392 1393 void core_alua_free_tg_pt_gp( 1394 struct t10_alua_tg_pt_gp *tg_pt_gp) 1395 { 1396 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1397 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; 1398 /* 1399 * Once we have reached this point, config_item_put() has already 1400 * been called from target_core_alua_drop_tg_pt_gp(). 1401 * 1402 * Here we remove *tg_pt_gp from the global list so that 1403 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS 1404 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1405 */ 1406 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1407 list_del(&tg_pt_gp->tg_pt_gp_list); 1408 T10_ALUA(su_dev)->alua_tg_pt_gps_counter--; 1409 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1410 /* 1411 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1412 * core_alua_get_tg_pt_gp_by_name() in 1413 * target_core_configfs.c:target_core_store_alua_tg_pt_gp() 1414 * to be released with core_alua_put_tg_pt_gp_from_name(). 1415 */ 1416 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) 1417 cpu_relax(); 1418 /* 1419 * Release reference to struct t10_alua_tg_pt_gp from all associated 1420 * struct se_port. 1421 */ 1422 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1423 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, 1424 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { 1425 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1426 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1427 tg_pt_gp->tg_pt_gp_members--; 1428 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1429 } 1430 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1431 /* 1432 * tg_pt_gp_mem is assoicated with a single 1433 * se_port->sep_alua_tg_pt_gp_mem, and is released via 1434 * core_alua_free_tg_pt_gp_mem(). 1435 * 1436 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1437 * assume we want to re-assocate a given tg_pt_gp_mem with 1438 * default_tg_pt_gp. 1439 */ 1440 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1441 if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) { 1442 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1443 T10_ALUA(su_dev)->default_tg_pt_gp); 1444 } else 1445 tg_pt_gp_mem->tg_pt_gp = NULL; 1446 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1447 1448 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1449 } 1450 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1451 1452 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1453 } 1454 1455 void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1456 { 1457 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1458 struct t10_alua *alua = T10_ALUA(su_dev); 1459 struct t10_alua_tg_pt_gp *tg_pt_gp; 1460 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1461 1462 if (alua->alua_type != SPC3_ALUA_EMULATED) 1463 return; 1464 1465 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1466 if (!(tg_pt_gp_mem)) 1467 return; 1468 1469 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) 1470 cpu_relax(); 1471 1472 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1473 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1474 if ((tg_pt_gp)) { 1475 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1476 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1477 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1478 tg_pt_gp->tg_pt_gp_members--; 1479 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1480 } 1481 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1482 tg_pt_gp_mem->tg_pt_gp = NULL; 1483 } 1484 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1485 1486 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); 1487 } 1488 1489 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1490 struct se_subsystem_dev *su_dev, 1491 const char *name) 1492 { 1493 struct t10_alua_tg_pt_gp *tg_pt_gp; 1494 struct config_item *ci; 1495 1496 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1497 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, 1498 tg_pt_gp_list) { 1499 if (!(tg_pt_gp->tg_pt_gp_valid_id)) 1500 continue; 1501 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1502 if (!(strcmp(config_item_name(ci), name))) { 1503 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1504 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1505 return tg_pt_gp; 1506 } 1507 } 1508 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1509 1510 return NULL; 1511 } 1512 1513 static void core_alua_put_tg_pt_gp_from_name( 1514 struct t10_alua_tg_pt_gp *tg_pt_gp) 1515 { 1516 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1517 1518 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1519 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1520 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); 1521 } 1522 1523 /* 1524 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1525 */ 1526 void __core_alua_attach_tg_pt_gp_mem( 1527 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1528 struct t10_alua_tg_pt_gp *tg_pt_gp) 1529 { 1530 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1531 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; 1532 tg_pt_gp_mem->tg_pt_gp_assoc = 1; 1533 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, 1534 &tg_pt_gp->tg_pt_gp_mem_list); 1535 tg_pt_gp->tg_pt_gp_members++; 1536 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1537 } 1538 1539 /* 1540 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1541 */ 1542 static void __core_alua_drop_tg_pt_gp_mem( 1543 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1544 struct t10_alua_tg_pt_gp *tg_pt_gp) 1545 { 1546 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1547 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1548 tg_pt_gp_mem->tg_pt_gp = NULL; 1549 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1550 tg_pt_gp->tg_pt_gp_members--; 1551 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1552 } 1553 1554 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) 1555 { 1556 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1557 struct config_item *tg_pt_ci; 1558 struct t10_alua *alua = T10_ALUA(su_dev); 1559 struct t10_alua_tg_pt_gp *tg_pt_gp; 1560 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1561 ssize_t len = 0; 1562 1563 if (alua->alua_type != SPC3_ALUA_EMULATED) 1564 return len; 1565 1566 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1567 if (!(tg_pt_gp_mem)) 1568 return len; 1569 1570 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1571 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1572 if ((tg_pt_gp)) { 1573 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1574 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" 1575 " %hu\nTG Port Primary Access State: %s\nTG Port " 1576 "Primary Access Status: %s\nTG Port Secondary Access" 1577 " State: %s\nTG Port Secondary Access Status: %s\n", 1578 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1579 core_alua_dump_state(atomic_read( 1580 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1581 core_alua_dump_status( 1582 tg_pt_gp->tg_pt_gp_alua_access_status), 1583 (atomic_read(&port->sep_tg_pt_secondary_offline)) ? 1584 "Offline" : "None", 1585 core_alua_dump_status(port->sep_tg_pt_secondary_stat)); 1586 } 1587 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1588 1589 return len; 1590 } 1591 1592 ssize_t core_alua_store_tg_pt_gp_info( 1593 struct se_port *port, 1594 const char *page, 1595 size_t count) 1596 { 1597 struct se_portal_group *tpg; 1598 struct se_lun *lun; 1599 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1600 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1601 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1602 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1603 int move = 0; 1604 1605 tpg = port->sep_tpg; 1606 lun = port->sep_lun; 1607 1608 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { 1609 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" 1610 " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg), 1611 TPG_TFO(tpg)->tpg_get_tag(tpg), 1612 config_item_name(&lun->lun_group.cg_item)); 1613 return -EINVAL; 1614 } 1615 1616 if (count > TG_PT_GROUP_NAME_BUF) { 1617 printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); 1618 return -EINVAL; 1619 } 1620 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 1621 memcpy(buf, page, count); 1622 /* 1623 * Any ALUA target port group alias besides "NULL" means we will be 1624 * making a new group association. 1625 */ 1626 if (strcmp(strstrip(buf), "NULL")) { 1627 /* 1628 * core_alua_get_tg_pt_gp_by_name() will increment reference to 1629 * struct t10_alua_tg_pt_gp. This reference is released with 1630 * core_alua_put_tg_pt_gp_from_name() below. 1631 */ 1632 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, 1633 strstrip(buf)); 1634 if (!(tg_pt_gp_new)) 1635 return -ENODEV; 1636 } 1637 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1638 if (!(tg_pt_gp_mem)) { 1639 if (tg_pt_gp_new) 1640 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1641 printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); 1642 return -EINVAL; 1643 } 1644 1645 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1646 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1647 if ((tg_pt_gp)) { 1648 /* 1649 * Clearing an existing tg_pt_gp association, and replacing 1650 * with the default_tg_pt_gp. 1651 */ 1652 if (!(tg_pt_gp_new)) { 1653 printk(KERN_INFO "Target_Core_ConfigFS: Moving" 1654 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1655 " alua/%s, ID: %hu back to" 1656 " default_tg_pt_gp\n", 1657 TPG_TFO(tpg)->tpg_get_wwn(tpg), 1658 TPG_TFO(tpg)->tpg_get_tag(tpg), 1659 config_item_name(&lun->lun_group.cg_item), 1660 config_item_name( 1661 &tg_pt_gp->tg_pt_gp_group.cg_item), 1662 tg_pt_gp->tg_pt_gp_id); 1663 1664 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1665 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1666 T10_ALUA(su_dev)->default_tg_pt_gp); 1667 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1668 1669 return count; 1670 } 1671 /* 1672 * Removing existing association of tg_pt_gp_mem with tg_pt_gp 1673 */ 1674 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1675 move = 1; 1676 } 1677 /* 1678 * Associate tg_pt_gp_mem with tg_pt_gp_new. 1679 */ 1680 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); 1681 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1682 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1683 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 1684 "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg), 1685 TPG_TFO(tpg)->tpg_get_tag(tpg), 1686 config_item_name(&lun->lun_group.cg_item), 1687 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), 1688 tg_pt_gp_new->tg_pt_gp_id); 1689 1690 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1691 return count; 1692 } 1693 1694 ssize_t core_alua_show_access_type( 1695 struct t10_alua_tg_pt_gp *tg_pt_gp, 1696 char *page) 1697 { 1698 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && 1699 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) 1700 return sprintf(page, "Implict and Explict\n"); 1701 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) 1702 return sprintf(page, "Implict\n"); 1703 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) 1704 return sprintf(page, "Explict\n"); 1705 else 1706 return sprintf(page, "None\n"); 1707 } 1708 1709 ssize_t core_alua_store_access_type( 1710 struct t10_alua_tg_pt_gp *tg_pt_gp, 1711 const char *page, 1712 size_t count) 1713 { 1714 unsigned long tmp; 1715 int ret; 1716 1717 ret = strict_strtoul(page, 0, &tmp); 1718 if (ret < 0) { 1719 printk(KERN_ERR "Unable to extract alua_access_type\n"); 1720 return -EINVAL; 1721 } 1722 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1723 printk(KERN_ERR "Illegal value for alua_access_type:" 1724 " %lu\n", tmp); 1725 return -EINVAL; 1726 } 1727 if (tmp == 3) 1728 tg_pt_gp->tg_pt_gp_alua_access_type = 1729 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; 1730 else if (tmp == 2) 1731 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; 1732 else if (tmp == 1) 1733 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; 1734 else 1735 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 1736 1737 return count; 1738 } 1739 1740 ssize_t core_alua_show_nonop_delay_msecs( 1741 struct t10_alua_tg_pt_gp *tg_pt_gp, 1742 char *page) 1743 { 1744 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); 1745 } 1746 1747 ssize_t core_alua_store_nonop_delay_msecs( 1748 struct t10_alua_tg_pt_gp *tg_pt_gp, 1749 const char *page, 1750 size_t count) 1751 { 1752 unsigned long tmp; 1753 int ret; 1754 1755 ret = strict_strtoul(page, 0, &tmp); 1756 if (ret < 0) { 1757 printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); 1758 return -EINVAL; 1759 } 1760 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1761 printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" 1762 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 1763 ALUA_MAX_NONOP_DELAY_MSECS); 1764 return -EINVAL; 1765 } 1766 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; 1767 1768 return count; 1769 } 1770 1771 ssize_t core_alua_show_trans_delay_msecs( 1772 struct t10_alua_tg_pt_gp *tg_pt_gp, 1773 char *page) 1774 { 1775 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1776 } 1777 1778 ssize_t core_alua_store_trans_delay_msecs( 1779 struct t10_alua_tg_pt_gp *tg_pt_gp, 1780 const char *page, 1781 size_t count) 1782 { 1783 unsigned long tmp; 1784 int ret; 1785 1786 ret = strict_strtoul(page, 0, &tmp); 1787 if (ret < 0) { 1788 printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); 1789 return -EINVAL; 1790 } 1791 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1792 printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" 1793 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 1794 ALUA_MAX_TRANS_DELAY_MSECS); 1795 return -EINVAL; 1796 } 1797 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; 1798 1799 return count; 1800 } 1801 1802 ssize_t core_alua_show_preferred_bit( 1803 struct t10_alua_tg_pt_gp *tg_pt_gp, 1804 char *page) 1805 { 1806 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); 1807 } 1808 1809 ssize_t core_alua_store_preferred_bit( 1810 struct t10_alua_tg_pt_gp *tg_pt_gp, 1811 const char *page, 1812 size_t count) 1813 { 1814 unsigned long tmp; 1815 int ret; 1816 1817 ret = strict_strtoul(page, 0, &tmp); 1818 if (ret < 0) { 1819 printk(KERN_ERR "Unable to extract preferred ALUA value\n"); 1820 return -EINVAL; 1821 } 1822 if ((tmp != 0) && (tmp != 1)) { 1823 printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); 1824 return -EINVAL; 1825 } 1826 tg_pt_gp->tg_pt_gp_pref = (int)tmp; 1827 1828 return count; 1829 } 1830 1831 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 1832 { 1833 if (!(lun->lun_sep)) 1834 return -ENODEV; 1835 1836 return sprintf(page, "%d\n", 1837 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); 1838 } 1839 1840 ssize_t core_alua_store_offline_bit( 1841 struct se_lun *lun, 1842 const char *page, 1843 size_t count) 1844 { 1845 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1846 unsigned long tmp; 1847 int ret; 1848 1849 if (!(lun->lun_sep)) 1850 return -ENODEV; 1851 1852 ret = strict_strtoul(page, 0, &tmp); 1853 if (ret < 0) { 1854 printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); 1855 return -EINVAL; 1856 } 1857 if ((tmp != 0) && (tmp != 1)) { 1858 printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", 1859 tmp); 1860 return -EINVAL; 1861 } 1862 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 1863 if (!(tg_pt_gp_mem)) { 1864 printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); 1865 return -EINVAL; 1866 } 1867 1868 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, 1869 lun->lun_sep, 0, (int)tmp); 1870 if (ret < 0) 1871 return -EINVAL; 1872 1873 return count; 1874 } 1875 1876 ssize_t core_alua_show_secondary_status( 1877 struct se_lun *lun, 1878 char *page) 1879 { 1880 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); 1881 } 1882 1883 ssize_t core_alua_store_secondary_status( 1884 struct se_lun *lun, 1885 const char *page, 1886 size_t count) 1887 { 1888 unsigned long tmp; 1889 int ret; 1890 1891 ret = strict_strtoul(page, 0, &tmp); 1892 if (ret < 0) { 1893 printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); 1894 return -EINVAL; 1895 } 1896 if ((tmp != ALUA_STATUS_NONE) && 1897 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1898 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1899 printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", 1900 tmp); 1901 return -EINVAL; 1902 } 1903 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; 1904 1905 return count; 1906 } 1907 1908 ssize_t core_alua_show_secondary_write_metadata( 1909 struct se_lun *lun, 1910 char *page) 1911 { 1912 return sprintf(page, "%d\n", 1913 lun->lun_sep->sep_tg_pt_secondary_write_md); 1914 } 1915 1916 ssize_t core_alua_store_secondary_write_metadata( 1917 struct se_lun *lun, 1918 const char *page, 1919 size_t count) 1920 { 1921 unsigned long tmp; 1922 int ret; 1923 1924 ret = strict_strtoul(page, 0, &tmp); 1925 if (ret < 0) { 1926 printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); 1927 return -EINVAL; 1928 } 1929 if ((tmp != 0) && (tmp != 1)) { 1930 printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" 1931 " %lu\n", tmp); 1932 return -EINVAL; 1933 } 1934 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; 1935 1936 return count; 1937 } 1938 1939 int core_setup_alua(struct se_device *dev, int force_pt) 1940 { 1941 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1942 struct t10_alua *alua = T10_ALUA(su_dev); 1943 struct t10_alua_lu_gp_member *lu_gp_mem; 1944 /* 1945 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic 1946 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can 1947 * cause a problem because libata and some SATA RAID HBAs appear 1948 * under Linux/SCSI, but emulate SCSI logic themselves. 1949 */ 1950 if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 1951 !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) { 1952 alua->alua_type = SPC_ALUA_PASSTHROUGH; 1953 alua->alua_state_check = &core_alua_state_check_nop; 1954 printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 1955 " emulation\n", TRANSPORT(dev)->name); 1956 return 0; 1957 } 1958 /* 1959 * If SPC-3 or above is reported by real or emulated struct se_device, 1960 * use emulated ALUA. 1961 */ 1962 if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { 1963 printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" 1964 " device\n", TRANSPORT(dev)->name); 1965 /* 1966 * Assoicate this struct se_device with the default ALUA 1967 * LUN Group. 1968 */ 1969 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); 1970 if (IS_ERR(lu_gp_mem) || !lu_gp_mem) 1971 return -1; 1972 1973 alua->alua_type = SPC3_ALUA_EMULATED; 1974 alua->alua_state_check = &core_alua_state_check; 1975 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1976 __core_alua_attach_lu_gp_mem(lu_gp_mem, 1977 se_global->default_lu_gp); 1978 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1979 1980 printk(KERN_INFO "%s: Adding to default ALUA LU Group:" 1981 " core/alua/lu_gps/default_lu_gp\n", 1982 TRANSPORT(dev)->name); 1983 } else { 1984 alua->alua_type = SPC2_ALUA_DISABLED; 1985 alua->alua_state_check = &core_alua_state_check_nop; 1986 printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" 1987 " device\n", TRANSPORT(dev)->name); 1988 } 1989 1990 return 0; 1991 } 1992