/*******************************************************************************
 * This file contains tcm implementation using v4 configfs fabric infrastructure
 * for QLogic target mode HBAs
 *
 * (c) Copyright 2010-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@daterainc.com>
 *
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
 * the TCM_FC / Open-FCoE.org fabric module.
 *
 * Copyright (c) 2010 Cisco Systems, Inc
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "qla_def.h"
#include "qla_target.h"
#include "tcm_qla2xxx.h"

struct workqueue_struct *tcm_qla2xxx_free_wq;
struct workqueue_struct *tcm_qla2xxx_cmd_wq;

/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 */
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;
	u32 pos = 0;
	u32 err;

	*wwn = 0;
	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;
fail:
	pr_debug("err %u len %zu pos %u byte %u\n",
		 err, cp - name, pos, byte);
	return -1;
}

static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
{
	u8 b[8];

	put_unaligned_be64(wwn, b);
	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}

static char *tcm_qla2xxx_get_fabric_name(void)
{
	return "qla2xxx";
}

/*
 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
 */
static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
{
	unsigned int i, j;
	u8 wwn[8];

	memset(wwn, 0, sizeof(wwn));

	/* Validate and store the new name */
	for (i = 0, j = 0; i < 16; i++) {
		int value;

		value = hex_to_bin(*ns++);
		if (value >= 0)
			j = (j << 4) | value;
		else
			return -EINVAL;

		if (i % 2) {
			wwn[i/2] = j & 0xff;
			j = 0;
		}
	}

	*nm = wwn_to_u64(wwn);
	return 0;
}

/*
 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
 * store_fc_host_vport_create()
 */
static int tcm_qla2xxx_npiv_parse_wwn(
	const char *name,
	size_t count,
	u64 *wwpn,
	u64 *wwnn)
{
	unsigned int cnt = count;
	int rc;

	*wwpn = 0;
	*wwnn = 0;

	/* count may include a LF at end of string */
	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
		cnt--;

	/* validate we have enough characters for WWPN */
	if ((cnt != (16+1+16)) || (name[16] != ':'))
		return -EINVAL;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
	if (rc != 0)
		return rc;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
	if (rc != 0)
		return rc;

	return 0;
}

static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
	return "qla2xxx_npiv";
}

static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	u8 proto_id;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		proto_id = fc_get_fabric_proto_ident(se_tpg);
		break;
	}

	return proto_id;
}

static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;

	return lport->lport_naa_name;
}

static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	return tpg->lport_tpgt;
}

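/*
 * The tpg_get_*() and tpg_check_*() callbacks below simply report the
 * lport identity and per-TPG attribute flags back to TCM core.  FCP is
 * the only protocol supported by this fabric, so the SPC-3 PR transport
 * ID helpers delegate to the generic fc_*() routines provided by
 * target_core_fabric_lib.
 */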
static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_qla2xxx_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					     format_code, buf);
		break;
	}

	return ret;
}

static u32 tcm_qla2xxx_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
						 format_code);
		break;
	}

	return ret;
}

static char *tcm_qla2xxx_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	char *tid = NULL;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
						   port_nexus_ptr);
		break;
	}

	return tid;
}

static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.generate_node_acls;
}

static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.cache_dynamic_acls;
}

static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}

static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}

static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}

static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void tcm_qla2xxx_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->lport_tpgt;
}

static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
			struct qla_tgt_mgmt_cmd, free_work);

	transport_generic_free_cmd(&mcmd->se_cmd, 0);
}

/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}

static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	transport_generic_free_cmd(&cmd->se_cmd, 0);
}

/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}

/*
 * Called from struct target_core_fabric_ops->check_stop_free() context
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
 * fabric descriptor @se_cmd command to release
 */
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);
		qlt_free_mcmd(mcmd);
		return;
	}

	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
	qlt_free_cmd(cmd);
}

static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return 1;
}

static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_unreg_sess(sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

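/*
 * Invoked by TCM core once it is ready to accept the payload of an
 * FCP WRITE: copy the se_cmd data SGL and length into the qla_tgt_cmd
 * descriptor and ask the qla2xxx LLD to solicit the data transfer from
 * the initiator.
 */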
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}

static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Check for WRITE_PENDING status to determine if we need to wait for
	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
					    3000);
		return 0;
	}
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

	return 0;
}

static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	return cmd->tag;
}

static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

/*
 * Called from process context in qla_target.c:qlt_do_work() code
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
	int data_dir, int bidi)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess;
	int flags = TARGET_SCF_ACK_KREF;

	if (bidi)
		flags |= TARGET_SCF_BIDI_OP;

	sess = cmd->sess;
	if (!sess) {
		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
		return -EINVAL;
	}

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("Unable to locate active struct se_session\n");
		return -EINVAL;
	}

	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
				 cmd->unpacked_lun, data_length, fcp_task_attr,
				 data_dir, flags);
}

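/*
 * Work item queued by tcm_qla2xxx_handle_data() once the LLD has posted
 * the CTIO completion for an FCP WRITE data transfer: either hand the
 * received payload to TCM for execution, or report the failed/aborted
 * transfer back to the core.
 */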
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	/*
	 * Ensure that the complete FCP WRITE payload has been received.
	 * Otherwise return an exception via CHECK_CONDITION status.
	 */
	if (!cmd->write_data_transferred) {
		/*
		 * Check if se_cmd has already been aborted via LUN_RESET, and
		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
		 */
		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
			complete(&cmd->se_cmd.t_transport_stop_comp);
			return;
		}

		transport_generic_request_failure(&cmd->se_cmd,
						  TCM_CHECK_CONDITION_ABORT_CMD);
		return;
	}

	return target_execute_cmd(&cmd->se_cmd);
}

/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}

/*
 * Called from qla_target.c:qlt_issue_task_mgmt()
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
	uint8_t tmr_func, uint32_t tag)
{
	struct qla_tgt_sess *sess = mcmd->sess;
	struct se_cmd *se_cmd = &mcmd->se_cmd;

	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
				 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}

static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;

	/*
	 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				 se_cmd->scsi_status);
}

static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	cmd->bufflen = se_cmd->data_length;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for qla_tgt_xmit_response LLD code
		 */
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
			se_cmd->residual_count = 0;
		}
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count += se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}

static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);

	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
		 mcmd, se_tmr->function, se_tmr->response);
	/*
	 * Do translation between TCM TM response codes and
	 * QLA2xxx FC TM response codes.
	 */
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
		break;
	case TMR_FUNCTION_REJECTED:
		mcmd->fc_tm_rsp = FC_TM_REJECT;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
	default:
		mcmd->fc_tm_rsp = FC_TM_FAILED;
		break;
	}
	/*
	 * Queue the TM response to QLA2xxx LLD to build a
	 * CTIO response packet.
	 */
	qlt_xmit_tm_rsp(mcmd);
}

/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;

static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	WARN_ON(node && (node != se_nacl));

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
		 se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}

static void tcm_qla2xxx_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);

	qlt_unreg_sess(se_sess->fabric_sess_ptr);
}

static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	if (!sess)
		return;

	assert_spin_locked(&sess->vha->hw->hardware_lock);
	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}

static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	assert_spin_locked(&sess->vha->hw->hardware_lock);
	target_sess_cmd_list_set_waiting(sess->se_sess);
}

static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_qla2xxx_nacl *nacl;
	u64 wwnn;
	u32 qla2xxx_nexus_depth;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);
	/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
	qla2xxx_nexus_depth = 1;

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, qla2xxx_nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return se_nacl;
}

static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct se_portal_group *se_tpg = se_acl->se_tpg;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
				struct tcm_qla2xxx_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
	kfree(nacl);
}

/* Start items for tcm_qla2xxx_tpg_attrib_cit */

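/*
 * Each boolean TPG attribute below is wired up by three macros:
 * DEF_QLA_TPG_ATTR_BOOL() generates the tcm_qla2xxx_set_attrib_$name()
 * setter with 0/1 validation, DEF_QLA_TPG_ATTRIB() generates the
 * configfs show/store handlers around it, and QLA_TPG_ATTR() emits the
 * struct configfs_attribute hooked into tcm_qla2xxx_tpg_attrib_attrs[].
 */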
#define DEF_QLA_TPG_ATTRIB(name) \
 \
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
	struct se_portal_group *se_tpg, \
	char *page) \
{ \
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
			struct tcm_qla2xxx_tpg, se_tpg); \
 \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
} \
 \
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
	struct se_portal_group *se_tpg, \
	const char *page, \
	size_t count) \
{ \
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
			struct tcm_qla2xxx_tpg, se_tpg); \
	unsigned long val; \
	int ret; \
 \
	ret = kstrtoul(page, 0, &val); \
	if (ret < 0) { \
		pr_err("kstrtoul() failed with" \
				" ret: %d\n", ret); \
		return -EINVAL; \
	} \
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
 \
	return (!ret) ? count : -EINVAL; \
}

#define DEF_QLA_TPG_ATTR_BOOL(_name) \
 \
static int tcm_qla2xxx_set_attrib_##_name( \
	struct tcm_qla2xxx_tpg *tpg, \
	unsigned long val) \
{ \
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
 \
	if ((val != 0) && (val != 1)) { \
		pr_err("Illegal boolean value %lu\n", val); \
		return -EINVAL; \
	} \
 \
	a->_name = val; \
	return 0; \
}

#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
 */
DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
DEF_QLA_TPG_ATTRIB(generate_node_acls);
QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_attrib_s_cache_dynamic_acls
 */
DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
	NULL,
};

/* End items for tcm_qla2xxx_tpg_attrib_cit */

static ssize_t tcm_qla2xxx_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n",
			atomic_read(&tpg->lport_tpg_enabled));
}

static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
				  &se_tpg->tpg_group.cg_item)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(base_vha);
	}
	complete(&base_tpg->tpg_base_comp);
}

static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 0);
		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
				       &se_tpg->tpg_group.cg_item);
	}
	complete(&base_tpg->tpg_base_comp);
}

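/*
 * Enable/disable of the base TPG is handed off to the work items above:
 * the store handler schedules tpg_base_work and sleeps on tpg_base_comp,
 * so that configfs_depend_item()/configfs_undepend_item() run from the
 * workqueue rather than from inside this configfs write handler itself.
 */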
static ssize_t tcm_qla2xxx_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
	} else {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
	}
	init_completion(&tpg->tpg_base_comp);
	schedule_work(&tpg->tpg_base_work);
	wait_for_completion(&tpg->tpg_base_comp);

	if (op) {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return -ENODEV;
	} else {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EPERM;
	}
	return count;
}

TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	NULL,
};

static struct se_portal_group *tcm_qla2xxx_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if (tpgt != 1) {
		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
		return ERR_PTR(-ENOSYS);
	}

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;
	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;

	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}

	lport->tpg_1 = tpg;

	return &tpg->se_tpg;
}

static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;
	kfree(tpg);
}

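/*
 * NPIV TPG enable/disable operates directly on the vport owned by this
 * lport: unlike the physical-port path above, no work item or configfs
 * dependency is taken here.
 */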
static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
}

static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	}

	return count;
}

TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
	&tcm_qla2xxx_npiv_tpg_enable.attr,
	NULL,
};

static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;

	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;

	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	lport->tpg_1 = tpg;
	return &tpg->se_tpg;
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
	scsi_qla_host_t *vha,
	const uint8_t *s_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	u32 key;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("find_sess_by_s_id: 0x%06x\n", key);

	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
	if (!se_nacl) {
		pr_debug("Unable to locate s_id: 0x%06x\n", key);
		return NULL;
	}
	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
		 se_nacl, se_nacl->initiatorname);

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint8_t *s_id)
{
	u32 key;
	void *slot;
	int rc;

	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					    new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				       (int)key);
		} else {
			pr_debug("Wiping nonexistent fc_port entry\n");
		}

		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->qla_tgt_sess = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	qla_tgt_sess->se_sess = se_sess;
	nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
		 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
	scsi_qla_host_t *vha,
	const uint16_t loop_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = lport->lport_loopid_map + loop_id;
	se_nacl = fc_loopid->se_nacl;
	if (!se_nacl) {
		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
			 loop_id);
		return NULL;
	}

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
		      lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->qla_tgt_sess = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (qla_tgt_sess->se_sess != se_sess)
		qla_tgt_sess->se_sess = se_sess;
	if (nacl->qla_tgt_sess != qla_tgt_sess)
		nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
		 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Should always be called with qla_hw_data->hardware_lock held.
 */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
		struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
{
	struct se_session *se_sess = sess->se_sess;
	unsigned char be_sid[3];

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
				     sess, be_sid);
	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
					sess, sess->loop_id);
}

static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct se_session *se_sess;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_nacl *nacl;

	BUG_ON(in_interrupt());

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
		dump_stack();
		return;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return;
	}
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);
}

/*
 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
 * to locate struct se_node_acl
 */
static int tcm_qla2xxx_check_initiator_node_acl(
	scsi_qla_host_t *vha,
	unsigned char *fc_wwpn,
	void *qla_tgt_sess,
	uint8_t *s_id,
	uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_tpg *tpg;
	struct tcm_qla2xxx_nacl *nacl;
	struct se_portal_group *se_tpg;
	struct se_node_acl *se_nacl;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess = qla_tgt_sess;
	unsigned char port_name[36];
	unsigned long flags;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return -EINVAL;
	}
	/*
	 * Locate the TPG=1 reference..
	 */
	tpg = lport->tpg_1;
	if (!tpg) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
		return -EINVAL;
	}
	se_tpg = &tpg->se_tpg;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess)) {
		pr_err("Unable to initialize struct se_session\n");
		return PTR_ERR(se_sess);
	}
	/*
	 * Format the FCP Initiator port_name into colon separated values to
	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
	 */
	memset(&port_name, 0, 36);
	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
		 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
	/*
	 * Locate our struct se_node_acl either from an explicit NodeACL created
	 * via ConfigFS, or via running in TPG demo mode.
	 */
	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
					port_name);
	if (!se_sess->se_node_acl) {
		transport_free_session(se_sess);
		return -EINVAL;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	/*
	 * And now setup the new se_nacl and session pointers into our HW lport
	 * mappings for fabric S_ID and LOOP_ID.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
				     qla_tgt_sess, s_id);
	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
					qla_tgt_sess, loop_id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * Finally register the new FC Nexus with TCM
	 */
	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);

	return 0;
}

static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
				    uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;


	if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
			sess, sess->port_name,
			sess->loop_id, loop_id, sess->s_id.b.domain,
			sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
			s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->s_id.b24 != s_id.b24) {
		key = (((u32) sess->s_id.b.domain << 16) |
		       ((u32) sess->s_id.b.area << 8) |
		       ((u32) sess->s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
			     "Found wrong se_nacl when updating s_id %x:%x:%x\n",
			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);

		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area << 8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
			     s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(&lport->lport_fcport_map, key, se_nacl);
		} else {
			btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
		}

		sess->s_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;
}

/*
 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
 */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	.handle_cmd = tcm_qla2xxx_handle_cmd,
	.handle_data = tcm_qla2xxx_handle_data,
	.handle_tmr = tcm_qla2xxx_handle_tmr,
	.free_cmd = tcm_qla2xxx_free_cmd,
	.free_mcmd = tcm_qla2xxx_free_mcmd,
	.free_session = tcm_qla2xxx_free_session,
	.update_sess = tcm_qla2xxx_update_sess,
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess = tcm_qla2xxx_put_sess,
	.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};

static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
	int rc;

	rc = btree_init32(&lport->lport_fcport_map);
	if (rc) {
		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
		return rc;
	}

	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
					  65536);
	if (!lport->lport_loopid_map) {
		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
		       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
		btree_destroy32(&lport->lport_fcport_map);
		return -ENOMEM;
	}
	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
	       * 65536);
	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
		 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
	return 0;
}

static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
					 void *target_lport_ptr,
					 u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	/*
	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
	 */
	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = vha;

	return 0;
}

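/*
 * configfs callback for creating a /sys/kernel/config/target/qla2xxx/$WWPN
 * physical port: parse the WWPN from the directory name, allocate the
 * lport with its fcport_map btree and loop_id map, and register it with
 * the qla2xxx LLD via qlt_lport_register().
 */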
static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
			       wwpn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, wwpn, 0, 0,
				 tcm_qla2xxx_lport_register_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}

static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
					      void *target_lport_ptr,
					      u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct fc_vport *vport;
	struct Scsi_Host *sh = base_vha->host;
	struct scsi_qla_host *npiv_vha;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	struct tcm_qla2xxx_lport *base_lport =
			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
	struct tcm_qla2xxx_tpg *base_tpg;
	struct fc_vport_identifiers vport_id;

	if (!qla_tgt_mode_enabled(base_vha)) {
		pr_err("qla2xxx base_vha not enabled for target mode\n");
		return -EPERM;
	}

	if (!base_lport || !base_lport->tpg_1 ||
	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
		pr_err("qla2xxx base_lport or tpg_1 not available\n");
		return -EPERM;
	}
	base_tpg = base_lport->tpg_1;

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	vport = fc_vport_create(sh, 0, &vport_id);
	if (!vport) {
		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
		return -ENODEV;
	}
	/*
	 * Setup local pointer to NPIV vhba + target_lport_ptr
	 */
	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = npiv_vha;
	scsi_host_get(npiv_vha->host);
	return 0;
}

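/*
 * configfs callback for creating an NPIV lport under qla2xxx_npiv/.
 * The directory name carries both the physical port and the new virtual
 * port identity in the form
 * "<physical WWPN xx:xx:xx:xx:xx:xx:xx:xx>@<NPIV WWPN>:<NPIV WWNN>",
 * where the NPIV WWPN/WWNN are each 16 hex digits.
 */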
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, tmp[128];
	int ret;

	snprintf(tmp, 128, "%s", name);

	p = strchr(tmp, '@');
	if (!p) {
		pr_err("Unable to locate NPIV '@' separator\n");
		return ERR_PTR(-EINVAL);
	}
	*p++ = '\0';

	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
				       &npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
				 tcm_qla2xxx_lport_register_npiv_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	scsi_host_put(npiv_vha->host);
	/*
	 * Notify libfc that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(npiv_vha->fc_vport);
	scsi_host_put(base_vha->host);
	kfree(lport);
}


static ssize_t tcm_qla2xxx_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page,
	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_qla2xxx, version);

static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_version.attr,
	NULL,
};

static struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.get_fabric_name = tcm_qla2xxx_get_fabric_name,
	.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.put_session = tcm_qla2xxx_put_session,
	.shutdown_session = tcm_qla2xxx_shutdown_session,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_index = tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.write_pending_status = tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag = tcm_qla2xxx_get_task_tag,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
	.fabric_post_link = NULL,
	.fabric_pre_unlink = NULL,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
};

static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
	.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
				tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.put_session = tcm_qla2xxx_put_session,
	.shutdown_session = tcm_qla2xxx_shutdown_session,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_index = tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.write_pending_status = tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag = tcm_qla2xxx_get_task_tag,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
static int tcm_qla2xxx_register_configfs(void)
{
	struct target_fabric_configfs *fabric, *npiv_fabric;
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
	 */
	fabric->tf_ops = tcm_qla2xxx_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
						tcm_qla2xxx_tpg_attrib_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_qla2xxx_fabric_configfs = fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");

	/*
	 * Register the top level struct config_item_type for NPIV with TCM core
	 */
	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
	if (IS_ERR(npiv_fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		ret = PTR_ERR(npiv_fabric);
		goto out_fabric;
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
	 */
	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
	/*
	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
	 */
	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
						tcm_qla2xxx_npiv_tpg_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the npiv_fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(npiv_fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		goto out_fabric;
	}
	/*
	 * Setup our local pointer to *npiv_fabric
	 */
	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");

	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
	if (!tcm_qla2xxx_cmd_wq) {
		ret = -ENOMEM;
		goto out_free_wq;
	}

	return 0;

out_free_wq:
	destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
out_fabric:
	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	return ret;
}

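/*
 * Undo tcm_qla2xxx_register_configfs(): destroy the command and free
 * workqueues first, then deregister both the qla2xxx and qla2xxx_npiv
 * fabric instances from TCM.
 */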
static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_cmd_wq);
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	tcm_qla2xxx_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");

	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
	tcm_qla2xxx_npiv_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
}

static int __init tcm_qla2xxx_init(void)
{
	int ret;

	ret = tcm_qla2xxx_register_configfs();
	if (ret < 0)
		return ret;

	return 0;
}

static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}

MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);
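
/*
 * Illustrative usage sketch (assumed paths and attribute names, inferred
 * from the fabric names and TPG attribute tables registered above) once
 * this module is loaded alongside qla2xxx with target mode enabled:
 *
 *   modprobe tcm_qla2xxx
 *   mkdir -p /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:4c:48/tpgt_1
 *   echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:4c:48/tpgt_1/enable
 */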