/*******************************************************************************
 * This file contains tcm implementation using v4 configfs fabric infrastructure
 * for QLogic target mode HBAs
 *
 * (c) Copyright 2010-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@daterainc.com>
 *
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
 * the TCM_FC / Open-FCoE.org fabric module.
 *
 * Copyright (c) 2010 Cisco Systems, Inc
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "qla_def.h"
#include "qla_target.h"
#include "tcm_qla2xxx.h"

struct workqueue_struct *tcm_qla2xxx_free_wq;
struct workqueue_struct *tcm_qla2xxx_cmd_wq;

/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 */
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;
	u32 pos = 0;
	u32 err;

	*wwn = 0;
	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;
fail:
	pr_debug("err %u len %zu pos %u byte %u\n",
		 err, cp - name, pos, byte);
	return -1;
}

static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
{
	u8 b[8];

	put_unaligned_be64(wwn, b);
	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
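/*
 * Illustrative only (not part of the driver): the strict parse/format pair
 * above round-trips a colon-separated, lower-case hex WWN string.  The WWPN
 * value below is a made-up example.
 *
 *	u64 wwn;
 *	char buf[TCM_QLA2XXX_NAMELEN];
 *
 *	if (tcm_qla2xxx_parse_wwn("21:00:00:24:ff:31:4c:48", &wwn, 1) < 0)
 *		return -EINVAL;
 *	// wwn == 0x21000024ff314c48
 *	tcm_qla2xxx_format_wwn(buf, TCM_QLA2XXX_NAMELEN, wwn);
 *	// buf again holds "21:00:00:24:ff:31:4c:48"
 */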
static char *tcm_qla2xxx_get_fabric_name(void)
{
	return "qla2xxx";
}

/*
 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
 */
static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
{
	unsigned int i, j;
	u8 wwn[8];

	memset(wwn, 0, sizeof(wwn));

	/* Validate and store the new name */
	for (i = 0, j = 0; i < 16; i++) {
		int value;

		value = hex_to_bin(*ns++);
		if (value >= 0)
			j = (j << 4) | value;
		else
			return -EINVAL;

		if (i % 2) {
			wwn[i/2] = j & 0xff;
			j = 0;
		}
	}

	*nm = wwn_to_u64(wwn);
	return 0;
}

/*
 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
 * store_fc_host_vport_create()
 */
static int tcm_qla2xxx_npiv_parse_wwn(
	const char *name,
	size_t count,
	u64 *wwpn,
	u64 *wwnn)
{
	unsigned int cnt = count;
	int rc;

	*wwpn = 0;
	*wwnn = 0;

	/* count may include a LF at end of string */
	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
		cnt--;

	/* validate we have enough characters for WWPN */
	if ((cnt != (16+1+16)) || (name[16] != ':'))
		return -EINVAL;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
	if (rc != 0)
		return rc;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
	if (rc != 0)
		return rc;

	return 0;
}

static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
					   u64 wwpn, u64 wwnn)
{
	u8 b[8], b2[8];

	put_unaligned_be64(wwpn, b);
	put_unaligned_be64(wwnn, b2);
	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
		b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
}

static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
	return "qla2xxx_npiv";
}

static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	u8 proto_id;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		proto_id = fc_get_fabric_proto_ident(se_tpg);
		break;
	}

	return proto_id;
}
static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;

	return lport->lport_naa_name;
}

static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;

	return &lport->lport_npiv_name[0];
}

static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	return tpg->lport_tpgt;
}

static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_qla2xxx_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
		break;
	}

	return ret;
}

static u32 tcm_qla2xxx_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	int ret = 0;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
		break;
	}

	return ret;
}

static char *tcm_qla2xxx_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	char *tid = NULL;

	switch (lport->lport_proto_id) {
	case SCSI_PROTOCOL_FCP:
	default:
		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
		break;
	}

	return tid;
}

static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.generate_node_acls;
}

static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.cache_dynamic_acls;
}

static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}

static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}
static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}

static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void tcm_qla2xxx_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->lport_tpgt;
}

static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
			struct qla_tgt_mgmt_cmd, free_work);

	transport_generic_free_cmd(&mcmd->se_cmd, 0);
}

/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback.  qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}

static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback.  qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}

/*
 * Called from struct target_core_fabric_ops->check_stop_free() context
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
 * fabric descriptor @se_cmd command to release
 */
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);
		qlt_free_mcmd(mcmd);
		return;
	}

	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
	qlt_free_cmd(cmd);
}

static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return 1;
}

static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_unreg_sess(sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}
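/*
 * Sketch of the FCP WRITE flow as implemented by the callbacks in this file
 * (descriptive only, no functional change):
 *
 *	qlt_do_work() -> tcm_qla2xxx_handle_cmd() -> target_submit_cmd()
 *	  -> tcm_qla2xxx_write_pending() -> qlt_rdy_to_xfer()
 *	  -> CTIO completion -> tcm_qla2xxx_handle_data()
 *	  -> tcm_qla2xxx_handle_data_work() -> target_execute_cmd()
 *	  -> tcm_qla2xxx_queue_status() / tcm_qla2xxx_queue_data_in()
 */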
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Check for WRITE_PENDING status to determine if we need to wait for
	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
					    3000);
		return 0;
	}
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

	return 0;
}

static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	return cmd->tag;
}

static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

/*
 * Called from process context in qla_target.c:qlt_do_work() code
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
	int data_dir, int bidi)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess;
	int flags = TARGET_SCF_ACK_KREF;

	if (bidi)
		flags |= TARGET_SCF_BIDI_OP;

	sess = cmd->sess;
	if (!sess) {
		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
		return -EINVAL;
	}

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("Unable to locate active struct se_session\n");
		return -EINVAL;
	}

	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
				cmd->unpacked_lun, data_length, fcp_task_attr,
				data_dir, flags);
}
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	/*
	 * Ensure that the complete FCP WRITE payload has been received.
	 * Otherwise return an exception via CHECK_CONDITION status.
	 */
	if (!cmd->write_data_transferred) {
		/*
		 * Check if se_cmd has already been aborted via LUN_RESET, and
		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
		 */
		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
			complete(&cmd->se_cmd.t_transport_stop_comp);
			return;
		}

		transport_generic_request_failure(&cmd->se_cmd,
						  TCM_CHECK_CONDITION_ABORT_CMD);
		return;
	}

	return target_execute_cmd(&cmd->se_cmd);
}

/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}

/*
 * Called from qla_target.c:qlt_issue_task_mgmt()
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
	uint8_t tmr_func, uint32_t tag)
{
	struct qla_tgt_sess *sess = mcmd->sess;
	struct se_cmd *se_cmd = &mcmd->se_cmd;

	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
			tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}

static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;

	/*
	 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				se_cmd->scsi_status);
}

static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	cmd->bufflen = se_cmd->data_length;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for qla_tgt_xmit_response LLD code
		 */
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
			se_cmd->residual_count = 0;
		}
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count += se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);

	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
			mcmd, se_tmr->function, se_tmr->response);
	/*
	 * Do translation between TCM TM response codes and
	 * QLA2xxx FC TM response codes.
	 */
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
		break;
	case TMR_FUNCTION_REJECTED:
		mcmd->fc_tm_rsp = FC_TM_REJECT;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
	default:
		mcmd->fc_tm_rsp = FC_TM_FAILED;
		break;
	}
	/*
	 * Queue the TM response to QLA2xxx LLD to build a
	 * CTIO response packet.
	 */
	qlt_xmit_tm_rsp(mcmd);
}

/* Local pointer to allocated TCM configfs fabric module */
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;

static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	WARN_ON(node && (node != se_nacl));

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
static void tcm_qla2xxx_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);

	qlt_unreg_sess(se_sess->fabric_sess_ptr);
}

static void tcm_qla2xxx_put_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	if (!sess)
		return;

	assert_spin_locked(&sess->vha->hw->hardware_lock);
	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}

static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	assert_spin_locked(&sess->vha->hw->hardware_lock);
	target_sess_cmd_list_set_waiting(sess->se_sess);
}

static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_qla2xxx_nacl *nacl;
	u64 wwnn;
	u32 qla2xxx_nexus_depth;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);
/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
	qla2xxx_nexus_depth = 1;

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, qla2xxx_nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return se_nacl;
}

static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct se_portal_group *se_tpg = se_acl->se_tpg;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
				struct tcm_qla2xxx_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
	kfree(nacl);
}

/* Start items for tcm_qla2xxx_tpg_attrib_cit */

#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}
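/*
 * Illustrative expansion (comment only): DEF_QLA_TPG_ATTRIB(generate_node_acls)
 * emits tcm_qla2xxx_tpg_attrib_show_generate_node_acls() and
 * tcm_qla2xxx_tpg_attrib_store_generate_node_acls(), which read and write
 * tpg->tpg_attrib.generate_node_acls through the configfs attribute created
 * by QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR) below.
 */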
#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
									\
static int tcm_qla2xxx_set_attrib_##_name(				\
	struct tcm_qla2xxx_tpg *tpg,					\
	unsigned long val)						\
{									\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->_name = val;							\
	return 0;							\
}

#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
 */
DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
DEF_QLA_TPG_ATTRIB(generate_node_acls);
QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
 */
DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
	NULL,
};

/* End items for tcm_qla2xxx_tpg_attrib_cit */

static ssize_t tcm_qla2xxx_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n",
			atomic_read(&tpg->lport_tpg_enabled));
}

static ssize_t tcm_qla2xxx_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}

	if (op) {
		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		if (!vha->vha_tgt.qla_tgt) {
			pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
			return -ENODEV;
		}
		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	}

	return count;
}

TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	NULL,
};

static struct se_portal_group *tcm_qla2xxx_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if ((tpgt != 1)) {
		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
		return ERR_PTR(-ENOSYS);
	}

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;
	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;

	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}

	lport->tpg_1 = tpg;

	return &tpg->se_tpg;
}

static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;

	kfree(tpg);
}
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;

	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;

	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	lport->tpg_1 = tpg;
	return &tpg->se_tpg;
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
	scsi_qla_host_t *vha,
	const uint8_t *s_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	u32 key;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("find_sess_by_s_id: 0x%06x\n", key);

	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
	if (!se_nacl) {
		pr_debug("Unable to locate s_id: 0x%06x\n", key);
		return NULL;
	}
	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
	    se_nacl, se_nacl->initiatorname);

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint8_t *s_id)
{
	u32 key;
	void *slot;
	int rc;

	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				    (int)key);
		} else {
			pr_debug("Wiping nonexisting fc_port entry\n");
		}

		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->qla_tgt_sess = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	qla_tgt_sess->se_sess = se_sess;
	nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
	scsi_qla_host_t *vha,
	const uint16_t loop_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = lport->lport_loopid_map + loop_id;
	se_nacl = fc_loopid->se_nacl;
	if (!se_nacl) {
		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
		    loop_id);
		return NULL;
	}

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	if (!nacl->qla_tgt_sess) {
		pr_err("Unable to locate struct qla_tgt_sess\n");
		return NULL;
	}

	return nacl->qla_tgt_sess;
}
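/*
 * Descriptive note (no functional change): each lport keeps two lookup tables
 * that the *_find_sess_* and *_set_sess_* helpers above and below maintain in
 * lock-step under qla_hw_data->hardware_lock:
 *
 *   - lport->lport_fcport_map: a 32-bit btree keyed by the 24-bit FC S_ID
 *     packed as (domain << 16) | (area << 8) | al_pa, mapping to se_node_acl
 *   - lport->lport_loopid_map: a flat array of struct tcm_qla2xxx_fc_loopid
 *     indexed directly by the firmware loop_id (see tcm_qla2xxx_init_lport())
 */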
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->qla_tgt_sess = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (qla_tgt_sess->se_sess != se_sess)
		qla_tgt_sess->se_sess = se_sess;
	if (nacl->qla_tgt_sess != qla_tgt_sess)
		nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Should always be called with qla_hw_data->hardware_lock held.
 */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
		struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
{
	struct se_session *se_sess = sess->se_sess;
	unsigned char be_sid[3];

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
				sess, be_sid);
	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
				sess, sess->loop_id);
}

static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct se_session *se_sess;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_nacl *nacl;

	BUG_ON(in_interrupt());

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
		dump_stack();
		return;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return;
	}
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);
}
/*
 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
 * to locate struct se_node_acl
 */
static int tcm_qla2xxx_check_initiator_node_acl(
	scsi_qla_host_t *vha,
	unsigned char *fc_wwpn,
	void *qla_tgt_sess,
	uint8_t *s_id,
	uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_tpg *tpg;
	struct tcm_qla2xxx_nacl *nacl;
	struct se_portal_group *se_tpg;
	struct se_node_acl *se_nacl;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess = qla_tgt_sess;
	unsigned char port_name[36];
	unsigned long flags;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return -EINVAL;
	}
	/*
	 * Locate the TPG=1 reference..
	 */
	tpg = lport->tpg_1;
	if (!tpg) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
		return -EINVAL;
	}
	se_tpg = &tpg->se_tpg;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess)) {
		pr_err("Unable to initialize struct se_session\n");
		return PTR_ERR(se_sess);
	}
	/*
	 * Format the FCP Initiator port_name into colon separated values to
	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
	 */
	memset(&port_name, 0, 36);
	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
	/*
	 * Locate our struct se_node_acl either from an explicit NodeACL created
	 * via ConfigFS, or via running in TPG demo mode.
	 */
	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
					port_name);
	if (!se_sess->se_node_acl) {
		transport_free_session(se_sess);
		return -EINVAL;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	/*
	 * And now setup the new se_nacl and session pointers into our HW lport
	 * mappings for fabric S_ID and LOOP_ID.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
			qla_tgt_sess, s_id);
	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
			qla_tgt_sess, loop_id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * Finally register the new FC Nexus with TCM
	 */
	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);

	return 0;
}
static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
			uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;


	if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
		    sess, sess->port_name,
		    sess->loop_id, loop_id, sess->s_id.b.domain,
		    sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
		    s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->s_id.b24 != s_id.b24) {
		key = (((u32) sess->s_id.b.domain << 16) |
		       ((u32) sess->s_id.b.area << 8) |
		       ((u32) sess->s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
			     "Found wrong se_nacl when updating s_id %x:%x:%x\n",
			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);

		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area << 8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
			     s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(&lport->lport_fcport_map, key, se_nacl);
		} else {
			btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
		}

		sess->s_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;
}

/*
 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
 */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	.handle_cmd = tcm_qla2xxx_handle_cmd,
	.handle_data = tcm_qla2xxx_handle_data,
	.handle_tmr = tcm_qla2xxx_handle_tmr,
	.free_cmd = tcm_qla2xxx_free_cmd,
	.free_mcmd = tcm_qla2xxx_free_mcmd,
	.free_session = tcm_qla2xxx_free_session,
	.update_sess = tcm_qla2xxx_update_sess,
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess = tcm_qla2xxx_put_sess,
	.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};

static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
	int rc;

	rc = btree_init32(&lport->lport_fcport_map);
	if (rc) {
		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
		return rc;
	}

	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
				65536);
	if (!lport->lport_loopid_map) {
		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
		btree_destroy32(&lport->lport_fcport_map);
		return -ENOMEM;
	}
	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
	       * 65536);
	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
	       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
	return 0;
}

static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
					 void *target_lport_ptr,
					 u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	/*
	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
	 */
	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = vha;

	return 0;
}
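/*
 * Illustrative configfs usage (the WWPN below is made up, and configfs is
 * assumed to be mounted at its usual /sys/kernel/config location): a physical
 * port is exposed for target mode by creating
 *
 *	/sys/kernel/config/target/qla2xxx/21:00:00:24:ff:31:4c:48/tpgt_1
 *
 * The WWPN directory name is parsed by tcm_qla2xxx_parse_wwn() from
 * tcm_qla2xxx_make_lport() below, and "tpgt_1" by tcm_qla2xxx_make_tpg().
 */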
static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
				wwpn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, wwpn, 0, 0,
				 tcm_qla2xxx_lport_register_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}

static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
					      void *target_lport_ptr,
					      u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct fc_vport *vport;
	struct Scsi_Host *sh = base_vha->host;
	struct scsi_qla_host *npiv_vha;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	struct fc_vport_identifiers vport_id;

	if (!qla_tgt_mode_enabled(base_vha)) {
		pr_err("qla2xxx base_vha not enabled for target mode\n");
		return -EPERM;
	}

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	vport = fc_vport_create(sh, 0, &vport_id);
	if (!vport) {
		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
		return -ENODEV;
	}
	/*
	 * Setup local pointer to NPIV vhba + target_lport_ptr
	 */
	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = npiv_vha;

	scsi_host_get(npiv_vha->host);
	return 0;
}
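/*
 * Descriptive note (example WWNs are made up): tcm_qla2xxx_npiv_make_lport()
 * below expects the configfs directory name in the form
 *
 *	<physical wwpn>@<npiv wwpn>:<npiv wwnn>
 *
 * where the physical WWPN uses the strict colon-separated format accepted by
 * tcm_qla2xxx_parse_wwn(), and the NPIV pair is two runs of 16 hex digits
 * joined by a single ':' as required by tcm_qla2xxx_npiv_parse_wwn(), e.g.
 * "21:00:00:24:ff:31:4c:48@2101002422334455:2001002422334455".
 */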
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, tmp[128];
	int ret;

	snprintf(tmp, 128, "%s", name);

	p = strchr(tmp, '@');
	if (!p) {
		pr_err("Unable to locate NPIV '@' separator\n");
		return ERR_PTR(-EINVAL);
	}
	*p++ = '\0';

	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
				       &npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
				 tcm_qla2xxx_lport_register_npiv_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	scsi_host_put(npiv_vha->host);
	/*
	 * Notify libfc that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(npiv_vha->fc_vport);
	scsi_host_put(base_vha->host);
	kfree(lport);
}


static ssize_t tcm_qla2xxx_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page,
	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_qla2xxx, version);

static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_version.attr,
	NULL,
};

static struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.get_fabric_name = tcm_qla2xxx_get_fabric_name,
	.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.put_session = tcm_qla2xxx_put_session,
	.shutdown_session = tcm_qla2xxx_shutdown_session,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_index = tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.write_pending_status = tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag = tcm_qla2xxx_get_task_tag,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
	.fabric_post_link = NULL,
	.fabric_pre_unlink = NULL,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
};

static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
	.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
	.tpg_get_tag = tcm_qla2xxx_get_tag,
	.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
	.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
	    tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
	.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free = tcm_qla2xxx_check_stop_free,
	.release_cmd = tcm_qla2xxx_release_cmd,
	.put_session = tcm_qla2xxx_put_session,
	.shutdown_session = tcm_qla2xxx_shutdown_session,
	.close_session = tcm_qla2xxx_close_session,
	.sess_get_index = tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_qla2xxx_write_pending,
	.write_pending_status = tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
	.get_task_tag = tcm_qla2xxx_get_task_tag,
	.get_cmd_state = tcm_qla2xxx_get_cmd_state,
	.queue_data_in = tcm_qla2xxx_queue_data_in,
	.queue_status = tcm_qla2xxx_queue_status,
	.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
	.fabric_post_link = NULL,
	.fabric_pre_unlink = NULL,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
	.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
};

static int tcm_qla2xxx_register_configfs(void)
{
	struct target_fabric_configfs *fabric, *npiv_fabric;
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
	 */
	fabric->tf_ops = tcm_qla2xxx_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
						tcm_qla2xxx_tpg_attrib_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_qla2xxx_fabric_configfs = fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");

	/*
	 * Register the top level struct config_item_type for NPIV with TCM core
	 */
	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
	if (IS_ERR(npiv_fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		ret = PTR_ERR(npiv_fabric);
		goto out_fabric;
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
	 */
	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
	/*
	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
	 */
	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
						tcm_qla2xxx_tpg_attrs;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the npiv_fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(npiv_fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
		goto out_fabric;
	}
	/*
	 * Setup our local pointer to *npiv_fabric
	 */
	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");

	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
	if (!tcm_qla2xxx_cmd_wq) {
		ret = -ENOMEM;
		goto out_free_wq;
	}

	return 0;

out_free_wq:
	destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
out_fabric:
	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	return ret;
}

static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_cmd_wq);
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
	tcm_qla2xxx_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");

	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
	tcm_qla2xxx_npiv_fabric_configfs = NULL;
	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
}

static int __init tcm_qla2xxx_init(void)
{
	int ret;

	ret = tcm_qla2xxx_register_configfs();
	if (ret < 0)
		return ret;

	return 0;
}

static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}

MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);