1 /******************************************************************************* 2 * This file contains tcm implementation using v4 configfs fabric infrastructure 3 * for QLogic target mode HBAs 4 * 5 * (c) Copyright 2010-2013 Datera, Inc. 6 * 7 * Author: Nicholas A. Bellinger <nab@daterainc.com> 8 * 9 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from 10 * the TCM_FC / Open-FCoE.org fabric module. 11 * 12 * Copyright (c) 2010 Cisco Systems, Inc 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or 17 * (at your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 ****************************************************************************/ 24 25 26 #include <linux/module.h> 27 #include <linux/moduleparam.h> 28 #include <generated/utsrelease.h> 29 #include <linux/utsname.h> 30 #include <linux/init.h> 31 #include <linux/list.h> 32 #include <linux/slab.h> 33 #include <linux/kthread.h> 34 #include <linux/types.h> 35 #include <linux/string.h> 36 #include <linux/configfs.h> 37 #include <linux/ctype.h> 38 #include <asm/unaligned.h> 39 #include <scsi/scsi.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_device.h> 42 #include <scsi/scsi_cmnd.h> 43 #include <target/target_core_base.h> 44 #include <target/target_core_fabric.h> 45 #include <target/target_core_fabric_configfs.h> 46 #include <target/target_core_configfs.h> 47 #include <target/configfs_macros.h> 48 49 #include "qla_def.h" 50 #include "qla_target.h" 51 #include "tcm_qla2xxx.h" 52 53 struct workqueue_struct *tcm_qla2xxx_free_wq; 54 struct workqueue_struct *tcm_qla2xxx_cmd_wq; 55 56 static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) 57 { 58 return 1; 59 } 60 61 static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) 62 { 63 return 0; 64 } 65 66 /* 67 * Parse WWN. 68 * If strict, we require lower-case hex and colon separators to be sure 69 * the name is the same as what would be generated by ft_format_wwn() 70 * so the name and wwn are mapped one-to-one. 71 */ 72 static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict) 73 { 74 const char *cp; 75 char c; 76 u32 nibble; 77 u32 byte = 0; 78 u32 pos = 0; 79 u32 err; 80 81 *wwn = 0; 82 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) { 83 c = *cp; 84 if (c == '\n' && cp[1] == '\0') 85 continue; 86 if (strict && pos++ == 2 && byte++ < 7) { 87 pos = 0; 88 if (c == ':') 89 continue; 90 err = 1; 91 goto fail; 92 } 93 if (c == '\0') { 94 err = 2; 95 if (strict && byte != 8) 96 goto fail; 97 return cp - name; 98 } 99 err = 3; 100 if (isdigit(c)) 101 nibble = c - '0'; 102 else if (isxdigit(c) && (islower(c) || !strict)) 103 nibble = tolower(c) - 'a' + 10; 104 else 105 goto fail; 106 *wwn = (*wwn << 4) | nibble; 107 } 108 err = 4; 109 fail: 110 pr_debug("err %u len %zu pos %u byte %u\n", 111 err, cp - name, pos, byte); 112 return -1; 113 } 114 115 static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) 116 { 117 u8 b[8]; 118 119 put_unaligned_be64(wwn, b); 120 return snprintf(buf, len, 121 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", 122 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); 123 } 124 125 static char *tcm_qla2xxx_get_fabric_name(void) 126 { 127 return "qla2xxx"; 128 } 129 130 /* 131 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn 132 */ 133 static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) 134 { 135 unsigned int i, j; 136 u8 wwn[8]; 137 138 memset(wwn, 0, sizeof(wwn)); 139 140 /* Validate and store the new name */ 141 for (i = 0, j = 0; i < 16; i++) { 142 int value; 143 144 value = hex_to_bin(*ns++); 145 if (value >= 0) 146 j = (j << 4) | value; 147 else 148 return -EINVAL; 149 150 if (i % 2) { 151 wwn[i/2] = j & 0xff; 152 j = 0; 153 } 154 } 155 156 *nm = wwn_to_u64(wwn); 157 return 0; 158 } 159 160 /* 161 * This parsing logic follows drivers/scsi/scsi_transport_fc.c: 162 * store_fc_host_vport_create() 163 */ 164 static int tcm_qla2xxx_npiv_parse_wwn( 165 const char *name, 166 size_t count, 167 u64 *wwpn, 168 u64 *wwnn) 169 { 170 unsigned int cnt = count; 171 int rc; 172 173 *wwpn = 0; 174 *wwnn = 0; 175 176 /* count may include a LF at end of string */ 177 if (name[cnt-1] == '\n') 178 cnt--; 179 180 /* validate we have enough characters for WWPN */ 181 if ((cnt != (16+1+16)) || (name[16] != ':')) 182 return -EINVAL; 183 184 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn); 185 if (rc != 0) 186 return rc; 187 188 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); 189 if (rc != 0) 190 return rc; 191 192 return 0; 193 } 194 195 static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, 196 u64 wwpn, u64 wwnn) 197 { 198 u8 b[8], b2[8]; 199 200 put_unaligned_be64(wwpn, b); 201 put_unaligned_be64(wwnn, b2); 202 return snprintf(buf, len, 203 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," 204 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", 205 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], 206 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); 207 } 208 209 static char *tcm_qla2xxx_npiv_get_fabric_name(void) 210 { 211 return "qla2xxx_npiv"; 212 } 213 214 static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg) 215 { 216 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 217 struct tcm_qla2xxx_tpg, se_tpg); 218 struct tcm_qla2xxx_lport *lport = tpg->lport; 219 u8 proto_id; 220 221 switch (lport->lport_proto_id) { 222 case SCSI_PROTOCOL_FCP: 223 default: 224 proto_id = fc_get_fabric_proto_ident(se_tpg); 225 break; 226 } 227 228 return proto_id; 229 } 230 231 static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) 232 { 233 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 234 struct tcm_qla2xxx_tpg, se_tpg); 235 struct tcm_qla2xxx_lport *lport = tpg->lport; 236 237 return lport->lport_naa_name; 238 } 239 240 static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) 241 { 242 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 243 struct tcm_qla2xxx_tpg, se_tpg); 244 struct tcm_qla2xxx_lport *lport = tpg->lport; 245 246 return &lport->lport_npiv_name[0]; 247 } 248 249 static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) 250 { 251 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 252 struct tcm_qla2xxx_tpg, se_tpg); 253 return tpg->lport_tpgt; 254 } 255 256 static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg) 257 { 258 return 1; 259 } 260 261 static u32 tcm_qla2xxx_get_pr_transport_id( 262 struct se_portal_group *se_tpg, 263 struct se_node_acl *se_nacl, 264 struct t10_pr_registration *pr_reg, 265 int *format_code, 266 unsigned char *buf) 267 { 268 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 269 struct tcm_qla2xxx_tpg, se_tpg); 270 struct tcm_qla2xxx_lport *lport = tpg->lport; 271 int ret = 0; 272 273 switch (lport->lport_proto_id) { 274 case SCSI_PROTOCOL_FCP: 275 default: 276 ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 277 format_code, buf); 278 break; 279 } 280 281 return ret; 282 } 283 284 static u32 tcm_qla2xxx_get_pr_transport_id_len( 285 struct se_portal_group *se_tpg, 286 struct se_node_acl *se_nacl, 287 struct t10_pr_registration *pr_reg, 288 int *format_code) 289 { 290 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 291 struct tcm_qla2xxx_tpg, se_tpg); 292 struct tcm_qla2xxx_lport *lport = tpg->lport; 293 int ret = 0; 294 295 switch (lport->lport_proto_id) { 296 case SCSI_PROTOCOL_FCP: 297 default: 298 ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 299 format_code); 300 break; 301 } 302 303 return ret; 304 } 305 306 static char *tcm_qla2xxx_parse_pr_out_transport_id( 307 struct se_portal_group *se_tpg, 308 const char *buf, 309 u32 *out_tid_len, 310 char **port_nexus_ptr) 311 { 312 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 313 struct tcm_qla2xxx_tpg, se_tpg); 314 struct tcm_qla2xxx_lport *lport = tpg->lport; 315 char *tid = NULL; 316 317 switch (lport->lport_proto_id) { 318 case SCSI_PROTOCOL_FCP: 319 default: 320 tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 321 port_nexus_ptr); 322 break; 323 } 324 325 return tid; 326 } 327 328 static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) 329 { 330 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 331 struct tcm_qla2xxx_tpg, se_tpg); 332 333 return QLA_TPG_ATTRIB(tpg)->generate_node_acls; 334 } 335 336 static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) 337 { 338 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 339 struct tcm_qla2xxx_tpg, se_tpg); 340 341 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; 342 } 343 344 static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) 345 { 346 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 347 struct tcm_qla2xxx_tpg, se_tpg); 348 349 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; 350 } 351 352 static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) 353 { 354 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 355 struct tcm_qla2xxx_tpg, se_tpg); 356 357 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; 358 } 359 360 static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) 361 { 362 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 363 struct tcm_qla2xxx_tpg, se_tpg); 364 365 return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; 366 } 367 368 static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( 369 struct se_portal_group *se_tpg) 370 { 371 struct tcm_qla2xxx_nacl *nacl; 372 373 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL); 374 if (!nacl) { 375 pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n"); 376 return NULL; 377 } 378 379 return &nacl->se_node_acl; 380 } 381 382 static void tcm_qla2xxx_release_fabric_acl( 383 struct se_portal_group *se_tpg, 384 struct se_node_acl *se_nacl) 385 { 386 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 387 struct tcm_qla2xxx_nacl, se_node_acl); 388 kfree(nacl); 389 } 390 391 static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) 392 { 393 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 394 struct tcm_qla2xxx_tpg, se_tpg); 395 396 return tpg->lport_tpgt; 397 } 398 399 static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) 400 { 401 struct qla_tgt_mgmt_cmd *mcmd = container_of(work, 402 struct qla_tgt_mgmt_cmd, free_work); 403 404 transport_generic_free_cmd(&mcmd->se_cmd, 0); 405 } 406 407 /* 408 * Called from qla_target_template->free_mcmd(), and will call 409 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops 410 * release callback. qla_hw_data->hardware_lock is expected to be held 411 */ 412 static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) 413 { 414 INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); 415 queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); 416 } 417 418 static void tcm_qla2xxx_complete_free(struct work_struct *work) 419 { 420 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 421 422 transport_generic_free_cmd(&cmd->se_cmd, 0); 423 } 424 425 /* 426 * Called from qla_target_template->free_cmd(), and will call 427 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops 428 * release callback. qla_hw_data->hardware_lock is expected to be held 429 */ 430 static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) 431 { 432 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 433 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 434 } 435 436 /* 437 * Called from struct target_core_fabric_ops->check_stop_free() context 438 */ 439 static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) 440 { 441 return target_put_sess_cmd(se_cmd->se_sess, se_cmd); 442 } 443 444 /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying 445 * fabric descriptor @se_cmd command to release 446 */ 447 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) 448 { 449 struct qla_tgt_cmd *cmd; 450 451 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { 452 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, 453 struct qla_tgt_mgmt_cmd, se_cmd); 454 qlt_free_mcmd(mcmd); 455 return; 456 } 457 458 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); 459 qlt_free_cmd(cmd); 460 } 461 462 static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) 463 { 464 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; 465 struct scsi_qla_host *vha; 466 unsigned long flags; 467 468 BUG_ON(!sess); 469 vha = sess->vha; 470 471 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 472 target_sess_cmd_list_set_waiting(se_sess); 473 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 474 475 return 1; 476 } 477 478 static void tcm_qla2xxx_close_session(struct se_session *se_sess) 479 { 480 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; 481 struct scsi_qla_host *vha; 482 unsigned long flags; 483 484 BUG_ON(!sess); 485 vha = sess->vha; 486 487 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 488 qlt_unreg_sess(sess); 489 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 490 } 491 492 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) 493 { 494 return 0; 495 } 496 497 static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) 498 { 499 struct qla_tgt_cmd *cmd = container_of(se_cmd, 500 struct qla_tgt_cmd, se_cmd); 501 502 cmd->bufflen = se_cmd->data_length; 503 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 504 505 cmd->sg_cnt = se_cmd->t_data_nents; 506 cmd->sg = se_cmd->t_data_sg; 507 508 /* 509 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup 510 * the SGL mappings into PCIe memory for incoming FCP WRITE data. 511 */ 512 return qlt_rdy_to_xfer(cmd); 513 } 514 515 static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) 516 { 517 unsigned long flags; 518 /* 519 * Check for WRITE_PENDING status to determine if we need to wait for 520 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data(). 521 */ 522 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 523 if (se_cmd->t_state == TRANSPORT_WRITE_PENDING || 524 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 525 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 526 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 527 3000); 528 return 0; 529 } 530 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 531 532 return 0; 533 } 534 535 static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) 536 { 537 return; 538 } 539 540 static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) 541 { 542 struct qla_tgt_cmd *cmd = container_of(se_cmd, 543 struct qla_tgt_cmd, se_cmd); 544 545 return cmd->tag; 546 } 547 548 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) 549 { 550 return 0; 551 } 552 553 /* 554 * Called from process context in qla_target.c:qlt_do_work() code 555 */ 556 static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, 557 unsigned char *cdb, uint32_t data_length, int fcp_task_attr, 558 int data_dir, int bidi) 559 { 560 struct se_cmd *se_cmd = &cmd->se_cmd; 561 struct se_session *se_sess; 562 struct qla_tgt_sess *sess; 563 int flags = TARGET_SCF_ACK_KREF; 564 565 if (bidi) 566 flags |= TARGET_SCF_BIDI_OP; 567 568 sess = cmd->sess; 569 if (!sess) { 570 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); 571 return -EINVAL; 572 } 573 574 se_sess = sess->se_sess; 575 if (!se_sess) { 576 pr_err("Unable to locate active struct se_session\n"); 577 return -EINVAL; 578 } 579 580 return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], 581 cmd->unpacked_lun, data_length, fcp_task_attr, 582 data_dir, flags); 583 } 584 585 static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 586 { 587 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 588 589 /* 590 * Ensure that the complete FCP WRITE payload has been received. 591 * Otherwise return an exception via CHECK_CONDITION status. 592 */ 593 if (!cmd->write_data_transferred) { 594 /* 595 * Check if se_cmd has already been aborted via LUN_RESET, and 596 * waiting upon completion in tcm_qla2xxx_write_pending_status() 597 */ 598 if (cmd->se_cmd.transport_state & CMD_T_ABORTED) { 599 complete(&cmd->se_cmd.t_transport_stop_comp); 600 return; 601 } 602 603 transport_generic_request_failure(&cmd->se_cmd, 604 TCM_CHECK_CONDITION_ABORT_CMD); 605 return; 606 } 607 608 return target_execute_cmd(&cmd->se_cmd); 609 } 610 611 /* 612 * Called from qla_target.c:qlt_do_ctio_completion() 613 */ 614 static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) 615 { 616 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); 617 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 618 } 619 620 /* 621 * Called from qla_target.c:qlt_issue_task_mgmt() 622 */ 623 static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, 624 uint8_t tmr_func, uint32_t tag) 625 { 626 struct qla_tgt_sess *sess = mcmd->sess; 627 struct se_cmd *se_cmd = &mcmd->se_cmd; 628 629 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, 630 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); 631 } 632 633 static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) 634 { 635 struct qla_tgt_cmd *cmd = container_of(se_cmd, 636 struct qla_tgt_cmd, se_cmd); 637 638 cmd->bufflen = se_cmd->data_length; 639 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 640 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 641 642 cmd->sg_cnt = se_cmd->t_data_nents; 643 cmd->sg = se_cmd->t_data_sg; 644 cmd->offset = 0; 645 646 /* 647 * Now queue completed DATA_IN the qla2xxx LLD and response ring 648 */ 649 return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS, 650 se_cmd->scsi_status); 651 } 652 653 static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) 654 { 655 struct qla_tgt_cmd *cmd = container_of(se_cmd, 656 struct qla_tgt_cmd, se_cmd); 657 int xmit_type = QLA_TGT_XMIT_STATUS; 658 659 cmd->bufflen = se_cmd->data_length; 660 cmd->sg = NULL; 661 cmd->sg_cnt = 0; 662 cmd->offset = 0; 663 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 664 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 665 666 if (se_cmd->data_direction == DMA_FROM_DEVICE) { 667 /* 668 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen 669 * for qla_tgt_xmit_response LLD code 670 */ 671 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 672 se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; 673 se_cmd->residual_count = 0; 674 } 675 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 676 se_cmd->residual_count += se_cmd->data_length; 677 678 cmd->bufflen = 0; 679 } 680 /* 681 * Now queue status response to qla2xxx LLD code and response ring 682 */ 683 return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 684 } 685 686 static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) 687 { 688 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 689 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, 690 struct qla_tgt_mgmt_cmd, se_cmd); 691 692 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n", 693 mcmd, se_tmr->function, se_tmr->response); 694 /* 695 * Do translation between TCM TM response codes and 696 * QLA2xxx FC TM response codes. 697 */ 698 switch (se_tmr->response) { 699 case TMR_FUNCTION_COMPLETE: 700 mcmd->fc_tm_rsp = FC_TM_SUCCESS; 701 break; 702 case TMR_TASK_DOES_NOT_EXIST: 703 mcmd->fc_tm_rsp = FC_TM_BAD_CMD; 704 break; 705 case TMR_FUNCTION_REJECTED: 706 mcmd->fc_tm_rsp = FC_TM_REJECT; 707 break; 708 case TMR_LUN_DOES_NOT_EXIST: 709 default: 710 mcmd->fc_tm_rsp = FC_TM_FAILED; 711 break; 712 } 713 /* 714 * Queue the TM response to QLA2xxx LLD to build a 715 * CTIO response packet. 716 */ 717 qlt_xmit_tm_rsp(mcmd); 718 } 719 720 /* Local pointer to allocated TCM configfs fabric module */ 721 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; 722 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; 723 724 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 725 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); 726 /* 727 * Expected to be called with struct qla_hw_data->hardware_lock held 728 */ 729 static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) 730 { 731 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; 732 struct se_portal_group *se_tpg = se_nacl->se_tpg; 733 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 734 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 735 struct tcm_qla2xxx_lport, lport_wwn); 736 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 737 struct tcm_qla2xxx_nacl, se_node_acl); 738 void *node; 739 740 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); 741 742 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); 743 WARN_ON(node && (node != se_nacl)); 744 745 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", 746 se_nacl, nacl->nport_wwnn, nacl->nport_id); 747 /* 748 * Now clear the se_nacl and session pointers from our HW lport lookup 749 * table mapping for this initiator's fabric S_ID and LOOP_ID entries. 750 * 751 * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> 752 * target_wait_for_sess_cmds() before the session waits for outstanding 753 * I/O to complete, to avoid a race between session shutdown execution 754 * and incoming ATIOs or TMRs picking up a stale se_node_act reference. 755 */ 756 tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); 757 } 758 759 static void tcm_qla2xxx_release_session(struct kref *kref) 760 { 761 struct se_session *se_sess = container_of(kref, 762 struct se_session, sess_kref); 763 764 qlt_unreg_sess(se_sess->fabric_sess_ptr); 765 } 766 767 static void tcm_qla2xxx_put_session(struct se_session *se_sess) 768 { 769 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; 770 struct qla_hw_data *ha = sess->vha->hw; 771 unsigned long flags; 772 773 spin_lock_irqsave(&ha->hardware_lock, flags); 774 kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); 775 spin_unlock_irqrestore(&ha->hardware_lock, flags); 776 } 777 778 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) 779 { 780 assert_spin_locked(&sess->vha->hw->hardware_lock); 781 kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); 782 } 783 784 static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) 785 { 786 assert_spin_locked(&sess->vha->hw->hardware_lock); 787 target_sess_cmd_list_set_waiting(sess->se_sess); 788 } 789 790 static struct se_node_acl *tcm_qla2xxx_make_nodeacl( 791 struct se_portal_group *se_tpg, 792 struct config_group *group, 793 const char *name) 794 { 795 struct se_node_acl *se_nacl, *se_nacl_new; 796 struct tcm_qla2xxx_nacl *nacl; 797 u64 wwnn; 798 u32 qla2xxx_nexus_depth; 799 800 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) 801 return ERR_PTR(-EINVAL); 802 803 se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg); 804 if (!se_nacl_new) 805 return ERR_PTR(-ENOMEM); 806 /* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */ 807 qla2xxx_nexus_depth = 1; 808 809 /* 810 * se_nacl_new may be released by core_tpg_add_initiator_node_acl() 811 * when converting a NodeACL from demo mode -> explict 812 */ 813 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 814 name, qla2xxx_nexus_depth); 815 if (IS_ERR(se_nacl)) { 816 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); 817 return se_nacl; 818 } 819 /* 820 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN 821 */ 822 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 823 nacl->nport_wwnn = wwnn; 824 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); 825 826 return se_nacl; 827 } 828 829 static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl) 830 { 831 struct se_portal_group *se_tpg = se_acl->se_tpg; 832 struct tcm_qla2xxx_nacl *nacl = container_of(se_acl, 833 struct tcm_qla2xxx_nacl, se_node_acl); 834 835 core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1); 836 kfree(nacl); 837 } 838 839 /* Start items for tcm_qla2xxx_tpg_attrib_cit */ 840 841 #define DEF_QLA_TPG_ATTRIB(name) \ 842 \ 843 static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ 844 struct se_portal_group *se_tpg, \ 845 char *page) \ 846 { \ 847 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ 848 struct tcm_qla2xxx_tpg, se_tpg); \ 849 \ 850 return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ 851 } \ 852 \ 853 static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ 854 struct se_portal_group *se_tpg, \ 855 const char *page, \ 856 size_t count) \ 857 { \ 858 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ 859 struct tcm_qla2xxx_tpg, se_tpg); \ 860 unsigned long val; \ 861 int ret; \ 862 \ 863 ret = kstrtoul(page, 0, &val); \ 864 if (ret < 0) { \ 865 pr_err("kstrtoul() failed with" \ 866 " ret: %d\n", ret); \ 867 return -EINVAL; \ 868 } \ 869 ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \ 870 \ 871 return (!ret) ? count : -EINVAL; \ 872 } 873 874 #define DEF_QLA_TPG_ATTR_BOOL(_name) \ 875 \ 876 static int tcm_qla2xxx_set_attrib_##_name( \ 877 struct tcm_qla2xxx_tpg *tpg, \ 878 unsigned long val) \ 879 { \ 880 struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ 881 \ 882 if ((val != 0) && (val != 1)) { \ 883 pr_err("Illegal boolean value %lu\n", val); \ 884 return -EINVAL; \ 885 } \ 886 \ 887 a->_name = val; \ 888 return 0; \ 889 } 890 891 #define QLA_TPG_ATTR(_name, _mode) \ 892 TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode); 893 894 /* 895 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls 896 */ 897 DEF_QLA_TPG_ATTR_BOOL(generate_node_acls); 898 DEF_QLA_TPG_ATTRIB(generate_node_acls); 899 QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR); 900 901 /* 902 Define tcm_qla2xxx_attrib_s_cache_dynamic_acls 903 */ 904 DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls); 905 DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); 906 QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR); 907 908 /* 909 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect 910 */ 911 DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect); 912 DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); 913 QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); 914 915 /* 916 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect 917 */ 918 DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); 919 DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); 920 QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); 921 922 /* 923 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only 924 */ 925 DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only); 926 DEF_QLA_TPG_ATTRIB(demo_mode_login_only); 927 QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR); 928 929 static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { 930 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, 931 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, 932 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, 933 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, 934 &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr, 935 NULL, 936 }; 937 938 /* End items for tcm_qla2xxx_tpg_attrib_cit */ 939 940 static ssize_t tcm_qla2xxx_tpg_show_enable( 941 struct se_portal_group *se_tpg, 942 char *page) 943 { 944 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 945 struct tcm_qla2xxx_tpg, se_tpg); 946 947 return snprintf(page, PAGE_SIZE, "%d\n", 948 atomic_read(&tpg->lport_tpg_enabled)); 949 } 950 951 static ssize_t tcm_qla2xxx_tpg_store_enable( 952 struct se_portal_group *se_tpg, 953 const char *page, 954 size_t count) 955 { 956 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 957 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 958 struct tcm_qla2xxx_lport, lport_wwn); 959 struct scsi_qla_host *vha = lport->qla_vha; 960 struct qla_hw_data *ha = vha->hw; 961 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 962 struct tcm_qla2xxx_tpg, se_tpg); 963 unsigned long op; 964 int rc; 965 966 rc = kstrtoul(page, 0, &op); 967 if (rc < 0) { 968 pr_err("kstrtoul() returned %d\n", rc); 969 return -EINVAL; 970 } 971 if ((op != 1) && (op != 0)) { 972 pr_err("Illegal value for tpg_enable: %lu\n", op); 973 return -EINVAL; 974 } 975 976 if (op) { 977 atomic_set(&tpg->lport_tpg_enabled, 1); 978 qlt_enable_vha(vha); 979 } else { 980 if (!ha->tgt.qla_tgt) { 981 pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); 982 return -ENODEV; 983 } 984 atomic_set(&tpg->lport_tpg_enabled, 0); 985 qlt_stop_phase1(ha->tgt.qla_tgt); 986 } 987 988 return count; 989 } 990 991 TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); 992 993 static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { 994 &tcm_qla2xxx_tpg_enable.attr, 995 NULL, 996 }; 997 998 static struct se_portal_group *tcm_qla2xxx_make_tpg( 999 struct se_wwn *wwn, 1000 struct config_group *group, 1001 const char *name) 1002 { 1003 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1004 struct tcm_qla2xxx_lport, lport_wwn); 1005 struct tcm_qla2xxx_tpg *tpg; 1006 unsigned long tpgt; 1007 int ret; 1008 1009 if (strstr(name, "tpgt_") != name) 1010 return ERR_PTR(-EINVAL); 1011 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) 1012 return ERR_PTR(-EINVAL); 1013 1014 if (!lport->qla_npiv_vp && (tpgt != 1)) { 1015 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); 1016 return ERR_PTR(-ENOSYS); 1017 } 1018 1019 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); 1020 if (!tpg) { 1021 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); 1022 return ERR_PTR(-ENOMEM); 1023 } 1024 tpg->lport = lport; 1025 tpg->lport_tpgt = tpgt; 1026 /* 1027 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic 1028 * NodeACLs 1029 */ 1030 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; 1031 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; 1032 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; 1033 QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; 1034 1035 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, 1036 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1037 if (ret < 0) { 1038 kfree(tpg); 1039 return NULL; 1040 } 1041 /* 1042 * Setup local TPG=1 pointer for non NPIV mode. 1043 */ 1044 if (lport->qla_npiv_vp == NULL) 1045 lport->tpg_1 = tpg; 1046 1047 return &tpg->se_tpg; 1048 } 1049 1050 static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) 1051 { 1052 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 1053 struct tcm_qla2xxx_tpg, se_tpg); 1054 struct tcm_qla2xxx_lport *lport = tpg->lport; 1055 struct scsi_qla_host *vha = lport->qla_vha; 1056 struct qla_hw_data *ha = vha->hw; 1057 /* 1058 * Call into qla2x_target.c LLD logic to shutdown the active 1059 * FC Nexuses and disable target mode operation for this qla_hw_data 1060 */ 1061 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) 1062 qlt_stop_phase1(ha->tgt.qla_tgt); 1063 1064 core_tpg_deregister(se_tpg); 1065 /* 1066 * Clear local TPG=1 pointer for non NPIV mode. 1067 */ 1068 if (lport->qla_npiv_vp == NULL) 1069 lport->tpg_1 = NULL; 1070 1071 kfree(tpg); 1072 } 1073 1074 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( 1075 struct se_wwn *wwn, 1076 struct config_group *group, 1077 const char *name) 1078 { 1079 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1080 struct tcm_qla2xxx_lport, lport_wwn); 1081 struct tcm_qla2xxx_tpg *tpg; 1082 unsigned long tpgt; 1083 int ret; 1084 1085 if (strstr(name, "tpgt_") != name) 1086 return ERR_PTR(-EINVAL); 1087 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) 1088 return ERR_PTR(-EINVAL); 1089 1090 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); 1091 if (!tpg) { 1092 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); 1093 return ERR_PTR(-ENOMEM); 1094 } 1095 tpg->lport = lport; 1096 tpg->lport_tpgt = tpgt; 1097 1098 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, 1099 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); 1100 if (ret < 0) { 1101 kfree(tpg); 1102 return NULL; 1103 } 1104 return &tpg->se_tpg; 1105 } 1106 1107 /* 1108 * Expected to be called with struct qla_hw_data->hardware_lock held 1109 */ 1110 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( 1111 scsi_qla_host_t *vha, 1112 const uint8_t *s_id) 1113 { 1114 struct qla_hw_data *ha = vha->hw; 1115 struct tcm_qla2xxx_lport *lport; 1116 struct se_node_acl *se_nacl; 1117 struct tcm_qla2xxx_nacl *nacl; 1118 u32 key; 1119 1120 lport = ha->tgt.target_lport_ptr; 1121 if (!lport) { 1122 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1123 dump_stack(); 1124 return NULL; 1125 } 1126 1127 key = (((unsigned long)s_id[0] << 16) | 1128 ((unsigned long)s_id[1] << 8) | 1129 (unsigned long)s_id[2]); 1130 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1131 1132 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1133 if (!se_nacl) { 1134 pr_debug("Unable to locate s_id: 0x%06x\n", key); 1135 return NULL; 1136 } 1137 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", 1138 se_nacl, se_nacl->initiatorname); 1139 1140 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1141 if (!nacl->qla_tgt_sess) { 1142 pr_err("Unable to locate struct qla_tgt_sess\n"); 1143 return NULL; 1144 } 1145 1146 return nacl->qla_tgt_sess; 1147 } 1148 1149 /* 1150 * Expected to be called with struct qla_hw_data->hardware_lock held 1151 */ 1152 static void tcm_qla2xxx_set_sess_by_s_id( 1153 struct tcm_qla2xxx_lport *lport, 1154 struct se_node_acl *new_se_nacl, 1155 struct tcm_qla2xxx_nacl *nacl, 1156 struct se_session *se_sess, 1157 struct qla_tgt_sess *qla_tgt_sess, 1158 uint8_t *s_id) 1159 { 1160 u32 key; 1161 void *slot; 1162 int rc; 1163 1164 key = (((unsigned long)s_id[0] << 16) | 1165 ((unsigned long)s_id[1] << 8) | 1166 (unsigned long)s_id[2]); 1167 pr_debug("set_sess_by_s_id: %06x\n", key); 1168 1169 slot = btree_lookup32(&lport->lport_fcport_map, key); 1170 if (!slot) { 1171 if (new_se_nacl) { 1172 pr_debug("Setting up new fc_port entry to new_se_nacl\n"); 1173 nacl->nport_id = key; 1174 rc = btree_insert32(&lport->lport_fcport_map, key, 1175 new_se_nacl, GFP_ATOMIC); 1176 if (rc) 1177 printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", 1178 (int)key); 1179 } else { 1180 pr_debug("Wiping nonexisting fc_port entry\n"); 1181 } 1182 1183 qla_tgt_sess->se_sess = se_sess; 1184 nacl->qla_tgt_sess = qla_tgt_sess; 1185 return; 1186 } 1187 1188 if (nacl->qla_tgt_sess) { 1189 if (new_se_nacl == NULL) { 1190 pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n"); 1191 btree_remove32(&lport->lport_fcport_map, key); 1192 nacl->qla_tgt_sess = NULL; 1193 return; 1194 } 1195 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n"); 1196 btree_update32(&lport->lport_fcport_map, key, new_se_nacl); 1197 qla_tgt_sess->se_sess = se_sess; 1198 nacl->qla_tgt_sess = qla_tgt_sess; 1199 return; 1200 } 1201 1202 if (new_se_nacl == NULL) { 1203 pr_debug("Clearing existing fc_port entry\n"); 1204 btree_remove32(&lport->lport_fcport_map, key); 1205 return; 1206 } 1207 1208 pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n"); 1209 btree_update32(&lport->lport_fcport_map, key, new_se_nacl); 1210 qla_tgt_sess->se_sess = se_sess; 1211 nacl->qla_tgt_sess = qla_tgt_sess; 1212 1213 pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n", 1214 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); 1215 } 1216 1217 /* 1218 * Expected to be called with struct qla_hw_data->hardware_lock held 1219 */ 1220 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( 1221 scsi_qla_host_t *vha, 1222 const uint16_t loop_id) 1223 { 1224 struct qla_hw_data *ha = vha->hw; 1225 struct tcm_qla2xxx_lport *lport; 1226 struct se_node_acl *se_nacl; 1227 struct tcm_qla2xxx_nacl *nacl; 1228 struct tcm_qla2xxx_fc_loopid *fc_loopid; 1229 1230 lport = ha->tgt.target_lport_ptr; 1231 if (!lport) { 1232 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1233 dump_stack(); 1234 return NULL; 1235 } 1236 1237 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); 1238 1239 fc_loopid = lport->lport_loopid_map + loop_id; 1240 se_nacl = fc_loopid->se_nacl; 1241 if (!se_nacl) { 1242 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", 1243 loop_id); 1244 return NULL; 1245 } 1246 1247 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1248 1249 if (!nacl->qla_tgt_sess) { 1250 pr_err("Unable to locate struct qla_tgt_sess\n"); 1251 return NULL; 1252 } 1253 1254 return nacl->qla_tgt_sess; 1255 } 1256 1257 /* 1258 * Expected to be called with struct qla_hw_data->hardware_lock held 1259 */ 1260 static void tcm_qla2xxx_set_sess_by_loop_id( 1261 struct tcm_qla2xxx_lport *lport, 1262 struct se_node_acl *new_se_nacl, 1263 struct tcm_qla2xxx_nacl *nacl, 1264 struct se_session *se_sess, 1265 struct qla_tgt_sess *qla_tgt_sess, 1266 uint16_t loop_id) 1267 { 1268 struct se_node_acl *saved_nacl; 1269 struct tcm_qla2xxx_fc_loopid *fc_loopid; 1270 1271 pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); 1272 1273 fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) 1274 lport->lport_loopid_map)[loop_id]; 1275 1276 saved_nacl = fc_loopid->se_nacl; 1277 if (!saved_nacl) { 1278 pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); 1279 fc_loopid->se_nacl = new_se_nacl; 1280 if (qla_tgt_sess->se_sess != se_sess) 1281 qla_tgt_sess->se_sess = se_sess; 1282 if (nacl->qla_tgt_sess != qla_tgt_sess) 1283 nacl->qla_tgt_sess = qla_tgt_sess; 1284 return; 1285 } 1286 1287 if (nacl->qla_tgt_sess) { 1288 if (new_se_nacl == NULL) { 1289 pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); 1290 fc_loopid->se_nacl = NULL; 1291 nacl->qla_tgt_sess = NULL; 1292 return; 1293 } 1294 1295 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n"); 1296 fc_loopid->se_nacl = new_se_nacl; 1297 if (qla_tgt_sess->se_sess != se_sess) 1298 qla_tgt_sess->se_sess = se_sess; 1299 if (nacl->qla_tgt_sess != qla_tgt_sess) 1300 nacl->qla_tgt_sess = qla_tgt_sess; 1301 return; 1302 } 1303 1304 if (new_se_nacl == NULL) { 1305 pr_debug("Clearing fc_loopid->se_nacl\n"); 1306 fc_loopid->se_nacl = NULL; 1307 return; 1308 } 1309 1310 pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n"); 1311 fc_loopid->se_nacl = new_se_nacl; 1312 if (qla_tgt_sess->se_sess != se_sess) 1313 qla_tgt_sess->se_sess = se_sess; 1314 if (nacl->qla_tgt_sess != qla_tgt_sess) 1315 nacl->qla_tgt_sess = qla_tgt_sess; 1316 1317 pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n", 1318 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); 1319 } 1320 1321 /* 1322 * Should always be called with qla_hw_data->hardware_lock held. 1323 */ 1324 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, 1325 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) 1326 { 1327 struct se_session *se_sess = sess->se_sess; 1328 unsigned char be_sid[3]; 1329 1330 be_sid[0] = sess->s_id.b.domain; 1331 be_sid[1] = sess->s_id.b.area; 1332 be_sid[2] = sess->s_id.b.al_pa; 1333 1334 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, 1335 sess, be_sid); 1336 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, 1337 sess, sess->loop_id); 1338 } 1339 1340 static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) 1341 { 1342 struct qla_tgt *tgt = sess->tgt; 1343 struct qla_hw_data *ha = tgt->ha; 1344 struct se_session *se_sess; 1345 struct se_node_acl *se_nacl; 1346 struct tcm_qla2xxx_lport *lport; 1347 struct tcm_qla2xxx_nacl *nacl; 1348 1349 BUG_ON(in_interrupt()); 1350 1351 se_sess = sess->se_sess; 1352 if (!se_sess) { 1353 pr_err("struct qla_tgt_sess->se_sess is NULL\n"); 1354 dump_stack(); 1355 return; 1356 } 1357 se_nacl = se_sess->se_node_acl; 1358 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1359 1360 lport = ha->tgt.target_lport_ptr; 1361 if (!lport) { 1362 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1363 dump_stack(); 1364 return; 1365 } 1366 target_wait_for_sess_cmds(se_sess); 1367 1368 transport_deregister_session_configfs(sess->se_sess); 1369 transport_deregister_session(sess->se_sess); 1370 } 1371 1372 /* 1373 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() 1374 * to locate struct se_node_acl 1375 */ 1376 static int tcm_qla2xxx_check_initiator_node_acl( 1377 scsi_qla_host_t *vha, 1378 unsigned char *fc_wwpn, 1379 void *qla_tgt_sess, 1380 uint8_t *s_id, 1381 uint16_t loop_id) 1382 { 1383 struct qla_hw_data *ha = vha->hw; 1384 struct tcm_qla2xxx_lport *lport; 1385 struct tcm_qla2xxx_tpg *tpg; 1386 struct tcm_qla2xxx_nacl *nacl; 1387 struct se_portal_group *se_tpg; 1388 struct se_node_acl *se_nacl; 1389 struct se_session *se_sess; 1390 struct qla_tgt_sess *sess = qla_tgt_sess; 1391 unsigned char port_name[36]; 1392 unsigned long flags; 1393 1394 lport = ha->tgt.target_lport_ptr; 1395 if (!lport) { 1396 pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); 1397 dump_stack(); 1398 return -EINVAL; 1399 } 1400 /* 1401 * Locate the TPG=1 reference.. 1402 */ 1403 tpg = lport->tpg_1; 1404 if (!tpg) { 1405 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n"); 1406 return -EINVAL; 1407 } 1408 se_tpg = &tpg->se_tpg; 1409 1410 se_sess = transport_init_session(); 1411 if (IS_ERR(se_sess)) { 1412 pr_err("Unable to initialize struct se_session\n"); 1413 return PTR_ERR(se_sess); 1414 } 1415 /* 1416 * Format the FCP Initiator port_name into colon seperated values to 1417 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs. 1418 */ 1419 memset(&port_name, 0, 36); 1420 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", 1421 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4], 1422 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]); 1423 /* 1424 * Locate our struct se_node_acl either from an explict NodeACL created 1425 * via ConfigFS, or via running in TPG demo mode. 1426 */ 1427 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg, 1428 port_name); 1429 if (!se_sess->se_node_acl) { 1430 transport_free_session(se_sess); 1431 return -EINVAL; 1432 } 1433 se_nacl = se_sess->se_node_acl; 1434 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 1435 /* 1436 * And now setup the new se_nacl and session pointers into our HW lport 1437 * mappings for fabric S_ID and LOOP_ID. 1438 */ 1439 spin_lock_irqsave(&ha->hardware_lock, flags); 1440 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, 1441 qla_tgt_sess, s_id); 1442 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, 1443 qla_tgt_sess, loop_id); 1444 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1445 /* 1446 * Finally register the new FC Nexus with TCM 1447 */ 1448 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); 1449 1450 return 0; 1451 } 1452 1453 static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, 1454 uint16_t loop_id, bool conf_compl_supported) 1455 { 1456 struct qla_tgt *tgt = sess->tgt; 1457 struct qla_hw_data *ha = tgt->ha; 1458 struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; 1459 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; 1460 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, 1461 struct tcm_qla2xxx_nacl, se_node_acl); 1462 u32 key; 1463 1464 1465 if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) 1466 pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", 1467 sess, sess->port_name, 1468 sess->loop_id, loop_id, sess->s_id.b.domain, 1469 sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, 1470 s_id.b.area, s_id.b.al_pa); 1471 1472 if (sess->loop_id != loop_id) { 1473 /* 1474 * Because we can shuffle loop IDs around and we 1475 * update different sessions non-atomically, we might 1476 * have overwritten this session's old loop ID 1477 * already, and we might end up overwriting some other 1478 * session that will be updated later. So we have to 1479 * be extra careful and we can't warn about those things... 1480 */ 1481 if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) 1482 lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; 1483 1484 lport->lport_loopid_map[loop_id].se_nacl = se_nacl; 1485 1486 sess->loop_id = loop_id; 1487 } 1488 1489 if (sess->s_id.b24 != s_id.b24) { 1490 key = (((u32) sess->s_id.b.domain << 16) | 1491 ((u32) sess->s_id.b.area << 8) | 1492 ((u32) sess->s_id.b.al_pa)); 1493 1494 if (btree_lookup32(&lport->lport_fcport_map, key)) 1495 WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, 1496 "Found wrong se_nacl when updating s_id %x:%x:%x\n", 1497 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); 1498 else 1499 WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", 1500 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa); 1501 1502 key = (((u32) s_id.b.domain << 16) | 1503 ((u32) s_id.b.area << 8) | 1504 ((u32) s_id.b.al_pa)); 1505 1506 if (btree_lookup32(&lport->lport_fcport_map, key)) { 1507 WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", 1508 s_id.b.domain, s_id.b.area, s_id.b.al_pa); 1509 btree_update32(&lport->lport_fcport_map, key, se_nacl); 1510 } else { 1511 btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); 1512 } 1513 1514 sess->s_id = s_id; 1515 nacl->nport_id = key; 1516 } 1517 1518 sess->conf_compl_supported = conf_compl_supported; 1519 } 1520 1521 /* 1522 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 1523 */ 1524 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { 1525 .handle_cmd = tcm_qla2xxx_handle_cmd, 1526 .handle_data = tcm_qla2xxx_handle_data, 1527 .handle_tmr = tcm_qla2xxx_handle_tmr, 1528 .free_cmd = tcm_qla2xxx_free_cmd, 1529 .free_mcmd = tcm_qla2xxx_free_mcmd, 1530 .free_session = tcm_qla2xxx_free_session, 1531 .update_sess = tcm_qla2xxx_update_sess, 1532 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, 1533 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, 1534 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, 1535 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, 1536 .put_sess = tcm_qla2xxx_put_sess, 1537 .shutdown_sess = tcm_qla2xxx_shutdown_sess, 1538 }; 1539 1540 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) 1541 { 1542 int rc; 1543 1544 rc = btree_init32(&lport->lport_fcport_map); 1545 if (rc) { 1546 pr_err("Unable to initialize lport->lport_fcport_map btree\n"); 1547 return rc; 1548 } 1549 1550 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 1551 65536); 1552 if (!lport->lport_loopid_map) { 1553 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", 1554 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); 1555 btree_destroy32(&lport->lport_fcport_map); 1556 return -ENOMEM; 1557 } 1558 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid) 1559 * 65536); 1560 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", 1561 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); 1562 return 0; 1563 } 1564 1565 static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) 1566 { 1567 struct qla_hw_data *ha = vha->hw; 1568 struct tcm_qla2xxx_lport *lport; 1569 /* 1570 * Setup local pointer to vha, NPIV VP pointer (if present) and 1571 * vha->tcm_lport pointer 1572 */ 1573 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; 1574 lport->qla_vha = vha; 1575 1576 return 0; 1577 } 1578 1579 static struct se_wwn *tcm_qla2xxx_make_lport( 1580 struct target_fabric_configfs *tf, 1581 struct config_group *group, 1582 const char *name) 1583 { 1584 struct tcm_qla2xxx_lport *lport; 1585 u64 wwpn; 1586 int ret = -ENODEV; 1587 1588 if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) 1589 return ERR_PTR(-EINVAL); 1590 1591 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); 1592 if (!lport) { 1593 pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); 1594 return ERR_PTR(-ENOMEM); 1595 } 1596 lport->lport_wwpn = wwpn; 1597 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, 1598 wwpn); 1599 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); 1600 1601 ret = tcm_qla2xxx_init_lport(lport); 1602 if (ret != 0) 1603 goto out; 1604 1605 ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, 1606 tcm_qla2xxx_lport_register_cb, lport); 1607 if (ret != 0) 1608 goto out_lport; 1609 1610 return &lport->lport_wwn; 1611 out_lport: 1612 vfree(lport->lport_loopid_map); 1613 btree_destroy32(&lport->lport_fcport_map); 1614 out: 1615 kfree(lport); 1616 return ERR_PTR(ret); 1617 } 1618 1619 static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) 1620 { 1621 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1622 struct tcm_qla2xxx_lport, lport_wwn); 1623 struct scsi_qla_host *vha = lport->qla_vha; 1624 struct qla_hw_data *ha = vha->hw; 1625 struct se_node_acl *node; 1626 u32 key = 0; 1627 1628 /* 1629 * Call into qla2x_target.c LLD logic to complete the 1630 * shutdown of struct qla_tgt after the call to 1631 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 1632 */ 1633 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) 1634 qlt_stop_phase2(ha->tgt.qla_tgt); 1635 1636 qlt_lport_deregister(vha); 1637 1638 vfree(lport->lport_loopid_map); 1639 btree_for_each_safe32(&lport->lport_fcport_map, key, node) 1640 btree_remove32(&lport->lport_fcport_map, key); 1641 btree_destroy32(&lport->lport_fcport_map); 1642 kfree(lport); 1643 } 1644 1645 static struct se_wwn *tcm_qla2xxx_npiv_make_lport( 1646 struct target_fabric_configfs *tf, 1647 struct config_group *group, 1648 const char *name) 1649 { 1650 struct tcm_qla2xxx_lport *lport; 1651 u64 npiv_wwpn, npiv_wwnn; 1652 int ret; 1653 1654 if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, 1655 &npiv_wwpn, &npiv_wwnn) < 0) 1656 return ERR_PTR(-EINVAL); 1657 1658 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); 1659 if (!lport) { 1660 pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); 1661 return ERR_PTR(-ENOMEM); 1662 } 1663 lport->lport_npiv_wwpn = npiv_wwpn; 1664 lport->lport_npiv_wwnn = npiv_wwnn; 1665 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], 1666 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1667 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); 1668 1669 /* FIXME: tcm_qla2xxx_npiv_make_lport */ 1670 ret = -ENOSYS; 1671 if (ret != 0) 1672 goto out; 1673 1674 return &lport->lport_wwn; 1675 out: 1676 kfree(lport); 1677 return ERR_PTR(ret); 1678 } 1679 1680 static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) 1681 { 1682 struct tcm_qla2xxx_lport *lport = container_of(wwn, 1683 struct tcm_qla2xxx_lport, lport_wwn); 1684 struct scsi_qla_host *vha = lport->qla_vha; 1685 struct Scsi_Host *sh = vha->host; 1686 /* 1687 * Notify libfc that we want to release the lport->npiv_vport 1688 */ 1689 fc_vport_terminate(lport->npiv_vport); 1690 1691 scsi_host_put(sh); 1692 kfree(lport); 1693 } 1694 1695 1696 static ssize_t tcm_qla2xxx_wwn_show_attr_version( 1697 struct target_fabric_configfs *tf, 1698 char *page) 1699 { 1700 return sprintf(page, 1701 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " 1702 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1703 utsname()->machine); 1704 } 1705 1706 TF_WWN_ATTR_RO(tcm_qla2xxx, version); 1707 1708 static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { 1709 &tcm_qla2xxx_wwn_version.attr, 1710 NULL, 1711 }; 1712 1713 static struct target_core_fabric_ops tcm_qla2xxx_ops = { 1714 .get_fabric_name = tcm_qla2xxx_get_fabric_name, 1715 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, 1716 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, 1717 .tpg_get_tag = tcm_qla2xxx_get_tag, 1718 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, 1719 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, 1720 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, 1721 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, 1722 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, 1723 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, 1724 .tpg_check_demo_mode_write_protect = 1725 tcm_qla2xxx_check_demo_write_protect, 1726 .tpg_check_prod_mode_write_protect = 1727 tcm_qla2xxx_check_prod_write_protect, 1728 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1729 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1730 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1731 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1732 .check_stop_free = tcm_qla2xxx_check_stop_free, 1733 .release_cmd = tcm_qla2xxx_release_cmd, 1734 .put_session = tcm_qla2xxx_put_session, 1735 .shutdown_session = tcm_qla2xxx_shutdown_session, 1736 .close_session = tcm_qla2xxx_close_session, 1737 .sess_get_index = tcm_qla2xxx_sess_get_index, 1738 .sess_get_initiator_sid = NULL, 1739 .write_pending = tcm_qla2xxx_write_pending, 1740 .write_pending_status = tcm_qla2xxx_write_pending_status, 1741 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, 1742 .get_task_tag = tcm_qla2xxx_get_task_tag, 1743 .get_cmd_state = tcm_qla2xxx_get_cmd_state, 1744 .queue_data_in = tcm_qla2xxx_queue_data_in, 1745 .queue_status = tcm_qla2xxx_queue_status, 1746 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1747 /* 1748 * Setup function pointers for generic logic in 1749 * target_core_fabric_configfs.c 1750 */ 1751 .fabric_make_wwn = tcm_qla2xxx_make_lport, 1752 .fabric_drop_wwn = tcm_qla2xxx_drop_lport, 1753 .fabric_make_tpg = tcm_qla2xxx_make_tpg, 1754 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, 1755 .fabric_post_link = NULL, 1756 .fabric_pre_unlink = NULL, 1757 .fabric_make_np = NULL, 1758 .fabric_drop_np = NULL, 1759 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, 1760 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, 1761 }; 1762 1763 static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { 1764 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, 1765 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, 1766 .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, 1767 .tpg_get_tag = tcm_qla2xxx_get_tag, 1768 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, 1769 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, 1770 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, 1771 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, 1772 .tpg_check_demo_mode = tcm_qla2xxx_check_false, 1773 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, 1774 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, 1775 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, 1776 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, 1777 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, 1778 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1779 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1780 .release_cmd = tcm_qla2xxx_release_cmd, 1781 .put_session = tcm_qla2xxx_put_session, 1782 .shutdown_session = tcm_qla2xxx_shutdown_session, 1783 .close_session = tcm_qla2xxx_close_session, 1784 .sess_get_index = tcm_qla2xxx_sess_get_index, 1785 .sess_get_initiator_sid = NULL, 1786 .write_pending = tcm_qla2xxx_write_pending, 1787 .write_pending_status = tcm_qla2xxx_write_pending_status, 1788 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, 1789 .get_task_tag = tcm_qla2xxx_get_task_tag, 1790 .get_cmd_state = tcm_qla2xxx_get_cmd_state, 1791 .queue_data_in = tcm_qla2xxx_queue_data_in, 1792 .queue_status = tcm_qla2xxx_queue_status, 1793 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, 1794 /* 1795 * Setup function pointers for generic logic in 1796 * target_core_fabric_configfs.c 1797 */ 1798 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, 1799 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, 1800 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, 1801 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, 1802 .fabric_post_link = NULL, 1803 .fabric_pre_unlink = NULL, 1804 .fabric_make_np = NULL, 1805 .fabric_drop_np = NULL, 1806 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, 1807 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, 1808 }; 1809 1810 static int tcm_qla2xxx_register_configfs(void) 1811 { 1812 struct target_fabric_configfs *fabric, *npiv_fabric; 1813 int ret; 1814 1815 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " 1816 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1817 utsname()->machine); 1818 /* 1819 * Register the top level struct config_item_type with TCM core 1820 */ 1821 fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx"); 1822 if (IS_ERR(fabric)) { 1823 pr_err("target_fabric_configfs_init() failed\n"); 1824 return PTR_ERR(fabric); 1825 } 1826 /* 1827 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops 1828 */ 1829 fabric->tf_ops = tcm_qla2xxx_ops; 1830 /* 1831 * Setup default attribute lists for various fabric->tf_cit_tmpl 1832 */ 1833 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1834 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; 1835 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = 1836 tcm_qla2xxx_tpg_attrib_attrs; 1837 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1838 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1839 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1840 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1841 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1842 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1843 /* 1844 * Register the fabric for use within TCM 1845 */ 1846 ret = target_fabric_configfs_register(fabric); 1847 if (ret < 0) { 1848 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); 1849 return ret; 1850 } 1851 /* 1852 * Setup our local pointer to *fabric 1853 */ 1854 tcm_qla2xxx_fabric_configfs = fabric; 1855 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n"); 1856 1857 /* 1858 * Register the top level struct config_item_type for NPIV with TCM core 1859 */ 1860 npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv"); 1861 if (IS_ERR(npiv_fabric)) { 1862 pr_err("target_fabric_configfs_init() failed\n"); 1863 ret = PTR_ERR(npiv_fabric); 1864 goto out_fabric; 1865 } 1866 /* 1867 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops 1868 */ 1869 npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops; 1870 /* 1871 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl 1872 */ 1873 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 1874 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; 1875 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1876 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1877 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1878 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; 1879 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 1880 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 1881 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 1882 /* 1883 * Register the npiv_fabric for use within TCM 1884 */ 1885 ret = target_fabric_configfs_register(npiv_fabric); 1886 if (ret < 0) { 1887 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n"); 1888 goto out_fabric; 1889 } 1890 /* 1891 * Setup our local pointer to *npiv_fabric 1892 */ 1893 tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric; 1894 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n"); 1895 1896 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", 1897 WQ_MEM_RECLAIM, 0); 1898 if (!tcm_qla2xxx_free_wq) { 1899 ret = -ENOMEM; 1900 goto out_fabric_npiv; 1901 } 1902 1903 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0); 1904 if (!tcm_qla2xxx_cmd_wq) { 1905 ret = -ENOMEM; 1906 goto out_free_wq; 1907 } 1908 1909 return 0; 1910 1911 out_free_wq: 1912 destroy_workqueue(tcm_qla2xxx_free_wq); 1913 out_fabric_npiv: 1914 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); 1915 out_fabric: 1916 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); 1917 return ret; 1918 } 1919 1920 static void tcm_qla2xxx_deregister_configfs(void) 1921 { 1922 destroy_workqueue(tcm_qla2xxx_cmd_wq); 1923 destroy_workqueue(tcm_qla2xxx_free_wq); 1924 1925 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs); 1926 tcm_qla2xxx_fabric_configfs = NULL; 1927 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n"); 1928 1929 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs); 1930 tcm_qla2xxx_npiv_fabric_configfs = NULL; 1931 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n"); 1932 } 1933 1934 static int __init tcm_qla2xxx_init(void) 1935 { 1936 int ret; 1937 1938 ret = tcm_qla2xxx_register_configfs(); 1939 if (ret < 0) 1940 return ret; 1941 1942 return 0; 1943 } 1944 1945 static void __exit tcm_qla2xxx_exit(void) 1946 { 1947 tcm_qla2xxx_deregister_configfs(); 1948 } 1949 1950 MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver"); 1951 MODULE_LICENSE("GPL"); 1952 module_init(tcm_qla2xxx_init); 1953 module_exit(tcm_qla2xxx_exit); 1954