// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allow the user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */
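
/*
 * Illustrative decode only (not a helper used by the driver): for a FCP_CMND
 * priority/task-attribute byte of 0x0a, (0x0a & FCP_PTA_MASK) yields
 * FCP_PTA_ORDERED, and (0x0a >> FCP_PRI_SHIFT) yields a priority of 1.
 */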

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 *  - Either context is IRQ and only IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions boundaries, except tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
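
/*
 * Sketch of the intended usage: callers stamp a generation before an
 * operation that may race with session deletion, e.g.
 *
 *	int gen;
 *
 *	qlt_do_generation_tick(vha, &gen);
 *	...
 *	qlt_fc_port_deleted(vha, fcport, gen);
 *
 * so that qlt_fc_port_deleted() can ignore deletion requests that are
 * older than the session's own generation.
 */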

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}


static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);


			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}
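
/*
 * NACK sends are deferred: qla24xx_post_nack_work() queues a QLA_EVT_NACK
 * work item, qla24xx_do_nack_work() later runs it in process context, and
 * qla24xx_async_notify_ack() finally issues the notify-ack SRB.
 */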
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
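
/*
 * Rough lifecycle of a qlt_plogi_ack_t: qlt_plogi_ack_find_add() allocates
 * (or reuses) an entry for an incoming PLOGI, qlt_plogi_ack_link() ties it
 * to a session and bumps ref_count, and qlt_plogi_ack_unref() sends the
 * deferred ACK and frees the entry once the count drops to zero.
 */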
/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
	struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
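
/*
 * Teardown order below, roughly: send any pending explicit LOGO/PRLO,
 * release the fabric (se_sess) side, wait (bounded) for a scheduled
 * logout to complete, drop counters under sess_lock and release any
 * linked PLOGI ACKs, then wake waiters such as qlt_stop_phase1().
 */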
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
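
/*
 * If the chip was reset after this session logged in, its firmware login
 * state is gone; skip the firmware logout/LOGO-ack steps on delete then.
 */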
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * Let it finish.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->disc_state = DSC_DELETE_PEND;

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
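
/*
 * Session creation calls back into the fabric module
 * (tgt_ops->check_initiator_node_acl, implemented by tcm_qla2xxx) to set
 * up the se_session; on failure no qla_tgt session is created.
 */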
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
"" : "not "); 1408 1409 return sess; 1410 } 1411 1412 /* 1413 * max_gen - specifies maximum session generation 1414 * at which this deletion requestion is still valid 1415 */ 1416 void 1417 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) 1418 { 1419 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1420 struct fc_port *sess = fcport; 1421 unsigned long flags; 1422 1423 if (!vha->hw->tgt.tgt_ops) 1424 return; 1425 1426 if (!tgt) 1427 return; 1428 1429 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1430 if (tgt->tgt_stop) { 1431 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1432 return; 1433 } 1434 if (!sess->se_sess) { 1435 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1436 return; 1437 } 1438 1439 if (max_gen - sess->generation < 0) { 1440 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1441 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, 1442 "Ignoring stale deletion request for se_sess %p / sess %p" 1443 " for port %8phC, req_gen %d, sess_gen %d\n", 1444 sess->se_sess, sess, sess->port_name, max_gen, 1445 sess->generation); 1446 return; 1447 } 1448 1449 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 1450 1451 sess->local = 1; 1452 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1453 qlt_schedule_sess_for_deletion(sess); 1454 } 1455 1456 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 1457 { 1458 struct qla_hw_data *ha = tgt->ha; 1459 unsigned long flags; 1460 int res; 1461 /* 1462 * We need to protect against race, when tgt is freed before or 1463 * inside wake_up() 1464 */ 1465 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1466 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 1467 "tgt %p, sess_count=%d\n", 1468 tgt, tgt->sess_count); 1469 res = (tgt->sess_count == 0); 1470 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1471 1472 return res; 1473 } 1474 1475 /* Called by tcm_qla2xxx configfs code */ 1476 int qlt_stop_phase1(struct qla_tgt *tgt) 1477 { 1478 struct scsi_qla_host *vha = tgt->vha; 1479 struct qla_hw_data *ha = tgt->ha; 1480 unsigned long flags; 1481 1482 mutex_lock(&ha->optrom_mutex); 1483 mutex_lock(&qla_tgt_mutex); 1484 1485 if (tgt->tgt_stop || tgt->tgt_stopped) { 1486 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 1487 "Already in tgt->tgt_stop or tgt_stopped state\n"); 1488 mutex_unlock(&qla_tgt_mutex); 1489 mutex_unlock(&ha->optrom_mutex); 1490 return -EPERM; 1491 } 1492 1493 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", 1494 vha->host_no, vha); 1495 /* 1496 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 1497 * Lock is needed, because we still can get an incoming packet. 
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
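
/*
 * Unlike qlt_24xx_send_abts_resp() below, this variant reserves a real
 * request-queue handle and parks the mcmd in outstanding_cmds[], so the
 * ABTS response completion can later be matched back to the command.
 */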
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl, h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = MAKE_HANDLE(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
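
/*
 * On ids_reversed: for an ABTS we received from an initiator the header
 * s_id/d_id must be swapped in the response (ids_reversed == false); for
 * a firmware echo of our own ABTS response they are already reversed and
 * are copied through as-is (ids_reversed == true).
 */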
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * On entry we have the firmware's response to an ABTS response that
	 * we generated, so the ID fields in it are reversed.
	 */
1900 */ 1901 1902 ctio->entry_type = CTIO_TYPE7; 1903 ctio->entry_count = 1; 1904 ctio->nport_handle = entry->nport_handle; 1905 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 1906 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 1907 ctio->vp_index = vha->vp_idx; 1908 ctio->exchange_addr = entry->exchange_addr_to_abort; 1909 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); 1910 1911 if (mcmd) { 1912 ctio->initiator_id = entry->fcp_hdr_le.s_id; 1913 1914 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) 1915 tmp |= (mcmd->abort_io_attr << 9); 1916 else if (qpair->retry_term_cnt & 1) 1917 tmp |= (0x4 << 9); 1918 } else { 1919 ctio->initiator_id = entry->fcp_hdr_le.d_id; 1920 1921 if (qpair->retry_term_cnt & 1) 1922 tmp |= (0x4 << 9); 1923 } 1924 ctio->u.status1.flags = cpu_to_le16(tmp); 1925 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; 1926 1927 ql_dbg(ql_dbg_tgt, vha, 0xe007, 1928 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n", 1929 le16_to_cpu(ctio->u.status1.flags), 1930 le16_to_cpu(ctio->u.status1.ox_id), 1931 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0); 1932 1933 /* Memory Barrier */ 1934 wmb(); 1935 if (qpair->reqq_start_iocbs) 1936 qpair->reqq_start_iocbs(qpair); 1937 else 1938 qla2x00_start_iocbs(vha, qpair->req); 1939 1940 if (mcmd) 1941 qlt_build_abts_resp_iocb(mcmd); 1942 else 1943 qlt_24xx_send_abts_resp(qpair, 1944 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); 1945 1946 } 1947 1948 /* drop cmds for the given lun 1949 * XXX only looks for cmds on the port through which lun reset was recieved 1950 * XXX does not go through the list of other port (which may have cmds 1951 * for the same lun) 1952 */ 1953 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id) 1954 { 1955 struct qla_tgt_sess_op *op; 1956 struct qla_tgt_cmd *cmd; 1957 uint32_t key; 1958 unsigned long flags; 1959 1960 key = sid_to_key(s_id); 1961 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1962 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1963 uint32_t op_key; 1964 u64 op_lun; 1965 1966 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1967 op_lun = scsilun_to_int( 1968 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1969 if (op_key == key && op_lun == lun) 1970 op->aborted = true; 1971 } 1972 1973 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1974 uint32_t op_key; 1975 u64 op_lun; 1976 1977 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1978 op_lun = scsilun_to_int( 1979 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1980 if (op_key == key && op_lun == lun) 1981 op->aborted = true; 1982 } 1983 1984 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1985 uint32_t cmd_key; 1986 u64 cmd_lun; 1987 1988 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 1989 cmd_lun = scsilun_to_int( 1990 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 1991 if (cmd_key == key && cmd_lun == lun) 1992 cmd->aborted = 1; 1993 } 1994 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1995 } 1996 1997 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha, 1998 uint64_t unpacked_lun) 1999 { 2000 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2001 struct qla_qpair_hint *h = NULL; 2002 2003 if (vha->flags.qpairs_available) { 2004 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); 2005 if (!h) 2006 h = &tgt->qphints[0]; 2007 } else { 2008 h = &tgt->qphints[0]; 2009 } 2010 2011 return h; 2012 } 2013 2014 static void qlt_do_tmr_work(struct work_struct 
*work) 2015 { 2016 struct qla_tgt_mgmt_cmd *mcmd = 2017 container_of(work, struct qla_tgt_mgmt_cmd, work); 2018 struct qla_hw_data *ha = mcmd->vha->hw; 2019 int rc = EIO; 2020 uint32_t tag; 2021 unsigned long flags; 2022 2023 switch (mcmd->tmr_func) { 2024 case QLA_TGT_ABTS: 2025 tag = mcmd->orig_iocb.abts.exchange_addr_to_abort; 2026 break; 2027 default: 2028 tag = 0; 2029 break; 2030 } 2031 2032 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, 2033 mcmd->tmr_func, tag); 2034 2035 if (rc != 0) { 2036 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); 2037 switch (mcmd->tmr_func) { 2038 case QLA_TGT_ABTS: 2039 mcmd->fc_tm_rsp = FCP_TMF_REJECTED; 2040 qlt_build_abts_resp_iocb(mcmd); 2041 break; 2042 case QLA_TGT_LUN_RESET: 2043 case QLA_TGT_CLEAR_TS: 2044 case QLA_TGT_ABORT_TS: 2045 case QLA_TGT_CLEAR_ACA: 2046 case QLA_TGT_TARGET_RESET: 2047 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, 2048 qla_sam_status); 2049 break; 2050 2051 case QLA_TGT_ABORT_ALL: 2052 case QLA_TGT_NEXUS_LOSS_SESS: 2053 case QLA_TGT_NEXUS_LOSS: 2054 qlt_send_notify_ack(mcmd->qpair, 2055 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2056 break; 2057 } 2058 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); 2059 2060 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, 2061 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2062 mcmd->vha->vp_idx, rc); 2063 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2064 } 2065 } 2066 2067 /* ha->hardware_lock supposed to be held on entry */ 2068 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2069 struct abts_recv_from_24xx *abts, struct fc_port *sess) 2070 { 2071 struct qla_hw_data *ha = vha->hw; 2072 struct qla_tgt_mgmt_cmd *mcmd; 2073 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 2074 2075 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 2076 "qla_target(%d): task abort (tag=%d)\n", 2077 vha->vp_idx, abts->exchange_addr_to_abort); 2078 2079 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2080 if (mcmd == NULL) { 2081 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, 2082 "qla_target(%d): %s: Allocation of ABORT cmd failed", 2083 vha->vp_idx, __func__); 2084 return -ENOMEM; 2085 } 2086 memset(mcmd, 0, sizeof(*mcmd)); 2087 mcmd->cmd_type = TYPE_TGT_TMCMD; 2088 mcmd->sess = sess; 2089 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 2090 mcmd->reset_count = ha->base_qpair->chip_reset; 2091 mcmd->tmr_func = QLA_TGT_ABTS; 2092 mcmd->qpair = h->qpair; 2093 mcmd->vha = vha; 2094 2095 /* 2096 * LUN is looked up by target-core internally based on the passed 2097 * abts->exchange_addr_to_abort tag. 2098 */ 2099 mcmd->se_cmd.cpuid = h->cpuid; 2100 2101 if (ha->tgt.tgt_ops->find_cmd_by_tag) { 2102 struct qla_tgt_cmd *abort_cmd; 2103 2104 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, 2105 abts->exchange_addr_to_abort); 2106 if (abort_cmd && abort_cmd->qpair) { 2107 mcmd->qpair = abort_cmd->qpair; 2108 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; 2109 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr; 2110 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID; 2111 } 2112 } 2113 2114 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 2115 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); 2116 2117 return 0; 2118 } 2119 2120 /* 2121 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire
2122 */
2123 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2124 struct abts_recv_from_24xx *abts)
2125 {
2126 struct qla_hw_data *ha = vha->hw;
2127 struct fc_port *sess;
2128 uint32_t tag = abts->exchange_addr_to_abort;
2129 be_id_t s_id;
2130 int rc;
2131 unsigned long flags;
2132
2133 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2134 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2135 "qla_target(%d): ABTS: Abort Sequence not "
2136 "supported\n", vha->vp_idx);
2137 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2138 false);
2139 return;
2140 }
2141
2142 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2144 "qla_target(%d): ABTS: Unknown Exchange "
2145 "Address received\n", vha->vp_idx);
2146 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2147 false);
2148 return;
2149 }
2150
2151 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2152 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2153 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2154 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2155 le32_to_cpu(abts->fcp_hdr_le.parameter));
2156
2157 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2158
2159 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2160 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2161 if (!sess) {
2162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2163 "qla_target(%d): task abort for non-existent session\n",
2164 vha->vp_idx);
2165 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2166
2167 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2168 false);
2169 return;
2170 }
2171 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2172
2173
2174 if (sess->deleted) {
2175 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2176 false);
2177 return;
2178 }
2179
2180 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2181 if (rc != 0) {
2182 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2183 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2184 vha->vp_idx, rc);
2185 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2186 false);
2187 return;
2188 }
2189 }
2190
2191 /*
2192 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reacquire
2193 */
2194 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2195 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2196 {
2197 struct scsi_qla_host *ha = mcmd->vha;
2198 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2199 struct ctio7_to_24xx *ctio;
2200 uint16_t temp;
2201
2202 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2203 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
2204 ha, atio, resp_code);
2205
2206
2207 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2208 if (ctio == NULL) {
2209 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2210 "qla_target(%d): %s failed: unable to allocate "
2211 "request packet\n", ha->vp_idx, __func__);
2212 return;
2213 }
2214
2215 ctio->entry_type = CTIO_TYPE7;
2216 ctio->entry_count = 1;
2217 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2218 ctio->nport_handle = mcmd->sess->loop_id;
2219 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2220 ctio->vp_index = ha->vp_idx;
2221 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2222 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2223 temp = (atio->u.isp24.attr << 9) |
2224 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2225 ctio->u.status1.flags = cpu_to_le16(temp);
2226 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2227 ctio->u.status1.ox_id = cpu_to_le16(temp);
2228 ctio->u.status1.scsi_status =
2229 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2230 ctio->u.status1.response_len = cpu_to_le16(8);
2231 ctio->u.status1.sense_data[0] = resp_code;
2232
2233 /* Memory Barrier */
2234 wmb();
2235 if (qpair->reqq_start_iocbs)
2236 qpair->reqq_start_iocbs(qpair);
2237 else
2238 qla2x00_start_iocbs(ha, qpair->req);
2239 }
2240
2241 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2242 {
2243 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2244 }
2245 EXPORT_SYMBOL(qlt_free_mcmd);
2246
2247 /*
2248 * ha->hardware_lock supposed to be held on entry.
Might drop it, then 2249 * reacquire 2250 */ 2251 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 2252 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2253 { 2254 struct atio_from_isp *atio = &cmd->atio; 2255 struct ctio7_to_24xx *ctio; 2256 uint16_t temp; 2257 struct scsi_qla_host *vha = cmd->vha; 2258 2259 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2260 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2261 "sense_key=%02x, asc=%02x, ascq=%02x", 2262 vha, atio, scsi_status, sense_key, asc, ascq); 2263 2264 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2265 if (!ctio) { 2266 ql_dbg(ql_dbg_async, vha, 0x3067, 2267 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2268 vha->host_no, __func__); 2269 goto out; 2270 } 2271 2272 ctio->entry_type = CTIO_TYPE7; 2273 ctio->entry_count = 1; 2274 ctio->handle = QLA_TGT_SKIP_HANDLE; 2275 ctio->nport_handle = cmd->sess->loop_id; 2276 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2277 ctio->vp_index = vha->vp_idx; 2278 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 2279 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2280 temp = (atio->u.isp24.attr << 9) | 2281 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2282 ctio->u.status1.flags = cpu_to_le16(temp); 2283 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2284 ctio->u.status1.ox_id = cpu_to_le16(temp); 2285 ctio->u.status1.scsi_status = 2286 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2287 ctio->u.status1.response_len = cpu_to_le16(18); 2288 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2289 2290 if (ctio->u.status1.residual != 0) 2291 ctio->u.status1.scsi_status |= 2292 cpu_to_le16(SS_RESIDUAL_UNDER); 2293 2294 /* Fixed format sense data. */ 2295 ctio->u.status1.sense_data[0] = 0x70; 2296 ctio->u.status1.sense_data[2] = sense_key; 2297 /* Additional sense length */ 2298 ctio->u.status1.sense_data[7] = 0xa; 2299 /* ASC and ASCQ */ 2300 ctio->u.status1.sense_data[12] = asc; 2301 ctio->u.status1.sense_data[13] = ascq; 2302 2303 /* Memory Barrier */ 2304 wmb(); 2305 2306 if (qpair->reqq_start_iocbs) 2307 qpair->reqq_start_iocbs(qpair); 2308 else 2309 qla2x00_start_iocbs(vha, qpair->req); 2310 2311 out: 2312 return; 2313 } 2314 2315 /* callback from target fabric module code */ 2316 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2317 { 2318 struct scsi_qla_host *vha = mcmd->sess->vha; 2319 struct qla_hw_data *ha = vha->hw; 2320 unsigned long flags; 2321 struct qla_qpair *qpair = mcmd->qpair; 2322 bool free_mcmd = true; 2323 2324 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2325 "TM response mcmd (%p) status %#x state %#x", 2326 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2327 2328 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2329 2330 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { 2331 /* 2332 * Either the port is not online or this request was from 2333 * previous life, just abort the processing. 
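*
* "Previous life" means the request predates the most recent chip
* reset: mcmd->reset_count was sampled from qpair->chip_reset when
* the command was queued, so the test above reduces to (sketch):
*
*	stale = !vha->flags.online ||
*		mcmd->reset_count != qpair->chip_reset;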
2334 */ 2335 ql_dbg(ql_dbg_async, vha, 0xe100, 2336 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2337 vha->flags.online, qla2x00_reset_active(vha), 2338 mcmd->reset_count, qpair->chip_reset); 2339 ha->tgt.tgt_ops->free_mcmd(mcmd); 2340 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2341 return; 2342 } 2343 2344 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2345 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) { 2346 case ELS_LOGO: 2347 case ELS_PRLO: 2348 case ELS_TPRLO: 2349 ql_dbg(ql_dbg_disc, vha, 0x2106, 2350 "TM response logo %8phC status %#x state %#x", 2351 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2352 mcmd->flags); 2353 qlt_schedule_sess_for_deletion(mcmd->sess); 2354 break; 2355 default: 2356 qlt_send_notify_ack(vha->hw->base_qpair, 2357 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2358 break; 2359 } 2360 } else { 2361 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { 2362 qlt_build_abts_resp_iocb(mcmd); 2363 free_mcmd = false; 2364 } else 2365 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2366 mcmd->fc_tm_rsp); 2367 } 2368 /* 2369 * Make the callback for ->free_mcmd() to queue_work() and invoke 2370 * target_put_sess_cmd() to drop cmd_kref to 1. The final 2371 * target_put_sess_cmd() call will be made from TFO->check_stop_free() 2372 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd 2373 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2374 * qlt_xmit_tm_rsp() returns here.. 2375 */ 2376 if (free_mcmd) 2377 ha->tgt.tgt_ops->free_mcmd(mcmd); 2378 2379 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2380 } 2381 EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2382 2383 /* No locks */ 2384 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) 2385 { 2386 struct qla_tgt_cmd *cmd = prm->cmd; 2387 2388 BUG_ON(cmd->sg_cnt == 0); 2389 2390 prm->sg = (struct scatterlist *)cmd->sg; 2391 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg, 2392 cmd->sg_cnt, cmd->dma_data_direction); 2393 if (unlikely(prm->seg_cnt == 0)) 2394 goto out_err; 2395 2396 prm->cmd->sg_mapped = 1; 2397 2398 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { 2399 /* 2400 * If greater than four sg entries then we need to allocate 2401 * the continuation entries 2402 */ 2403 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) 2404 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 2405 QLA_TGT_DATASEGS_PER_CMD_24XX, 2406 QLA_TGT_DATASEGS_PER_CONT_24XX); 2407 } else { 2408 /* DIF */ 2409 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2410 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2411 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); 2412 prm->tot_dsds = prm->seg_cnt; 2413 } else 2414 prm->tot_dsds = prm->seg_cnt; 2415 2416 if (cmd->prot_sg_cnt) { 2417 prm->prot_sg = cmd->prot_sg; 2418 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, 2419 cmd->prot_sg, cmd->prot_sg_cnt, 2420 cmd->dma_data_direction); 2421 if (unlikely(prm->prot_seg_cnt == 0)) 2422 goto out_err; 2423 2424 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2425 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2426 /* Dif Bundling not support here */ 2427 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, 2428 cmd->blk_sz); 2429 prm->tot_dsds += prm->prot_seg_cnt; 2430 } else 2431 prm->tot_dsds += prm->prot_seg_cnt; 2432 } 2433 } 2434 2435 return 0; 2436 2437 out_err: 2438 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, 2439 "qla_target(%d): PCI mapping failed: sg_cnt=%d", 2440 0, prm->cmd->sg_cnt); 2441 return -1; 2442 } 2443 2444 static void 
qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2445 { 2446 struct qla_hw_data *ha; 2447 struct qla_qpair *qpair; 2448 2449 if (!cmd->sg_mapped) 2450 return; 2451 2452 qpair = cmd->qpair; 2453 2454 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt, 2455 cmd->dma_data_direction); 2456 cmd->sg_mapped = 0; 2457 2458 if (cmd->prot_sg_cnt) 2459 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, 2460 cmd->dma_data_direction); 2461 2462 if (!cmd->ctx) 2463 return; 2464 ha = vha->hw; 2465 if (cmd->ctx_dsd_alloced) 2466 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2467 2468 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2469 } 2470 2471 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, 2472 uint32_t req_cnt) 2473 { 2474 uint32_t cnt; 2475 struct req_que *req = qpair->req; 2476 2477 if (req->cnt < (req_cnt + 2)) { 2478 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : 2479 RD_REG_DWORD_RELAXED(req->req_q_out)); 2480 2481 if (req->ring_index < cnt) 2482 req->cnt = cnt - req->ring_index; 2483 else 2484 req->cnt = req->length - (req->ring_index - cnt); 2485 2486 if (unlikely(req->cnt < (req_cnt + 2))) 2487 return -EAGAIN; 2488 } 2489 2490 req->cnt -= req_cnt; 2491 2492 return 0; 2493 } 2494 2495 /* 2496 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2497 */ 2498 static inline void *qlt_get_req_pkt(struct req_que *req) 2499 { 2500 /* Adjust ring index. */ 2501 req->ring_index++; 2502 if (req->ring_index == req->length) { 2503 req->ring_index = 0; 2504 req->ring_ptr = req->ring; 2505 } else { 2506 req->ring_ptr++; 2507 } 2508 return (cont_entry_t *)req->ring_ptr; 2509 } 2510 2511 /* ha->hardware_lock supposed to be held on entry */ 2512 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) 2513 { 2514 uint32_t h; 2515 int index; 2516 uint8_t found = 0; 2517 struct req_que *req = qpair->req; 2518 2519 h = req->current_outstanding_cmd; 2520 2521 for (index = 1; index < req->num_outstanding_cmds; index++) { 2522 h++; 2523 if (h == req->num_outstanding_cmds) 2524 h = 1; 2525 2526 if (h == QLA_TGT_SKIP_HANDLE) 2527 continue; 2528 2529 if (!req->outstanding_cmds[h]) { 2530 found = 1; 2531 break; 2532 } 2533 } 2534 2535 if (found) { 2536 req->current_outstanding_cmd = h; 2537 } else { 2538 ql_dbg(ql_dbg_io, qpair->vha, 0x305b, 2539 "qla_target(%d): Ran out of empty cmd slots\n", 2540 qpair->vha->vp_idx); 2541 h = QLA_TGT_NULL_HANDLE; 2542 } 2543 2544 return h; 2545 } 2546 2547 /* ha->hardware_lock supposed to be held on entry */ 2548 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, 2549 struct qla_tgt_prm *prm) 2550 { 2551 uint32_t h; 2552 struct ctio7_to_24xx *pkt; 2553 struct atio_from_isp *atio = &prm->cmd->atio; 2554 uint16_t temp; 2555 2556 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; 2557 prm->pkt = pkt; 2558 memset(pkt, 0, sizeof(*pkt)); 2559 2560 pkt->entry_type = CTIO_TYPE7; 2561 pkt->entry_count = (uint8_t)prm->req_cnt; 2562 pkt->vp_index = prm->cmd->vp_idx; 2563 2564 h = qlt_make_handle(qpair); 2565 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2566 /* 2567 * CTIO type 7 from the firmware doesn't provide a way to 2568 * know the initiator's LOOP ID, hence we can't find 2569 * the session and, so, the command. 
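*
* Hitting QLA_TGT_NULL_HANDLE here simply means qlt_make_handle()
* found no free slot in req->outstanding_cmds[]; returning -EAGAIN
* lets the caller give back its ring reservation and retry later
* instead of dropping the exchange.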
2570 */
2571 return -EAGAIN;
2572 } else
2573 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2574
2575 pkt->handle = MAKE_HANDLE(qpair->req->id, h);
2576 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2577 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2578 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2579 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2580 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2581 temp = atio->u.isp24.attr << 9;
2582 pkt->u.status0.flags |= cpu_to_le16(temp);
2583 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2584 pkt->u.status0.ox_id = cpu_to_le16(temp);
2585 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2586
2587 return 0;
2588 }
2589
2590 /*
2591 * ha->hardware_lock supposed to be held on entry. We have already made sure
2592 * that there is a sufficient number of request entries to not drop it.
2593 */
2594 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2595 {
2596 int cnt;
2597 struct dsd64 *cur_dsd;
2598
2599 /* Build continuation packets */
2600 while (prm->seg_cnt > 0) {
2601 cont_a64_entry_t *cont_pkt64 =
2602 (cont_a64_entry_t *)qlt_get_req_pkt(
2603 prm->cmd->qpair->req);
2604
2605 /*
2606 * Make sure that none of cont_pkt64's
2607 * 64-bit specific fields are used for
2608 * 32-bit addressing; cast to
2609 * (cont_entry_t *) for that.
2610 */
2611
2612 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2613
2614 cont_pkt64->entry_count = 1;
2615 cont_pkt64->sys_define = 0;
2616
2617 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2618 cur_dsd = cont_pkt64->dsd;
2619
2620 /* Load continuation entry data segments */
2621 for (cnt = 0;
2622 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2623 cnt++, prm->seg_cnt--) {
2624 append_dsd64(&cur_dsd, prm->sg);
2625 prm->sg = sg_next(prm->sg);
2626 }
2627 }
2628 }
2629
2630 /*
2631 * ha->hardware_lock supposed to be held on entry. We have already made sure
2632 * that there is a sufficient number of request entries to not drop it.
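*
* The guarantee comes from the caller reserving the full request
* count up front, i.e. the calling pattern is (sketch only):
*
*	if (qlt_check_reserve_free_req(qpair, full_req_cnt))
*		return -EAGAIN;	/* ring full, back off and retry */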
2633 */ 2634 static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2635 { 2636 int cnt; 2637 struct dsd64 *cur_dsd; 2638 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2639 2640 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2641 2642 /* Setup packet address segment pointer */ 2643 cur_dsd = &pkt24->u.status0.dsd; 2644 2645 /* Set total data segment count */ 2646 if (prm->seg_cnt) 2647 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2648 2649 if (prm->seg_cnt == 0) { 2650 /* No data transfer */ 2651 cur_dsd->address = 0; 2652 cur_dsd->length = 0; 2653 return; 2654 } 2655 2656 /* If scatter gather */ 2657 2658 /* Load command entry data segments */ 2659 for (cnt = 0; 2660 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2661 cnt++, prm->seg_cnt--) { 2662 append_dsd64(&cur_dsd, prm->sg); 2663 prm->sg = sg_next(prm->sg); 2664 } 2665 2666 qlt_load_cont_data_segments(prm); 2667 } 2668 2669 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2670 { 2671 return cmd->bufflen > 0; 2672 } 2673 2674 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2675 { 2676 struct qla_tgt_cmd *cmd; 2677 struct scsi_qla_host *vha; 2678 2679 /* asc 0x10=dif error */ 2680 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2681 cmd = prm->cmd; 2682 vha = cmd->vha; 2683 /* ASCQ */ 2684 switch (prm->sense_buffer[13]) { 2685 case 1: 2686 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, 2687 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2688 "se_cmd=%p tag[%x]", 2689 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2690 cmd->atio.u.isp24.exchange_addr); 2691 break; 2692 case 2: 2693 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, 2694 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2695 "se_cmd=%p tag[%x]", 2696 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2697 cmd->atio.u.isp24.exchange_addr); 2698 break; 2699 case 3: 2700 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, 2701 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2702 "se_cmd=%p tag[%x]", 2703 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2704 cmd->atio.u.isp24.exchange_addr); 2705 break; 2706 default: 2707 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, 2708 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2709 "se_cmd=%p tag[%x]", 2710 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2711 cmd->atio.u.isp24.exchange_addr); 2712 break; 2713 } 2714 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); 2715 } 2716 } 2717 2718 /* 2719 * Called without ha->hardware_lock held 2720 */ 2721 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2722 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2723 uint32_t *full_req_cnt) 2724 { 2725 struct se_cmd *se_cmd = &cmd->se_cmd; 2726 struct qla_qpair *qpair = cmd->qpair; 2727 2728 prm->cmd = cmd; 2729 prm->tgt = cmd->tgt; 2730 prm->pkt = NULL; 2731 prm->rq_result = scsi_status; 2732 prm->sense_buffer = &cmd->sense_buffer[0]; 2733 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2734 prm->sg = NULL; 2735 prm->seg_cnt = -1; 2736 prm->req_cnt = 1; 2737 prm->residual = 0; 2738 prm->add_status_pkt = 0; 2739 prm->prot_sg = NULL; 2740 prm->prot_seg_cnt = 0; 2741 prm->tot_dsds = 0; 2742 2743 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2744 if (qlt_pci_map_calc_cnt(prm) != 0) 2745 return -EAGAIN; 2746 } 2747 2748 *full_req_cnt = prm->req_cnt; 2749 2750 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2751 prm->residual = se_cmd->residual_count; 2752 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, 2753 "Residual underflow: %d (tag %lld, op %x, 
bufflen %d, rq_result %x)\n", 2754 prm->residual, se_cmd->tag, 2755 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2756 cmd->bufflen, prm->rq_result); 2757 prm->rq_result |= SS_RESIDUAL_UNDER; 2758 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2759 prm->residual = se_cmd->residual_count; 2760 ql_dbg_qp(ql_dbg_io, qpair, 0x305d, 2761 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2762 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2763 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2764 prm->rq_result |= SS_RESIDUAL_OVER; 2765 } 2766 2767 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2768 /* 2769 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2770 * ignored in *xmit_response() below 2771 */ 2772 if (qlt_has_data(cmd)) { 2773 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2774 (IS_FWI2_CAPABLE(cmd->vha->hw) && 2775 (prm->rq_result != 0))) { 2776 prm->add_status_pkt = 1; 2777 (*full_req_cnt)++; 2778 } 2779 } 2780 } 2781 2782 return 0; 2783 } 2784 2785 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, 2786 int sending_sense) 2787 { 2788 if (cmd->qpair->enable_class_2) 2789 return 0; 2790 2791 if (sending_sense) 2792 return cmd->conf_compl_supported; 2793 else 2794 return cmd->qpair->enable_explicit_conf && 2795 cmd->conf_compl_supported; 2796 } 2797 2798 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2799 struct qla_tgt_prm *prm) 2800 { 2801 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2802 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2803 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2804 if (qlt_need_explicit_conf(prm->cmd, 0)) { 2805 ctio->u.status0.flags |= cpu_to_le16( 2806 CTIO7_FLAGS_EXPLICIT_CONFORM | 2807 CTIO7_FLAGS_CONFORM_REQ); 2808 } 2809 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2810 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2811 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2812 int i; 2813 2814 if (qlt_need_explicit_conf(prm->cmd, 1)) { 2815 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2816 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, 2817 "Skipping EXPLICIT_CONFORM and " 2818 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2819 "non GOOD status\n"); 2820 goto skip_explict_conf; 2821 } 2822 ctio->u.status1.flags |= cpu_to_le16( 2823 CTIO7_FLAGS_EXPLICIT_CONFORM | 2824 CTIO7_FLAGS_CONFORM_REQ); 2825 } 2826 skip_explict_conf: 2827 ctio->u.status1.flags &= 2828 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2829 ctio->u.status1.flags |= 2830 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2831 ctio->u.status1.scsi_status |= 2832 cpu_to_le16(SS_SENSE_LEN_VALID); 2833 ctio->u.status1.sense_length = 2834 cpu_to_le16(prm->sense_buffer_len); 2835 for (i = 0; i < prm->sense_buffer_len/4; i++) 2836 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2837 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2838 2839 qlt_print_dif_err(prm); 2840 2841 } else { 2842 ctio->u.status1.flags &= 2843 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2844 ctio->u.status1.flags |= 2845 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2846 ctio->u.status1.sense_length = 0; 2847 memset(ctio->u.status1.sense_data, 0, 2848 sizeof(ctio->u.status1.sense_data)); 2849 } 2850 2851 /* Sense with len > 24, is it possible ??? 
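* (sense_buffer_len is clamped above to
* sizeof(ctio->u.status1.sense_data), so anything longer is
* truncated rather than sent on the wire.)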
*/ 2852 } 2853 2854 static inline int 2855 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2856 { 2857 switch (se_cmd->prot_op) { 2858 case TARGET_PROT_DOUT_INSERT: 2859 case TARGET_PROT_DIN_STRIP: 2860 if (ql2xenablehba_err_chk >= 1) 2861 return 1; 2862 break; 2863 case TARGET_PROT_DOUT_PASS: 2864 case TARGET_PROT_DIN_PASS: 2865 if (ql2xenablehba_err_chk >= 2) 2866 return 1; 2867 break; 2868 case TARGET_PROT_DIN_INSERT: 2869 case TARGET_PROT_DOUT_STRIP: 2870 return 1; 2871 default: 2872 break; 2873 } 2874 return 0; 2875 } 2876 2877 static inline int 2878 qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2879 { 2880 switch (se_cmd->prot_op) { 2881 case TARGET_PROT_DIN_INSERT: 2882 case TARGET_PROT_DOUT_INSERT: 2883 case TARGET_PROT_DIN_STRIP: 2884 case TARGET_PROT_DOUT_STRIP: 2885 case TARGET_PROT_DIN_PASS: 2886 case TARGET_PROT_DOUT_PASS: 2887 return 1; 2888 default: 2889 return 0; 2890 } 2891 return 0; 2892 } 2893 2894 /* 2895 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2896 */ 2897 static void 2898 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2899 uint16_t *pfw_prot_opts) 2900 { 2901 struct se_cmd *se_cmd = &cmd->se_cmd; 2902 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2903 scsi_qla_host_t *vha = cmd->tgt->vha; 2904 struct qla_hw_data *ha = vha->hw; 2905 uint32_t t32 = 0; 2906 2907 /* 2908 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2909 * have been immplemented by TCM, before AppTag is avail. 2910 * Look for modesense_handlers[] 2911 */ 2912 ctx->app_tag = 0; 2913 ctx->app_tag_mask[0] = 0x0; 2914 ctx->app_tag_mask[1] = 0x0; 2915 2916 if (IS_PI_UNINIT_CAPABLE(ha)) { 2917 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2918 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2919 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2920 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2921 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2922 } 2923 2924 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2925 2926 switch (se_cmd->prot_type) { 2927 case TARGET_DIF_TYPE0_PROT: 2928 /* 2929 * No check for ql2xenablehba_err_chk, as it 2930 * would be an I/O error if hba tag generation 2931 * is not done. 2932 */ 2933 ctx->ref_tag = cpu_to_le32(lba); 2934 /* enable ALL bytes of the ref tag */ 2935 ctx->ref_tag_mask[0] = 0xff; 2936 ctx->ref_tag_mask[1] = 0xff; 2937 ctx->ref_tag_mask[2] = 0xff; 2938 ctx->ref_tag_mask[3] = 0xff; 2939 break; 2940 case TARGET_DIF_TYPE1_PROT: 2941 /* 2942 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2943 * REF tag, and 16 bit app tag. 
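*
* For reference, the 8-byte T10 PI tuple this corresponds to (cf.
* include/linux/t10-pi.h) is laid out as:
*
*	__be16 guard_tag;	16-bit CRC of the data block
*	__be16 app_tag;		opaque to the transport
*	__be32 ref_tag;		low 32 bits of the LBA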
2944 */ 2945 ctx->ref_tag = cpu_to_le32(lba); 2946 if (!qla_tgt_ref_mask_check(se_cmd) || 2947 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2948 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2949 break; 2950 } 2951 /* enable ALL bytes of the ref tag */ 2952 ctx->ref_tag_mask[0] = 0xff; 2953 ctx->ref_tag_mask[1] = 0xff; 2954 ctx->ref_tag_mask[2] = 0xff; 2955 ctx->ref_tag_mask[3] = 0xff; 2956 break; 2957 case TARGET_DIF_TYPE2_PROT: 2958 /* 2959 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2960 * tag has to match LBA in CDB + N 2961 */ 2962 ctx->ref_tag = cpu_to_le32(lba); 2963 if (!qla_tgt_ref_mask_check(se_cmd) || 2964 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2965 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2966 break; 2967 } 2968 /* enable ALL bytes of the ref tag */ 2969 ctx->ref_tag_mask[0] = 0xff; 2970 ctx->ref_tag_mask[1] = 0xff; 2971 ctx->ref_tag_mask[2] = 0xff; 2972 ctx->ref_tag_mask[3] = 0xff; 2973 break; 2974 case TARGET_DIF_TYPE3_PROT: 2975 /* For TYPE 3 protection: 16 bit GUARD only */ 2976 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2977 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2978 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2979 break; 2980 } 2981 } 2982 2983 static inline int 2984 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 2985 { 2986 struct dsd64 *cur_dsd; 2987 uint32_t transfer_length = 0; 2988 uint32_t data_bytes; 2989 uint32_t dif_bytes; 2990 uint8_t bundling = 1; 2991 struct crc_context *crc_ctx_pkt = NULL; 2992 struct qla_hw_data *ha; 2993 struct ctio_crc2_to_fw *pkt; 2994 dma_addr_t crc_ctx_dma; 2995 uint16_t fw_prot_opts = 0; 2996 struct qla_tgt_cmd *cmd = prm->cmd; 2997 struct se_cmd *se_cmd = &cmd->se_cmd; 2998 uint32_t h; 2999 struct atio_from_isp *atio = &prm->cmd->atio; 3000 struct qla_tc_param tc; 3001 uint16_t t16; 3002 scsi_qla_host_t *vha = cmd->vha; 3003 3004 ha = vha->hw; 3005 3006 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; 3007 prm->pkt = pkt; 3008 memset(pkt, 0, sizeof(*pkt)); 3009 3010 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 3011 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 3012 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 3013 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 3014 3015 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 3016 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 3017 bundling = 0; 3018 3019 /* Compute dif len and adjust data len to incude protection */ 3020 data_bytes = cmd->bufflen; 3021 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 3022 3023 switch (se_cmd->prot_op) { 3024 case TARGET_PROT_DIN_INSERT: 3025 case TARGET_PROT_DOUT_STRIP: 3026 transfer_length = data_bytes; 3027 if (cmd->prot_sg_cnt) 3028 data_bytes += dif_bytes; 3029 break; 3030 case TARGET_PROT_DIN_STRIP: 3031 case TARGET_PROT_DOUT_INSERT: 3032 case TARGET_PROT_DIN_PASS: 3033 case TARGET_PROT_DOUT_PASS: 3034 transfer_length = data_bytes + dif_bytes; 3035 break; 3036 default: 3037 BUG(); 3038 break; 3039 } 3040 3041 if (!qlt_hba_err_chk_enabled(se_cmd)) 3042 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 3043 /* HBA error checking enabled */ 3044 else if (IS_PI_UNINIT_CAPABLE(ha)) { 3045 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 3046 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 3047 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 3048 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 3049 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 3050 } 3051 3052 switch (se_cmd->prot_op) { 3053 case TARGET_PROT_DIN_INSERT: 3054 case TARGET_PROT_DOUT_INSERT: 3055 fw_prot_opts |= 
PO_MODE_DIF_INSERT; 3056 break; 3057 case TARGET_PROT_DIN_STRIP: 3058 case TARGET_PROT_DOUT_STRIP: 3059 fw_prot_opts |= PO_MODE_DIF_REMOVE; 3060 break; 3061 case TARGET_PROT_DIN_PASS: 3062 case TARGET_PROT_DOUT_PASS: 3063 fw_prot_opts |= PO_MODE_DIF_PASS; 3064 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 3065 break; 3066 default:/* Normal Request */ 3067 fw_prot_opts |= PO_MODE_DIF_PASS; 3068 break; 3069 } 3070 3071 /* ---- PKT ---- */ 3072 /* Update entry type to indicate Command Type CRC_2 IOCB */ 3073 pkt->entry_type = CTIO_CRC2; 3074 pkt->entry_count = 1; 3075 pkt->vp_index = cmd->vp_idx; 3076 3077 h = qlt_make_handle(qpair); 3078 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 3079 /* 3080 * CTIO type 7 from the firmware doesn't provide a way to 3081 * know the initiator's LOOP ID, hence we can't find 3082 * the session and, so, the command. 3083 */ 3084 return -EAGAIN; 3085 } else 3086 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 3087 3088 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 3089 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3090 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3091 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3092 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 3093 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3094 3095 /* silence compile warning */ 3096 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3097 pkt->ox_id = cpu_to_le16(t16); 3098 3099 t16 = (atio->u.isp24.attr << 9); 3100 pkt->flags |= cpu_to_le16(t16); 3101 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 3102 3103 /* Set transfer direction */ 3104 if (cmd->dma_data_direction == DMA_TO_DEVICE) 3105 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 3106 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 3107 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 3108 3109 pkt->dseg_count = prm->tot_dsds; 3110 /* Fibre channel byte count */ 3111 pkt->transfer_length = cpu_to_le32(transfer_length); 3112 3113 /* ----- CRC context -------- */ 3114 3115 /* Allocate CRC context from global pool */ 3116 crc_ctx_pkt = cmd->ctx = 3117 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 3118 3119 if (!crc_ctx_pkt) 3120 goto crc_queuing_error; 3121 3122 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3123 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 3124 3125 /* Set handle */ 3126 crc_ctx_pkt->handle = pkt->handle; 3127 3128 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3129 3130 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); 3131 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3132 3133 if (!bundling) { 3134 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; 3135 } else { 3136 /* 3137 * Configure Bundling if we need to fetch interlaving 3138 * protection PCI accesses 3139 */ 3140 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3141 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3142 crc_ctx_pkt->u.bundling.dseg_count = 3143 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3144 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; 3145 } 3146 3147 /* Finish the common fields of CRC pkt */ 3148 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3149 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3150 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3151 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3152 3153 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3154 tc.vha = vha; 3155 tc.blk_sz = cmd->blk_sz; 3156 tc.bufflen = cmd->bufflen; 3157 tc.sg = cmd->sg; 3158 tc.prot_sg = cmd->prot_sg; 3159 tc.ctx = crc_ctx_pkt; 3160 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 
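/*
* tc (struct qla_tc_param) now carries everything the DSD walkers
* below need: the data and protection scatterlists plus the CRC
* context that will own any continuation DSDs they allocate;
* ctx_dsd_alloced is tracked so qlt_unmap_sg() knows to run
* qla2x00_clean_dsd_pool() when the command is torn down.
*/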
3161 3162 /* Walks data segments */ 3163 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3164 3165 if (!bundling && prm->prot_seg_cnt) { 3166 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3167 prm->tot_dsds, &tc)) 3168 goto crc_queuing_error; 3169 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3170 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3171 goto crc_queuing_error; 3172 3173 if (bundling && prm->prot_seg_cnt) { 3174 /* Walks dif segments */ 3175 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3176 3177 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; 3178 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3179 prm->prot_seg_cnt, cmd)) 3180 goto crc_queuing_error; 3181 } 3182 return QLA_SUCCESS; 3183 3184 crc_queuing_error: 3185 /* Cleanup will be performed by the caller */ 3186 qpair->req->outstanding_cmds[h] = NULL; 3187 3188 return QLA_FUNCTION_FAILED; 3189 } 3190 3191 /* 3192 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 3193 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 3194 */ 3195 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 3196 uint8_t scsi_status) 3197 { 3198 struct scsi_qla_host *vha = cmd->vha; 3199 struct qla_qpair *qpair = cmd->qpair; 3200 struct ctio7_to_24xx *pkt; 3201 struct qla_tgt_prm prm; 3202 uint32_t full_req_cnt = 0; 3203 unsigned long flags = 0; 3204 int res; 3205 3206 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3207 (cmd->sess && cmd->sess->deleted)) { 3208 cmd->state = QLA_TGT_STATE_PROCESSED; 3209 res = 0; 3210 goto free; 3211 } 3212 3213 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, 3214 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", 3215 (xmit_type & QLA_TGT_XMIT_STATUS) ? 3216 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 3217 &cmd->se_cmd, qpair->id); 3218 3219 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3220 &full_req_cnt); 3221 if (unlikely(res != 0)) 3222 goto free; 3223 3224 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3225 3226 if (xmit_type == QLA_TGT_XMIT_STATUS) 3227 qpair->tgt_counters.core_qla_snd_status++; 3228 else 3229 qpair->tgt_counters.core_qla_que_buf++; 3230 3231 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { 3232 /* 3233 * Either the port is not online or this request was from 3234 * previous life, just abort the processing. 
3235 */ 3236 cmd->state = QLA_TGT_STATE_PROCESSED; 3237 ql_dbg_qp(ql_dbg_async, qpair, 0xe101, 3238 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3239 vha->flags.online, qla2x00_reset_active(vha), 3240 cmd->reset_count, qpair->chip_reset); 3241 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3242 res = 0; 3243 goto free; 3244 } 3245 3246 /* Does F/W have an IOCBs for this request */ 3247 res = qlt_check_reserve_free_req(qpair, full_req_cnt); 3248 if (unlikely(res)) 3249 goto out_unmap_unlock; 3250 3251 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 3252 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3253 else 3254 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3255 if (unlikely(res != 0)) { 3256 qpair->req->cnt += full_req_cnt; 3257 goto out_unmap_unlock; 3258 } 3259 3260 pkt = (struct ctio7_to_24xx *)prm.pkt; 3261 3262 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 3263 pkt->u.status0.flags |= 3264 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 3265 CTIO7_FLAGS_STATUS_MODE_0); 3266 3267 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3268 qlt_load_data_segments(&prm); 3269 3270 if (prm.add_status_pkt == 0) { 3271 if (xmit_type & QLA_TGT_XMIT_STATUS) { 3272 pkt->u.status0.scsi_status = 3273 cpu_to_le16(prm.rq_result); 3274 pkt->u.status0.residual = 3275 cpu_to_le32(prm.residual); 3276 pkt->u.status0.flags |= cpu_to_le16( 3277 CTIO7_FLAGS_SEND_STATUS); 3278 if (qlt_need_explicit_conf(cmd, 0)) { 3279 pkt->u.status0.flags |= 3280 cpu_to_le16( 3281 CTIO7_FLAGS_EXPLICIT_CONFORM | 3282 CTIO7_FLAGS_CONFORM_REQ); 3283 } 3284 } 3285 3286 } else { 3287 /* 3288 * We have already made sure that there is sufficient 3289 * amount of request entries to not drop HW lock in 3290 * req_pkt(). 3291 */ 3292 struct ctio7_to_24xx *ctio = 3293 (struct ctio7_to_24xx *)qlt_get_req_pkt( 3294 qpair->req); 3295 3296 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, 3297 "Building additional status packet 0x%p.\n", 3298 ctio); 3299 3300 /* 3301 * T10Dif: ctio_crc2_to_fw overlay ontop of 3302 * ctio7_to_24xx 3303 */ 3304 memcpy(ctio, pkt, sizeof(*ctio)); 3305 /* reset back to CTIO7 */ 3306 ctio->entry_count = 1; 3307 ctio->entry_type = CTIO_TYPE7; 3308 ctio->dseg_count = 0; 3309 ctio->u.status1.flags &= ~cpu_to_le16( 3310 CTIO7_FLAGS_DATA_IN); 3311 3312 /* Real finish is ctio_m1's finish */ 3313 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 3314 pkt->u.status0.flags |= cpu_to_le16( 3315 CTIO7_FLAGS_DONT_RET_CTIO); 3316 3317 /* qlt_24xx_init_ctio_to_isp will correct 3318 * all neccessary fields that's part of CTIO7. 3319 * There should be no residual of CTIO-CRC2 data. 
3320 */ 3321 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 3322 &prm); 3323 } 3324 } else 3325 qlt_24xx_init_ctio_to_isp(pkt, &prm); 3326 3327 3328 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 3329 cmd->cmd_sent_to_fw = 1; 3330 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); 3331 3332 /* Memory Barrier */ 3333 wmb(); 3334 if (qpair->reqq_start_iocbs) 3335 qpair->reqq_start_iocbs(qpair); 3336 else 3337 qla2x00_start_iocbs(vha, qpair->req); 3338 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3339 3340 return 0; 3341 3342 out_unmap_unlock: 3343 qlt_unmap_sg(vha, cmd); 3344 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3345 3346 free: 3347 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3348 return res; 3349 } 3350 EXPORT_SYMBOL(qlt_xmit_response); 3351 3352 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 3353 { 3354 struct ctio7_to_24xx *pkt; 3355 struct scsi_qla_host *vha = cmd->vha; 3356 struct qla_tgt *tgt = cmd->tgt; 3357 struct qla_tgt_prm prm; 3358 unsigned long flags = 0; 3359 int res = 0; 3360 struct qla_qpair *qpair = cmd->qpair; 3361 3362 memset(&prm, 0, sizeof(prm)); 3363 prm.cmd = cmd; 3364 prm.tgt = tgt; 3365 prm.sg = NULL; 3366 prm.req_cnt = 1; 3367 3368 /* Calculate number of entries and segments required */ 3369 if (qlt_pci_map_calc_cnt(&prm) != 0) 3370 return -EAGAIN; 3371 3372 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3373 (cmd->sess && cmd->sess->deleted)) { 3374 /* 3375 * Either the port is not online or this request was from 3376 * previous life, just abort the processing. 3377 */ 3378 cmd->aborted = 1; 3379 cmd->write_data_transferred = 0; 3380 cmd->state = QLA_TGT_STATE_DATA_IN; 3381 vha->hw->tgt.tgt_ops->handle_data(cmd); 3382 ql_dbg_qp(ql_dbg_async, qpair, 0xe102, 3383 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 3384 vha->flags.online, qla2x00_reset_active(vha), 3385 cmd->reset_count, qpair->chip_reset); 3386 return 0; 3387 } 3388 3389 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3390 /* Does F/W have an IOCBs for this request */ 3391 res = qlt_check_reserve_free_req(qpair, prm.req_cnt); 3392 if (res != 0) 3393 goto out_unlock_free_unmap; 3394 if (cmd->se_cmd.prot_op) 3395 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3396 else 3397 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3398 3399 if (unlikely(res != 0)) { 3400 qpair->req->cnt += prm.req_cnt; 3401 goto out_unlock_free_unmap; 3402 } 3403 3404 pkt = (struct ctio7_to_24xx *)prm.pkt; 3405 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 3406 CTIO7_FLAGS_STATUS_MODE_0); 3407 3408 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3409 qlt_load_data_segments(&prm); 3410 3411 cmd->state = QLA_TGT_STATE_NEED_DATA; 3412 cmd->cmd_sent_to_fw = 1; 3413 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); 3414 3415 /* Memory Barrier */ 3416 wmb(); 3417 if (qpair->reqq_start_iocbs) 3418 qpair->reqq_start_iocbs(qpair); 3419 else 3420 qla2x00_start_iocbs(vha, qpair->req); 3421 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3422 3423 return res; 3424 3425 out_unlock_free_unmap: 3426 qlt_unmap_sg(vha, cmd); 3427 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3428 3429 return res; 3430 } 3431 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3432 3433 3434 /* 3435 * it is assumed either hardware_lock or qpair lock is held. 
3436 */ 3437 static void 3438 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 3439 struct ctio_crc_from_fw *sts) 3440 { 3441 uint8_t *ap = &sts->actual_dif[0]; 3442 uint8_t *ep = &sts->expected_dif[0]; 3443 uint64_t lba = cmd->se_cmd.t_task_lba; 3444 uint8_t scsi_status, sense_key, asc, ascq; 3445 unsigned long flags; 3446 struct scsi_qla_host *vha = cmd->vha; 3447 3448 cmd->trc_flags |= TRC_DIF_ERR; 3449 3450 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3451 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3452 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3453 3454 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3455 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3456 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3457 3458 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3459 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3460 3461 scsi_status = sense_key = asc = ascq = 0; 3462 3463 /* check appl tag */ 3464 if (cmd->e_app_tag != cmd->a_app_tag) { 3465 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, 3466 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3467 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3468 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3469 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3470 cmd->atio.u.isp24.fcp_hdr.ox_id); 3471 3472 cmd->dif_err_code = DIF_ERR_APP; 3473 scsi_status = SAM_STAT_CHECK_CONDITION; 3474 sense_key = ABORTED_COMMAND; 3475 asc = 0x10; 3476 ascq = 0x2; 3477 } 3478 3479 /* check ref tag */ 3480 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3481 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, 3482 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", 3483 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3484 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3485 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3486 cmd->atio.u.isp24.fcp_hdr.ox_id); 3487 3488 cmd->dif_err_code = DIF_ERR_REF; 3489 scsi_status = SAM_STAT_CHECK_CONDITION; 3490 sense_key = ABORTED_COMMAND; 3491 asc = 0x10; 3492 ascq = 0x3; 3493 goto out; 3494 } 3495 3496 /* check guard */ 3497 if (cmd->e_guard != cmd->a_guard) { 3498 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3499 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3500 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3501 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3502 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3503 cmd->atio.u.isp24.fcp_hdr.ox_id); 3504 3505 cmd->dif_err_code = DIF_ERR_GRD; 3506 scsi_status = SAM_STAT_CHECK_CONDITION; 3507 sense_key = ABORTED_COMMAND; 3508 asc = 0x10; 3509 ascq = 0x1; 3510 } 3511 out: 3512 switch (cmd->state) { 3513 case QLA_TGT_STATE_NEED_DATA: 3514 /* handle_data will load DIF error code */ 3515 cmd->state = QLA_TGT_STATE_DATA_IN; 3516 vha->hw->tgt.tgt_ops->handle_data(cmd); 3517 break; 3518 default: 3519 spin_lock_irqsave(&cmd->cmd_lock, flags); 3520 if (cmd->aborted) { 3521 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3522 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3523 break; 3524 } 3525 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3526 3527 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3528 ascq); 3529 /* assume scsi status gets out on the wire. 3530 * Will not wait for completion. 
3531 */ 3532 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3533 break; 3534 } 3535 } 3536 3537 /* If hardware_lock held on entry, might drop it, then reaquire */ 3538 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3539 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3540 struct imm_ntfy_from_isp *ntfy) 3541 { 3542 struct nack_to_isp *nack; 3543 struct qla_hw_data *ha = vha->hw; 3544 request_t *pkt; 3545 int ret = 0; 3546 3547 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3548 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3549 3550 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3551 if (pkt == NULL) { 3552 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3553 "qla_target(%d): %s failed: unable to allocate " 3554 "request packet\n", vha->vp_idx, __func__); 3555 return -ENOMEM; 3556 } 3557 3558 pkt->entry_type = NOTIFY_ACK_TYPE; 3559 pkt->entry_count = 1; 3560 pkt->handle = QLA_TGT_SKIP_HANDLE; 3561 3562 nack = (struct nack_to_isp *)pkt; 3563 nack->ox_id = ntfy->ox_id; 3564 3565 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3566 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3567 nack->u.isp24.flags = ntfy->u.isp24.flags & 3568 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3569 } 3570 3571 /* terminate */ 3572 nack->u.isp24.flags |= 3573 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3574 3575 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3576 nack->u.isp24.status = ntfy->u.isp24.status; 3577 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3578 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3579 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3580 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3581 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3582 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3583 3584 qla2x00_start_iocbs(vha, vha->req); 3585 return ret; 3586 } 3587 3588 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3589 struct imm_ntfy_from_isp *imm, int ha_locked) 3590 { 3591 int rc; 3592 3593 WARN_ON_ONCE(!ha_locked); 3594 rc = __qlt_send_term_imm_notif(vha, imm); 3595 pr_debug("rc = %d\n", rc); 3596 } 3597 3598 /* 3599 * If hardware_lock held on entry, might drop it, then reaquire 3600 * This function sends the appropriate CTIO to ISP 2xxx or 24xx 3601 */ 3602 static int __qlt_send_term_exchange(struct qla_qpair *qpair, 3603 struct qla_tgt_cmd *cmd, 3604 struct atio_from_isp *atio) 3605 { 3606 struct scsi_qla_host *vha = qpair->vha; 3607 struct ctio7_to_24xx *ctio24; 3608 struct qla_hw_data *ha = vha->hw; 3609 request_t *pkt; 3610 int ret = 0; 3611 uint16_t temp; 3612 3613 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3614 3615 if (cmd) 3616 vha = cmd->vha; 3617 3618 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); 3619 if (pkt == NULL) { 3620 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3621 "qla_target(%d): %s failed: unable to allocate " 3622 "request packet\n", vha->vp_idx, __func__); 3623 return -ENOMEM; 3624 } 3625 3626 if (cmd != NULL) { 3627 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3628 ql_dbg(ql_dbg_tgt, vha, 0xe051, 3629 "qla_target(%d): Terminating cmd %p with " 3630 "incorrect state %d\n", vha->vp_idx, cmd, 3631 cmd->state); 3632 } else 3633 ret = 1; 3634 } 3635 3636 qpair->tgt_counters.num_term_xchg_sent++; 3637 pkt->entry_count = 1; 3638 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3639 3640 ctio24 = (struct ctio7_to_24xx *)pkt; 3641 ctio24->entry_type = CTIO_TYPE7; 3642 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3643 
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3644 ctio24->vp_index = vha->vp_idx; 3645 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 3646 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3647 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | 3648 CTIO7_FLAGS_TERMINATE; 3649 ctio24->u.status1.flags = cpu_to_le16(temp); 3650 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3651 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3652 3653 /* Memory Barrier */ 3654 wmb(); 3655 if (qpair->reqq_start_iocbs) 3656 qpair->reqq_start_iocbs(qpair); 3657 else 3658 qla2x00_start_iocbs(vha, qpair->req); 3659 return ret; 3660 } 3661 3662 static void qlt_send_term_exchange(struct qla_qpair *qpair, 3663 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, 3664 int ul_abort) 3665 { 3666 struct scsi_qla_host *vha; 3667 unsigned long flags = 0; 3668 int rc; 3669 3670 /* why use different vha? NPIV */ 3671 if (cmd) 3672 vha = cmd->vha; 3673 else 3674 vha = qpair->vha; 3675 3676 if (ha_locked) { 3677 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3678 if (rc == -ENOMEM) 3679 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3680 goto done; 3681 } 3682 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3683 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3684 if (rc == -ENOMEM) 3685 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3686 3687 done: 3688 if (cmd && !ul_abort && !cmd->aborted) { 3689 if (cmd->sg_mapped) 3690 qlt_unmap_sg(vha, cmd); 3691 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3692 } 3693 3694 if (!ha_locked) 3695 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3696 3697 return; 3698 } 3699 3700 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3701 { 3702 struct list_head free_list; 3703 struct qla_tgt_cmd *cmd, *tcmd; 3704 3705 vha->hw->tgt.leak_exchg_thresh_hold = 3706 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3707 3708 cmd = tcmd = NULL; 3709 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3710 INIT_LIST_HEAD(&free_list); 3711 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3712 3713 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3714 list_del(&cmd->cmd_list); 3715 /* This cmd was never sent to TCM. 
There is no need 3716 * to schedule free or call free_cmd 3717 */ 3718 qlt_free_cmd(cmd); 3719 vha->hw->tgt.num_qfull_cmds_alloc--; 3720 } 3721 } 3722 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3723 } 3724 3725 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3726 { 3727 uint32_t total_leaked; 3728 3729 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3730 3731 if (vha->hw->tgt.leak_exchg_thresh_hold && 3732 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3733 3734 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3735 "Chip reset due to exchange starvation: %d/%d.\n", 3736 total_leaked, vha->hw->cur_fw_xcb_count); 3737 3738 if (IS_P3P_TYPE(vha->hw)) 3739 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3740 else 3741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3742 qla2xxx_wake_dpc(vha); 3743 } 3744 3745 } 3746 3747 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3748 { 3749 struct qla_tgt *tgt = cmd->tgt; 3750 struct scsi_qla_host *vha = tgt->vha; 3751 struct se_cmd *se_cmd = &cmd->se_cmd; 3752 unsigned long flags; 3753 3754 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3755 "qla_target(%d): terminating exchange for aborted cmd=%p " 3756 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3757 se_cmd->tag); 3758 3759 spin_lock_irqsave(&cmd->cmd_lock, flags); 3760 if (cmd->aborted) { 3761 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3762 /* 3763 * It's normal to see 2 calls in this path: 3764 * 1) XFER Rdy completion + CMD_T_ABORT 3765 * 2) TCM TMR - drain_state_list 3766 */ 3767 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3768 "multiple abort. %p transport_state %x, t_state %x, " 3769 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3770 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3771 return EIO; 3772 } 3773 cmd->aborted = 1; 3774 cmd->trc_flags |= TRC_ABORT; 3775 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3776 3777 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3778 return 0; 3779 } 3780 EXPORT_SYMBOL(qlt_abort_cmd); 3781 3782 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3783 { 3784 struct fc_port *sess = cmd->sess; 3785 3786 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3787 "%s: se_cmd[%p] ox_id %04x\n", 3788 __func__, &cmd->se_cmd, 3789 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3790 3791 BUG_ON(cmd->cmd_in_wq); 3792 3793 if (cmd->sg_mapped) 3794 qlt_unmap_sg(cmd->vha, cmd); 3795 3796 if (!cmd->q_full) 3797 qlt_decr_num_pend_cmds(cmd->vha); 3798 3799 BUG_ON(cmd->sg_mapped); 3800 cmd->jiffies_at_free = get_jiffies_64(); 3801 if (unlikely(cmd->free_sg)) 3802 kfree(cmd->sg); 3803 3804 if (!sess || !sess->se_sess) { 3805 WARN_ON(1); 3806 return; 3807 } 3808 cmd->jiffies_at_free = get_jiffies_64(); 3809 target_free_tag(sess->se_sess, &cmd->se_cmd); 3810 } 3811 EXPORT_SYMBOL(qlt_free_cmd); 3812 3813 /* 3814 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire 3815 */ 3816 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, 3817 struct qla_tgt_cmd *cmd, uint32_t status) 3818 { 3819 int term = 0; 3820 struct scsi_qla_host *vha = qpair->vha; 3821 3822 if (cmd->se_cmd.prot_op) 3823 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, 3824 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3825 "se_cmd=%p tag[%x] op %#x/%s", 3826 cmd->lba, cmd->lba, 3827 cmd->num_blks, &cmd->se_cmd, 3828 cmd->atio.u.isp24.exchange_addr, 3829 cmd->se_cmd.prot_op, 3830 prot_op_str(cmd->se_cmd.prot_op)); 3831 3832 if (ctio != NULL) { 3833 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3834 3835 term = !(c->flags & 3836 cpu_to_le16(OF_TERM_EXCH)); 3837 } else 3838 term = 1; 3839 3840 if (term) 3841 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); 3842 3843 return term; 3844 } 3845 3846 3847 /* ha->hardware_lock supposed to be held on entry */ 3848 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3849 struct rsp_que *rsp, uint32_t handle, void *ctio) 3850 { 3851 void *cmd = NULL; 3852 struct req_que *req; 3853 int qid = GET_QID(handle); 3854 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; 3855 3856 if (unlikely(h == QLA_TGT_SKIP_HANDLE)) 3857 return NULL; 3858 3859 if (qid == rsp->req->id) { 3860 req = rsp->req; 3861 } else if (vha->hw->req_q_map[qid]) { 3862 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, 3863 "qla_target(%d): CTIO completion with different QID %d handle %x\n", 3864 vha->vp_idx, rsp->id, handle); 3865 req = vha->hw->req_q_map[qid]; 3866 } else { 3867 return NULL; 3868 } 3869 3870 h &= QLA_CMD_HANDLE_MASK; 3871 3872 if (h != QLA_TGT_NULL_HANDLE) { 3873 if (unlikely(h >= req->num_outstanding_cmds)) { 3874 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3875 "qla_target(%d): Wrong handle %x received\n", 3876 vha->vp_idx, handle); 3877 return NULL; 3878 } 3879 3880 cmd = (void *) req->outstanding_cmds[h]; 3881 if (unlikely(cmd == NULL)) { 3882 ql_dbg(ql_dbg_async, vha, 0xe053, 3883 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", 3884 vha->vp_idx, handle, req->id, rsp->id); 3885 return NULL; 3886 } 3887 req->outstanding_cmds[h] = NULL; 3888 } else if (ctio != NULL) { 3889 /* We can't get loop ID from CTIO7 */ 3890 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3891 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3892 "support NULL handles\n", vha->vp_idx); 3893 return NULL; 3894 } 3895 3896 return cmd; 3897 } 3898 3899 /* 3900 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reacquire 3901 */ 3902 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, 3903 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) 3904 { 3905 struct qla_hw_data *ha = vha->hw; 3906 struct se_cmd *se_cmd; 3907 struct qla_tgt_cmd *cmd; 3908 struct qla_qpair *qpair = rsp->qpair; 3909 3910 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3911 /* That could happen only in case of an error/reset/abort */ 3912 if (status != CTIO_SUCCESS) { 3913 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3914 "Intermediate CTIO received" 3915 " (status %x)\n", status); 3916 } 3917 return; 3918 } 3919 3920 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); 3921 if (cmd == NULL) 3922 return; 3923 3924 se_cmd = &cmd->se_cmd; 3925 cmd->cmd_sent_to_fw = 0; 3926 3927 qlt_unmap_sg(vha, cmd); 3928 3929 if (unlikely(status != CTIO_SUCCESS)) { 3930 switch (status & 0xFFFF) { 3931 case CTIO_INVALID_RX_ID: 3932 if (printk_ratelimit()) 3933 dev_info(&vha->hw->pdev->dev, 3934 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n", 3935 vha->vp_idx, cmd->atio.u.isp24.attr, 3936 ((cmd->ctio_flags >> 9) & 0xf), 3937 cmd->ctio_flags); 3938 3939 break; 3940 case CTIO_LIP_RESET: 3941 case CTIO_TARGET_RESET: 3942 case CTIO_ABORTED: 3943 /* driver requested abort via Terminate exchange */ 3944 case CTIO_TIMEOUT: 3945 /* They are OK */ 3946 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3947 "qla_target(%d): CTIO with " 3948 "status %#x received, state %x, se_cmd %p, " 3949 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3950 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3951 status, cmd->state, se_cmd); 3952 break; 3953 3954 case CTIO_PORT_LOGGED_OUT: 3955 case CTIO_PORT_UNAVAILABLE: 3956 { 3957 int logged_out = 3958 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; 3959 3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3961 "qla_target(%d): CTIO with %s status %x " 3962 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3963 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3964 status, cmd->state, se_cmd); 3965 3966 if (logged_out && cmd->sess) { 3967 /* 3968 * Session is already logged out, but we need 3969 * to notify the initiator, who is not aware of this 3970 */ 3971 cmd->sess->send_els_logo = 1; 3972 ql_dbg(ql_dbg_disc, vha, 0x20f8, 3973 "%s %d %8phC post del sess\n", 3974 __func__, __LINE__, cmd->sess->port_name); 3975 3976 qlt_schedule_sess_for_deletion(cmd->sess); 3977 } 3978 break; 3979 } 3980 case CTIO_DIF_ERROR: { 3981 struct ctio_crc_from_fw *crc = 3982 (struct ctio_crc_from_fw *)ctio; 3983 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3984 "qla_target(%d): CTIO with DIF_ERROR status %x " 3985 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 3986 "expect_dif[0x%llx]\n", 3987 vha->vp_idx, status, cmd->state, se_cmd, 3988 *((u64 *)&crc->actual_dif[0]), 3989 *((u64 *)&crc->expected_dif[0])); 3990 3991 qlt_handle_dif_error(qpair, cmd, ctio); 3992 return; 3993 } 3994 default: 3995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3996 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n", 3997 vha->vp_idx, status, cmd->state, se_cmd); 3998 break; 3999 } 4000 4001 4002 /* "cmd->aborted" means 4003 * cmd is already aborted/terminated, we don't 4004 * need to terminate again. The exchange is already 4005 * cleaned up/freed at FW level. Just clean up at the driver 4006 * level.
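 * * In effect (an illustrative sketch only; "needs_term" is not a real variable in this driver): needs_term = (cmd->state != QLA_TGT_STATE_NEED_DATA) && !cmd->aborted; if (needs_term) qlt_term_ctio_exchange(qpair, ctio, cmd, status); * NEED_DATA is excluded because a failed data-out phase is instead reported to the upper layer via handle_data() further down.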
4007 */ 4008 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 4009 (!cmd->aborted)) { 4010 cmd->trc_flags |= TRC_CTIO_ERR; 4011 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) 4012 return; 4013 } 4014 } 4015 4016 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 4017 cmd->trc_flags |= TRC_CTIO_DONE; 4018 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 4019 cmd->state = QLA_TGT_STATE_DATA_IN; 4020 4021 if (status == CTIO_SUCCESS) 4022 cmd->write_data_transferred = 1; 4023 4024 ha->tgt.tgt_ops->handle_data(cmd); 4025 return; 4026 } else if (cmd->aborted) { 4027 cmd->trc_flags |= TRC_CTIO_ABORTED; 4028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 4029 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 4030 } else { 4031 cmd->trc_flags |= TRC_CTIO_STRANGE; 4032 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 4033 "qla_target(%d): A command in state (%d) should " 4034 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 4035 } 4036 4037 if (unlikely(status != CTIO_SUCCESS) && 4038 !cmd->aborted) { 4039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 4040 dump_stack(); 4041 } 4042 4043 ha->tgt.tgt_ops->free_cmd(cmd); 4044 } 4045 4046 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 4047 uint8_t task_codes) 4048 { 4049 int fcp_task_attr; 4050 4051 switch (task_codes) { 4052 case ATIO_SIMPLE_QUEUE: 4053 fcp_task_attr = TCM_SIMPLE_TAG; 4054 break; 4055 case ATIO_HEAD_OF_QUEUE: 4056 fcp_task_attr = TCM_HEAD_TAG; 4057 break; 4058 case ATIO_ORDERED_QUEUE: 4059 fcp_task_attr = TCM_ORDERED_TAG; 4060 break; 4061 case ATIO_ACA_QUEUE: 4062 fcp_task_attr = TCM_ACA_TAG; 4063 break; 4064 case ATIO_UNTAGGED: 4065 fcp_task_attr = TCM_SIMPLE_TAG; 4066 break; 4067 default: 4068 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 4069 "qla_target: unknown task code %x, use ORDERED instead\n", 4070 task_codes); 4071 fcp_task_attr = TCM_ORDERED_TAG; 4072 break; 4073 } 4074 4075 return fcp_task_attr; 4076 } 4077 4078 /* 4079 * Process context for I/O path into tcm_qla2xxx code 4080 */ 4081 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 4082 { 4083 scsi_qla_host_t *vha = cmd->vha; 4084 struct qla_hw_data *ha = vha->hw; 4085 struct fc_port *sess = cmd->sess; 4086 struct atio_from_isp *atio = &cmd->atio; 4087 unsigned char *cdb; 4088 unsigned long flags; 4089 uint32_t data_length; 4090 int ret, fcp_task_attr, data_dir, bidi = 0; 4091 struct qla_qpair *qpair = cmd->qpair; 4092 4093 cmd->cmd_in_wq = 0; 4094 cmd->trc_flags |= TRC_DO_WORK; 4095 4096 if (cmd->aborted) { 4097 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 4098 "cmd with tag %u is aborted\n", 4099 cmd->atio.u.isp24.exchange_addr); 4100 goto out_term; 4101 } 4102 4103 spin_lock_init(&cmd->cmd_lock); 4104 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 4105 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 4106 4107 if (atio->u.isp24.fcp_cmnd.rddata && 4108 atio->u.isp24.fcp_cmnd.wrdata) { 4109 bidi = 1; 4110 data_dir = DMA_TO_DEVICE; 4111 } else if (atio->u.isp24.fcp_cmnd.rddata) 4112 data_dir = DMA_FROM_DEVICE; 4113 else if (atio->u.isp24.fcp_cmnd.wrdata) 4114 data_dir = DMA_TO_DEVICE; 4115 else 4116 data_dir = DMA_NONE; 4117 4118 fcp_task_attr = qlt_get_fcp_task_attr(vha, 4119 atio->u.isp24.fcp_cmnd.task_attr); 4120 data_length = get_datalen_for_atio(atio); 4121 4122 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 4123 fcp_task_attr, data_dir, bidi); 4124 if (ret != 0) 4125 goto out_term; 4126 /* 4127 * Drop extra session reference from qlt_handle_cmd_for_atio(). 
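 * That reference was taken in qlt_handle_cmd_for_atio() via kref_get_unless_zero(&sess->sess_kref) before the command was queued, i.e. the pairing is (a sketch of the lifetime, not new logic): qlt_handle_cmd_for_atio(): kref_get_unless_zero(); __qlt_do_work()/out_term: tgt_ops->put_sess(); so the session cannot be freed while the command is in flight.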
4128 */ 4129 ha->tgt.tgt_ops->put_sess(sess); 4130 return; 4131 4132 out_term: 4133 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4134 /* 4135 * cmd has not been sent to the target yet, so pass NULL as the second 4136 * argument to qlt_send_term_exchange() and free the memory here. 4137 */ 4138 cmd->trc_flags |= TRC_DO_WORK_ERR; 4139 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 4140 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); 4141 4142 qlt_decr_num_pend_cmds(vha); 4143 target_free_tag(sess->se_sess, &cmd->se_cmd); 4144 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 4145 4146 ha->tgt.tgt_ops->put_sess(sess); 4147 } 4148 4149 static void qlt_do_work(struct work_struct *work) 4150 { 4151 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4152 scsi_qla_host_t *vha = cmd->vha; 4153 unsigned long flags; 4154 4155 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4156 list_del(&cmd->cmd_list); 4157 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4158 4159 __qlt_do_work(cmd); 4160 } 4161 4162 void qlt_clr_qp_table(struct scsi_qla_host *vha) 4163 { 4164 unsigned long flags; 4165 struct qla_hw_data *ha = vha->hw; 4166 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4167 void *node; 4168 u64 key = 0; 4169 4170 ql_log(ql_log_info, vha, 0x706c, 4171 "User update Number of Active Qpairs %d\n", 4172 ha->tgt.num_act_qpairs); 4173 4174 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4175 4176 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 4177 btree_remove64(&tgt->lun_qpair_map, key); 4178 4179 ha->base_qpair->lun_cnt = 0; 4180 for (key = 0; key < ha->max_qpairs; key++) 4181 if (ha->queue_pair_map[key]) 4182 ha->queue_pair_map[key]->lun_cnt = 0; 4183 4184 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4185 } 4186 4187 static void qlt_assign_qpair(struct scsi_qla_host *vha, 4188 struct qla_tgt_cmd *cmd) 4189 { 4190 struct qla_qpair *qpair, *qp; 4191 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4192 struct qla_qpair_hint *h; 4193 4194 if (vha->flags.qpairs_available) { 4195 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); 4196 if (unlikely(!h)) { 4197 /* spread the lun to qpair ratio evenly */ 4198 int lcnt = 0, rc; 4199 struct scsi_qla_host *base_vha = 4200 pci_get_drvdata(vha->hw->pdev); 4201 4202 qpair = vha->hw->base_qpair; 4203 if (qpair->lun_cnt == 0) { 4204 qpair->lun_cnt++; 4205 h = qla_qpair_to_hint(tgt, qpair); 4206 BUG_ON(!h); 4207 rc = btree_insert64(&tgt->lun_qpair_map, 4208 cmd->unpacked_lun, h, GFP_ATOMIC); 4209 if (rc) { 4210 qpair->lun_cnt--; 4211 ql_log(ql_log_info, vha, 0xd037, 4212 "Unable to insert lun %llx into lun_qpair_map\n", 4213 cmd->unpacked_lun); 4214 } 4215 goto out; 4216 } else { 4217 lcnt = qpair->lun_cnt; 4218 } 4219 4220 h = NULL; 4221 list_for_each_entry(qp, &base_vha->qp_list, 4222 qp_list_elem) { 4223 if (qp->lun_cnt == 0) { 4224 qp->lun_cnt++; 4225 h = qla_qpair_to_hint(tgt, qp); 4226 BUG_ON(!h); 4227 rc = btree_insert64(&tgt->lun_qpair_map, 4228 cmd->unpacked_lun, h, GFP_ATOMIC); 4229 if (rc) { 4230 qp->lun_cnt--; 4231 ql_log(ql_log_info, vha, 0xd038, 4232 "Unable to insert lun %llx into lun_qpair_map\n", 4233 cmd->unpacked_lun); 4234 } 4235 qpair = qp; 4236 goto out; 4237 } else { 4238 if (qp->lun_cnt < lcnt) { 4239 lcnt = qp->lun_cnt; 4240 qpair = qp; 4241 continue; 4242 } 4243 } 4244 } 4245 BUG_ON(!qpair); 4246 qpair->lun_cnt++; 4247 h = qla_qpair_to_hint(tgt, qpair); 4248 BUG_ON(!h); 4249 rc = btree_insert64(&tgt->lun_qpair_map, 4250 cmd->unpacked_lun, h, GFP_ATOMIC); 4251 if (rc) { 4252 qpair->lun_cnt--;
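/* Not fatal: the lookup above simply misses again and the insert is retried on the next command for this LUN. */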
4253 ql_log(ql_log_info, vha, 0xd039, 4254 "Unable to insert lun %llx into lun_qpair_map\n", 4255 cmd->unpacked_lun); 4256 } 4257 } 4258 } else { 4259 h = &tgt->qphints[0]; 4260 } 4261 out: 4262 cmd->qpair = h->qpair; 4263 cmd->se_cmd.cpuid = h->cpuid; 4264 } 4265 4266 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4267 struct fc_port *sess, 4268 struct atio_from_isp *atio) 4269 { 4270 struct se_session *se_sess = sess->se_sess; 4271 struct qla_tgt_cmd *cmd; 4272 int tag, cpu; 4273 4274 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 4275 if (tag < 0) 4276 return NULL; 4277 4278 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 4279 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 4280 cmd->cmd_type = TYPE_TGT_CMD; 4281 memcpy(&cmd->atio, atio, sizeof(*atio)); 4282 cmd->state = QLA_TGT_STATE_NEW; 4283 cmd->tgt = vha->vha_tgt.qla_tgt; 4284 qlt_incr_num_pend_cmds(vha); 4285 cmd->vha = vha; 4286 cmd->se_cmd.map_tag = tag; 4287 cmd->se_cmd.map_cpu = cpu; 4288 cmd->sess = sess; 4289 cmd->loop_id = sess->loop_id; 4290 cmd->conf_compl_supported = sess->conf_compl_supported; 4291 4292 cmd->trc_flags = 0; 4293 cmd->jiffies_at_alloc = get_jiffies_64(); 4294 4295 cmd->unpacked_lun = scsilun_to_int( 4296 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4297 qlt_assign_qpair(vha, cmd); 4298 cmd->reset_count = vha->hw->base_qpair->chip_reset; 4299 cmd->vp_idx = vha->vp_idx; 4300 4301 return cmd; 4302 } 4303 4304 /* ha->hardware_lock supposed to be held on entry */ 4305 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4306 struct atio_from_isp *atio) 4307 { 4308 struct qla_hw_data *ha = vha->hw; 4309 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4310 struct fc_port *sess; 4311 struct qla_tgt_cmd *cmd; 4312 unsigned long flags; 4313 port_id_t id; 4314 4315 if (unlikely(tgt->tgt_stop)) { 4316 ql_dbg(ql_dbg_io, vha, 0x3061, 4317 "New command while device %p is shutting down\n", tgt); 4318 return -ENODEV; 4319 } 4320 4321 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); 4322 if (IS_SW_RESV_ADDR(id)) 4323 return -EBUSY; 4324 4325 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4326 if (unlikely(!sess)) 4327 return -EFAULT; 4328 4329 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4330 * session deletion, but it's still in sess_del_work wq */ 4331 if (sess->deleted) { 4332 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, 4333 "New command while old session %p is being deleted\n", 4334 sess); 4335 return -EFAULT; 4336 } 4337 4338 /* 4339 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
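 * Otherwise a concurrent qlt_unreg_sess() could drop the last reference and free the session the moment the lock is released. The idiom used below (shown here only for emphasis): if (!kref_get_unless_zero(&sess->sess_kref)) return -EFAULT; /-asterisk- session is already dying.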
4340 */ 4341 if (!kref_get_unless_zero(&sess->sess_kref)) { 4342 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 4343 "%s: kref_get fail, %8phC oxid %x \n", 4344 __func__, sess->port_name, 4345 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 4346 return -EFAULT; 4347 } 4348 4349 cmd = qlt_get_tag(vha, sess, atio); 4350 if (!cmd) { 4351 ql_dbg(ql_dbg_io, vha, 0x3062, 4352 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4353 ha->tgt.tgt_ops->put_sess(sess); 4354 return -EBUSY; 4355 } 4356 4357 cmd->cmd_in_wq = 1; 4358 cmd->trc_flags |= TRC_NEW_CMD; 4359 4360 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4361 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4362 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4363 4364 INIT_WORK(&cmd->work, qlt_do_work); 4365 if (vha->flags.qpairs_available) { 4366 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); 4367 } else if (ha->msix_count) { 4368 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4369 queue_work_on(smp_processor_id(), qla_tgt_wq, 4370 &cmd->work); 4371 else 4372 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 4373 &cmd->work); 4374 } else { 4375 queue_work(qla_tgt_wq, &cmd->work); 4376 } 4377 4378 return 0; 4379 } 4380 4381 /* ha->hardware_lock supposed to be held on entry */ 4382 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 4383 int fn, void *iocb, int flags) 4384 { 4385 struct scsi_qla_host *vha = sess->vha; 4386 struct qla_hw_data *ha = vha->hw; 4387 struct qla_tgt_mgmt_cmd *mcmd; 4388 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4389 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 4390 4391 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4392 if (!mcmd) { 4393 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4394 "qla_target(%d): Allocation of management " 4395 "command failed, some commands and their data could " 4396 "leak\n", vha->vp_idx); 4397 return -ENOMEM; 4398 } 4399 memset(mcmd, 0, sizeof(*mcmd)); 4400 mcmd->sess = sess; 4401 4402 if (iocb) { 4403 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4404 sizeof(mcmd->orig_iocb.imm_ntfy)); 4405 } 4406 mcmd->tmr_func = fn; 4407 mcmd->flags = flags; 4408 mcmd->reset_count = ha->base_qpair->chip_reset; 4409 mcmd->qpair = h->qpair; 4410 mcmd->vha = vha; 4411 mcmd->se_cmd.cpuid = h->cpuid; 4412 mcmd->unpacked_lun = lun; 4413 4414 switch (fn) { 4415 case QLA_TGT_LUN_RESET: 4416 case QLA_TGT_CLEAR_TS: 4417 case QLA_TGT_ABORT_TS: 4418 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4419 /* fall through */ 4420 case QLA_TGT_CLEAR_ACA: 4421 h = qlt_find_qphint(vha, mcmd->unpacked_lun); 4422 mcmd->qpair = h->qpair; 4423 mcmd->se_cmd.cpuid = h->cpuid; 4424 break; 4425 4426 case QLA_TGT_TARGET_RESET: 4427 case QLA_TGT_NEXUS_LOSS_SESS: 4428 case QLA_TGT_NEXUS_LOSS: 4429 case QLA_TGT_ABORT_ALL: 4430 default: 4431 /* no-op */ 4432 break; 4433 } 4434 4435 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 4436 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, 4437 &mcmd->work); 4438 4439 return 0; 4440 } 4441 4442 /* ha->hardware_lock supposed to be held on entry */ 4443 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4444 { 4445 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4446 struct qla_hw_data *ha = vha->hw; 4447 struct fc_port *sess; 4448 u64 unpacked_lun; 4449 int fn; 4450 unsigned long flags; 4451 4452 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4453 4454 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4455 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4456 a->u.isp24.fcp_hdr.s_id); 4457 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 
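/* Decode the 8-byte FCP LUN from the ATIO into the 64-bit format the target core expects before issuing the TMF. */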
4458 4459 unpacked_lun = 4460 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4461 4462 if (sess == NULL || sess->deleted) 4463 return -EFAULT; 4464 4465 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4466 } 4467 4468 /* ha->hardware_lock supposed to be held on entry */ 4469 static int __qlt_abort_task(struct scsi_qla_host *vha, 4470 struct imm_ntfy_from_isp *iocb, struct fc_port *sess) 4471 { 4472 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4473 struct qla_hw_data *ha = vha->hw; 4474 struct qla_tgt_mgmt_cmd *mcmd; 4475 u64 unpacked_lun; 4476 int rc; 4477 4478 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4479 if (mcmd == NULL) { 4480 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4481 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4482 vha->vp_idx, __func__); 4483 return -ENOMEM; 4484 } 4485 memset(mcmd, 0, sizeof(*mcmd)); 4486 4487 mcmd->sess = sess; 4488 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4489 sizeof(mcmd->orig_iocb.imm_ntfy)); 4490 4491 unpacked_lun = 4492 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4493 mcmd->reset_count = ha->base_qpair->chip_reset; 4494 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; 4495 mcmd->qpair = ha->base_qpair; 4496 4497 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, 4498 le16_to_cpu(iocb->u.isp2x.seq_id)); 4499 if (rc != 0) { 4500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4501 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4502 vha->vp_idx, rc); 4503 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4504 return -EFAULT; 4505 } 4506 4507 return 0; 4508 } 4509 4510 /* ha->hardware_lock supposed to be held on entry */ 4511 static int qlt_abort_task(struct scsi_qla_host *vha, 4512 struct imm_ntfy_from_isp *iocb) 4513 { 4514 struct qla_hw_data *ha = vha->hw; 4515 struct fc_port *sess; 4516 int loop_id; 4517 unsigned long flags; 4518 4519 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4520 4521 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4522 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4523 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4524 4525 if (sess == NULL) { 4526 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4527 "qla_target(%d): task abort for unexisting " 4528 "session\n", vha->vp_idx); 4529 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4530 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4531 } 4532 4533 return __qlt_abort_task(vha, iocb, sess); 4534 } 4535 4536 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4537 { 4538 if (rc != MBS_COMMAND_COMPLETE) { 4539 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4540 "%s: se_sess %p / sess %p from" 4541 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4542 " LOGO failed: %#x\n", 4543 __func__, 4544 fcport->se_sess, 4545 fcport, 4546 fcport->port_name, fcport->loop_id, 4547 fcport->d_id.b.domain, fcport->d_id.b.area, 4548 fcport->d_id.b.al_pa, rc); 4549 } 4550 4551 fcport->logout_completed = 1; 4552 } 4553 4554 /* 4555 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4556 * 4557 * Schedules sessions with matching port_id/loop_id but different wwn for 4558 * deletion. Returns existing session with matching wwn if present. 4559 * Null otherwise. 
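 * * Summary of the collision handling below (an illustration, not extra logic): same WWN -> returned as 'sess'; same port_id but different loop_id -> schedule deletion; same port_id and same loop_id -> keep the nport handle, report via *conflict_sess, schedule deletion; same loop_id but different port_id -> schedule deletion.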
4560 */ 4561 struct fc_port * 4562 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, 4563 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) 4564 { 4565 struct fc_port *sess = NULL, *other_sess; 4566 uint64_t other_wwn; 4567 4568 *conflict_sess = NULL; 4569 4570 list_for_each_entry(other_sess, &vha->vp_fcports, list) { 4571 4572 other_wwn = wwn_to_u64(other_sess->port_name); 4573 4574 if (wwn == other_wwn) { 4575 WARN_ON(sess); 4576 sess = other_sess; 4577 continue; 4578 } 4579 4580 /* find other sess with nport_id collision */ 4581 if (port_id.b24 == other_sess->d_id.b24) { 4582 if (loop_id != other_sess->loop_id) { 4583 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, 4584 "Invalidating sess %p loop_id %d wwn %llx.\n", 4585 other_sess, other_sess->loop_id, other_wwn); 4586 4587 /* 4588 * logout_on_delete is set by default, but another 4589 * session that has the same s_id/loop_id combo 4590 * might have cleared it when requested this session 4591 * deletion, so don't touch it 4592 */ 4593 qlt_schedule_sess_for_deletion(other_sess); 4594 } else { 4595 /* 4596 * Another wwn used to have our s_id/loop_id 4597 * kill the session, but don't free the loop_id 4598 */ 4599 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, 4600 "Invalidating sess %p loop_id %d wwn %llx.\n", 4601 other_sess, other_sess->loop_id, other_wwn); 4602 4603 other_sess->keep_nport_handle = 1; 4604 if (other_sess->disc_state != DSC_DELETED) 4605 *conflict_sess = other_sess; 4606 qlt_schedule_sess_for_deletion(other_sess); 4607 } 4608 continue; 4609 } 4610 4611 /* find other sess with nport handle collision */ 4612 if ((loop_id == other_sess->loop_id) && 4613 (loop_id != FC_NO_LOOP_ID)) { 4614 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 4615 "Invalidating sess %p loop_id %d wwn %llx.\n", 4616 other_sess, other_sess->loop_id, other_wwn); 4617 4618 /* Same loop_id but different s_id 4619 * Ok to kill and logout */ 4620 qlt_schedule_sess_for_deletion(other_sess); 4621 } 4622 } 4623 4624 return sess; 4625 } 4626 4627 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4628 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4629 { 4630 struct qla_tgt_sess_op *op; 4631 struct qla_tgt_cmd *cmd; 4632 uint32_t key; 4633 int count = 0; 4634 unsigned long flags; 4635 4636 key = (((u32)s_id->b.domain << 16) | 4637 ((u32)s_id->b.area << 8) | 4638 ((u32)s_id->b.al_pa)); 4639 4640 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4641 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4642 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4643 4644 if (op_key == key) { 4645 op->aborted = true; 4646 count++; 4647 } 4648 } 4649 4650 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 4651 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4652 4653 if (op_key == key) { 4654 op->aborted = true; 4655 count++; 4656 } 4657 } 4658 4659 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4660 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4661 4662 if (cmd_key == key) { 4663 cmd->aborted = 1; 4664 count++; 4665 } 4666 } 4667 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4668 4669 return count; 4670 } 4671 4672 static int qlt_handle_login(struct scsi_qla_host *vha, 4673 struct imm_ntfy_from_isp *iocb) 4674 { 4675 struct fc_port *sess = NULL, *conflict_sess = NULL; 4676 uint64_t wwn; 4677 port_id_t port_id; 4678 uint16_t loop_id, wd3_lo; 4679 int res = 0; 4680 struct qlt_plogi_ack_t *pla; 4681 unsigned long flags; 4682 4683 
lockdep_assert_held(&vha->hw->hardware_lock); 4684 4685 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4686 4687 port_id.b.domain = iocb->u.isp24.port_id[2]; 4688 port_id.b.area = iocb->u.isp24.port_id[1]; 4689 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4690 port_id.b.rsvd_1 = 0; 4691 4692 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4693 4694 /* Mark all stale commands sitting in qla_tgt_wq for deletion */ 4695 abort_cmds_for_s_id(vha, &port_id); 4696 4697 if (wwn) { 4698 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4699 sess = qlt_find_sess_invalidate_other(vha, wwn, 4700 port_id, loop_id, &conflict_sess); 4701 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4702 } else { 4703 ql_dbg(ql_dbg_disc, vha, 0xffff, 4704 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ", 4705 __func__, __LINE__, loop_id, port_id.b24); 4706 qlt_send_term_imm_notif(vha, iocb, 1); 4707 goto out; 4708 } 4709 4710 if (IS_SW_RESV_ADDR(port_id)) { 4711 res = 1; 4712 goto out; 4713 } 4714 4715 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4716 if (!pla) { 4717 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 4718 "%s %d %8phC Term INOT due to mem alloc fail", 4719 __func__, __LINE__, 4720 iocb->u.isp24.port_name); 4721 qlt_send_term_imm_notif(vha, iocb, 1); 4722 goto out; 4723 } 4724 4725 if (conflict_sess) { 4726 conflict_sess->login_gen++; 4727 qlt_plogi_ack_link(vha, pla, conflict_sess, 4728 QLT_PLOGI_LINK_CONFLICT); 4729 } 4730 4731 if (!sess) { 4732 pla->ref_count++; 4733 ql_dbg(ql_dbg_disc, vha, 0xffff, 4734 "%s %d %8phC post new sess\n", 4735 __func__, __LINE__, iocb->u.isp24.port_name); 4736 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 4737 qla24xx_post_newsess_work(vha, &port_id, 4738 iocb->u.isp24.port_name, 4739 iocb->u.isp24.u.plogi.node_name, 4740 pla, FC4_TYPE_UNKNOWN); 4741 else 4742 qla24xx_post_newsess_work(vha, &port_id, 4743 iocb->u.isp24.port_name, NULL, 4744 pla, FC4_TYPE_UNKNOWN); 4745 4746 goto out; 4747 } 4748 4749 if (sess->disc_state == DSC_UPD_FCPORT) { 4750 u16 sec; 4751 4752 /* 4753 * Remote port registration is still going on from 4754 * previous login. Allow it to finish before we 4755 * accept the new login. 
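 * The immediate notify is terminated below, which makes the initiator retry the login; the "Slow Rport registration" message is emitted at most once per five-second step (the !(sec % 5) check) so a stalled registration does not flood the log.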
4756 */ 4757 sess->next_disc_state = DSC_DELETE_PEND; 4758 sec = jiffies_to_msecs(jiffies - 4759 sess->jiffies_at_registration) / 1000; 4760 if (sess->sec_since_registration < sec && sec && 4761 !(sec % 5)) { 4762 sess->sec_since_registration = sec; 4763 ql_dbg(ql_dbg_disc, vha, 0xffff, 4764 "%s %8phC - Slow Rport registration (%d Sec)\n", 4765 __func__, sess->port_name, sec); 4766 } 4767 4768 if (!conflict_sess) { 4769 list_del(&pla->list); 4770 kmem_cache_free(qla_tgt_plogi_cachep, pla); 4771 } 4772 4773 qlt_send_term_imm_notif(vha, iocb, 1); 4774 goto out; 4775 } 4776 4777 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4778 sess->d_id = port_id; 4779 sess->login_gen++; 4780 4781 if (iocb->u.isp24.status_subcode == ELS_PRLI) { 4782 sess->fw_login_state = DSC_LS_PRLI_PEND; 4783 sess->local = 0; 4784 sess->loop_id = loop_id; 4785 sess->d_id = port_id; 4786 sess->fw_login_state = DSC_LS_PRLI_PEND; 4787 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4788 4789 if (wd3_lo & BIT_7) 4790 sess->conf_compl_supported = 1; 4791 4792 if ((wd3_lo & BIT_4) == 0) 4793 sess->port_type = FCT_INITIATOR; 4794 else 4795 sess->port_type = FCT_TARGET; 4796 4797 } else 4798 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4799 4800 4801 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4802 "%s %d %8phC DS %d\n", 4803 __func__, __LINE__, sess->port_name, sess->disc_state); 4804 4805 switch (sess->disc_state) { 4806 case DSC_DELETED: 4807 qlt_plogi_ack_unref(vha, pla); 4808 break; 4809 4810 default: 4811 /* 4812 * Under normal circumstances we want to release nport handle 4813 * during LOGO process to avoid nport handle leaks inside FW. 4814 * The exception is when LOGO is done while another PLOGI with 4815 * the same nport handle is waiting as might be the case here. 4816 * Note: there is always a possibily of a race where session 4817 * deletion has already started for other reasons (e.g. ACL 4818 * removal) and now PLOGI arrives: 4819 * 1. if PLOGI arrived in FW after nport handle has been freed, 4820 * FW must have assigned this PLOGI a new/same handle and we 4821 * can proceed ACK'ing it as usual when session deletion 4822 * completes. 4823 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT 4824 * bit reached it, the handle has now been released. We'll 4825 * get an error when we ACK this PLOGI. Nothing will be sent 4826 * back to initiator. Initiator should eventually retry 4827 * PLOGI and situation will correct itself. 4828 */ 4829 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4830 (sess->d_id.b24 == port_id.b24)); 4831 4832 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4833 "%s %d %8phC post del sess\n", 4834 __func__, __LINE__, sess->port_name); 4835 4836 4837 qlt_schedule_sess_for_deletion(sess); 4838 break; 4839 } 4840 out: 4841 return res; 4842 } 4843 4844 /* 4845 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire 4846 */ 4847 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4848 struct imm_ntfy_from_isp *iocb) 4849 { 4850 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4851 struct qla_hw_data *ha = vha->hw; 4852 struct fc_port *sess = NULL, *conflict_sess = NULL; 4853 uint64_t wwn; 4854 port_id_t port_id; 4855 uint16_t loop_id; 4856 uint16_t wd3_lo; 4857 int res = 0; 4858 unsigned long flags; 4859 4860 lockdep_assert_held(&ha->hardware_lock); 4861 4862 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4863 4864 port_id.b.domain = iocb->u.isp24.port_id[2]; 4865 port_id.b.area = iocb->u.isp24.port_id[1]; 4866 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4867 port_id.b.rsvd_1 = 0; 4868 4869 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4870 4871 ql_dbg(ql_dbg_disc, vha, 0xf026, 4872 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", 4873 vha->vp_idx, iocb->u.isp24.port_id[2], 4874 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 4875 iocb->u.isp24.status_subcode, loop_id, 4876 iocb->u.isp24.port_name); 4877 4878 /* res = 1 means ack at the end of this thread 4879 * res = 0 means ack async/later. 4880 */ 4881 switch (iocb->u.isp24.status_subcode) { 4882 case ELS_PLOGI: 4883 res = qlt_handle_login(vha, iocb); 4884 break; 4885 4886 case ELS_PRLI: 4887 if (N2N_TOPO(ha)) { 4888 sess = qla2x00_find_fcport_by_wwpn(vha, 4889 iocb->u.isp24.port_name, 1); 4890 4891 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { 4892 ql_dbg(ql_dbg_disc, vha, 0xffff, 4893 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", 4894 __func__, __LINE__, 4895 iocb->u.isp24.port_name); 4896 qlt_send_term_imm_notif(vha, iocb, 1); 4897 break; 4898 } 4899 4900 res = qlt_handle_login(vha, iocb); 4901 break; 4902 } 4903 4904 if (IS_SW_RESV_ADDR(port_id)) { 4905 res = 1; 4906 break; 4907 } 4908 4909 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4910 4911 if (wwn) { 4912 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4913 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, 4914 loop_id, &conflict_sess); 4915 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4916 } 4917 4918 if (conflict_sess) { 4919 switch (conflict_sess->disc_state) { 4920 case DSC_DELETED: 4921 case DSC_DELETE_PEND: 4922 break; 4923 default: 4924 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4925 "PRLI with conflicting sess %p port %8phC\n", 4926 conflict_sess, conflict_sess->port_name); 4927 conflict_sess->fw_login_state = 4928 DSC_LS_PORT_UNAVAIL; 4929 qlt_send_term_imm_notif(vha, iocb, 1); 4930 res = 0; 4931 break; 4932 } 4933 } 4934 4935 if (sess != NULL) { 4936 bool delete = false; 4937 int sec; 4938 4939 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4940 switch (sess->fw_login_state) { 4941 case DSC_LS_PLOGI_PEND: 4942 case DSC_LS_PLOGI_COMP: 4943 case DSC_LS_PRLI_COMP: 4944 break; 4945 default: 4946 delete = true; 4947 break; 4948 } 4949 4950 switch (sess->disc_state) { 4951 case DSC_UPD_FCPORT: 4952 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 4953 flags); 4954 4955 sec = jiffies_to_msecs(jiffies - 4956 sess->jiffies_at_registration)/1000; 4957 if (sess->sec_since_registration < sec && sec && 4958 !(sec % 5)) { 4959 sess->sec_since_registration = sec; 4960 ql_dbg(ql_dbg_disc, sess->vha, 0xffff, 4961 "%s %8phC : Slow Rport registration (%d Sec)\n", 4962 __func__, sess->port_name, sec); 4963 } 4964 qlt_send_term_imm_notif(vha, iocb, 1); 4965 return 0; 4966 4967 case DSC_LOGIN_PEND: 4968 case DSC_GPDB: 4969 case DSC_LOGIN_COMPLETE: 4970 case DSC_ADISC: 4971 delete = false; 4972 break;
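/* Any other disc_state keeps the 'delete' verdict already taken from fw_login_state above. */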
4973 default: 4974 break; 4975 } 4976 4977 if (delete) { 4978 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 4979 flags); 4980 /* 4981 * Impatient initiator sent PRLI before last 4982 * PLOGI could finish. Will force him to re-try, 4983 * while last one finishes. 4984 */ 4985 ql_log(ql_log_warn, sess->vha, 0xf095, 4986 "sess %p PRLI received, before plogi ack.\n", 4987 sess); 4988 qlt_send_term_imm_notif(vha, iocb, 1); 4989 res = 0; 4990 break; 4991 } 4992 4993 /* 4994 * This shouldn't happen under normal circumstances, 4995 * since we have deleted the old session during PLOGI 4996 */ 4997 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 4998 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 4999 sess->loop_id, sess, iocb->u.isp24.nport_handle); 5000 5001 sess->local = 0; 5002 sess->loop_id = loop_id; 5003 sess->d_id = port_id; 5004 sess->fw_login_state = DSC_LS_PRLI_PEND; 5005 5006 if (wd3_lo & BIT_7) 5007 sess->conf_compl_supported = 1; 5008 5009 if ((wd3_lo & BIT_4) == 0) 5010 sess->port_type = FCT_INITIATOR; 5011 else 5012 sess->port_type = FCT_TARGET; 5013 5014 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 5015 } 5016 res = 1; /* send notify ack */ 5017 5018 /* Make session global (not used in fabric mode) */ 5019 if (ha->current_topology != ISP_CFG_F) { 5020 if (sess) { 5021 ql_dbg(ql_dbg_disc, vha, 0x20fa, 5022 "%s %d %8phC post nack\n", 5023 __func__, __LINE__, sess->port_name); 5024 qla24xx_post_nack_work(vha, sess, iocb, 5025 SRB_NACK_PRLI); 5026 res = 0; 5027 } else { 5028 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 5029 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 5030 qla2xxx_wake_dpc(vha); 5031 } 5032 } else { 5033 if (sess) { 5034 ql_dbg(ql_dbg_disc, vha, 0x20fb, 5035 "%s %d %8phC post nack\n", 5036 __func__, __LINE__, sess->port_name); 5037 qla24xx_post_nack_work(vha, sess, iocb, 5038 SRB_NACK_PRLI); 5039 res = 0; 5040 } 5041 } 5042 break; 5043 5044 case ELS_TPRLO: 5045 if (le16_to_cpu(iocb->u.isp24.flags) & 5046 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 5047 loop_id = 0xFFFF; 5048 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); 5049 res = 1; 5050 break; 5051 } 5052 /* fall through */ 5053 case ELS_LOGO: 5054 case ELS_PRLO: 5055 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5056 sess = qla2x00_find_fcport_by_loopid(vha, loop_id); 5057 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5058 5059 if (sess) { 5060 sess->login_gen++; 5061 sess->fw_login_state = DSC_LS_LOGO_PEND; 5062 sess->logo_ack_needed = 1; 5063 memcpy(sess->iocb, iocb, IOCB_SIZE); 5064 } 5065 5066 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5067 5068 ql_dbg(ql_dbg_disc, vha, 0x20fc, 5069 "%s: logo %llx res %d sess %p ", 5070 __func__, wwn, res, sess); 5071 if (res == 0) { 5072 /* 5073 * cmd went upper layer, look for qlt_xmit_tm_rsp() 5074 * for LOGO_ACK & sess delete 5075 */ 5076 BUG_ON(!sess); 5077 res = 0; 5078 } else { 5079 /* cmd did not go to upper layer. 
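 * Clean up locally instead: schedule the session for deletion here, or, if no session was found, let the notify-ack path acknowledge the LOGO.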
*/ 5080 if (sess) { 5081 qlt_schedule_sess_for_deletion(sess); 5082 res = 0; 5083 } 5084 /* else the logo will be acked */ 5085 } 5086 break; 5087 case ELS_PDISC: 5088 case ELS_ADISC: 5089 { 5090 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5091 5092 if (tgt->link_reinit_iocb_pending) { 5093 qlt_send_notify_ack(ha->base_qpair, 5094 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5095 tgt->link_reinit_iocb_pending = 0; 5096 } 5097 5098 sess = qla2x00_find_fcport_by_wwpn(vha, 5099 iocb->u.isp24.port_name, 1); 5100 if (sess) { 5101 ql_dbg(ql_dbg_disc, vha, 0x20fd, 5102 "sess %p lid %d|%d DS %d LS %d\n", 5103 sess, sess->loop_id, loop_id, 5104 sess->disc_state, sess->fw_login_state); 5105 } 5106 5107 res = 1; /* send notify ack */ 5108 break; 5109 } 5110 5111 case ELS_FLOGI: /* should never happen */ 5112 default: 5113 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 5114 "qla_target(%d): Unsupported ELS command %x " 5115 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 5116 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5117 break; 5118 } 5119 5120 ql_dbg(ql_dbg_disc, vha, 0xf026, 5121 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", 5122 vha->vp_idx, iocb->u.isp24.status_subcode, res); 5123 5124 return res; 5125 } 5126 5127 /* 5128 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 5129 */ 5130 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 5131 struct imm_ntfy_from_isp *iocb) 5132 { 5133 struct qla_hw_data *ha = vha->hw; 5134 uint32_t add_flags = 0; 5135 int send_notify_ack = 1; 5136 uint16_t status; 5137 5138 lockdep_assert_held(&ha->hardware_lock); 5139 5140 status = le16_to_cpu(iocb->u.isp2x.status); 5141 switch (status) { 5142 case IMM_NTFY_LIP_RESET: 5143 { 5144 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 5145 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 5146 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 5147 iocb->u.isp24.status_subcode); 5148 5149 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5150 send_notify_ack = 0; 5151 break; 5152 } 5153 5154 case IMM_NTFY_LIP_LINK_REINIT: 5155 { 5156 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5157 5158 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 5159 "qla_target(%d): LINK REINIT (loop %#x, " 5160 "subcode %x)\n", vha->vp_idx, 5161 le16_to_cpu(iocb->u.isp24.nport_handle), 5162 iocb->u.isp24.status_subcode); 5163 if (tgt->link_reinit_iocb_pending) { 5164 qlt_send_notify_ack(ha->base_qpair, 5165 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5166 } 5167 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 5168 tgt->link_reinit_iocb_pending = 1; 5169 /* 5170 * QLogic requires waiting after LINK REINIT for possible 5171 * PDISC or ADISC ELS commands 5172 */ 5173 send_notify_ack = 0; 5174 break; 5175 } 5176 5177 case IMM_NTFY_PORT_LOGOUT: 5178 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 5179 "qla_target(%d): Port logout (loop " 5180 "%#x, subcode %x)\n", vha->vp_idx, 5181 le16_to_cpu(iocb->u.isp24.nport_handle), 5182 iocb->u.isp24.status_subcode); 5183 5184 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 5185 send_notify_ack = 0; 5186 /* The sessions will be cleared in the callback, if needed */ 5187 break; 5188 5189 case IMM_NTFY_GLBL_TPRLO: 5190 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 5191 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 5192 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5193 send_notify_ack = 0; 5194 /* The sessions will be cleared in the callback, if needed */ 5195 break; 5196 5197 case IMM_NTFY_PORT_CONFIG: 5198 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 5199 "qla_target(%d): Port config
changed (%x)\n", vha->vp_idx, 5200 status); 5201 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5202 send_notify_ack = 0; 5203 /* The sessions will be cleared in the callback, if needed */ 5204 break; 5205 5206 case IMM_NTFY_GLBL_LOGO: 5207 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 5208 "qla_target(%d): Link failure detected\n", 5209 vha->vp_idx); 5210 /* I_T nexus loss */ 5211 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5212 send_notify_ack = 0; 5213 break; 5214 5215 case IMM_NTFY_IOCB_OVERFLOW: 5216 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 5217 "qla_target(%d): Cannot provide requested " 5218 "capability (IOCB overflowed the immediate notify " 5219 "resource count)\n", vha->vp_idx); 5220 break; 5221 5222 case IMM_NTFY_ABORT_TASK: 5223 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 5224 "qla_target(%d): Abort Task (S %08x I %#x -> " 5225 "L %#x)\n", vha->vp_idx, 5226 le16_to_cpu(iocb->u.isp2x.seq_id), 5227 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 5228 le16_to_cpu(iocb->u.isp2x.lun)); 5229 if (qlt_abort_task(vha, iocb) == 0) 5230 send_notify_ack = 0; 5231 break; 5232 5233 case IMM_NTFY_RESOURCE: 5234 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 5235 "qla_target(%d): Out of resources, host %ld\n", 5236 vha->vp_idx, vha->host_no); 5237 break; 5238 5239 case IMM_NTFY_MSG_RX: 5240 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 5241 "qla_target(%d): Immediate notify task %x\n", 5242 vha->vp_idx, iocb->u.isp2x.task_flags); 5243 break; 5244 5245 case IMM_NTFY_ELS: 5246 if (qlt_24xx_handle_els(vha, iocb) == 0) 5247 send_notify_ack = 0; 5248 break; 5249 default: 5250 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 5251 "qla_target(%d): Received unknown immediate " 5252 "notify status %x\n", vha->vp_idx, status); 5253 break; 5254 } 5255 5256 if (send_notify_ack) 5257 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, 5258 0, 0); 5259 } 5260 5261 /* 5262 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 5263 * This function sends busy to ISP 2xxx or 24xx.
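 * * The busy status rides in a status-only (mode 1) CTIO7; roughly, as the fields are set in the function below: flags = attr | STATUS_MODE_1 | SEND_STATUS | DONT_RET_CTIO; scsi_status = SAM_STAT_BUSY or SAM_STAT_TASK_SET_FULL; residual = full expected transfer length (with SS_RESIDUAL_UNDER when non-zero).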
5264 */ 5265 static int __qlt_send_busy(struct qla_qpair *qpair, 5266 struct atio_from_isp *atio, uint16_t status) 5267 { 5268 struct scsi_qla_host *vha = qpair->vha; 5269 struct ctio7_to_24xx *ctio24; 5270 struct qla_hw_data *ha = vha->hw; 5271 request_t *pkt; 5272 struct fc_port *sess = NULL; 5273 unsigned long flags; 5274 u16 temp; 5275 port_id_t id; 5276 5277 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); 5278 5279 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5280 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); 5281 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5282 if (!sess) { 5283 qlt_send_term_exchange(qpair, NULL, atio, 1, 0); 5284 return 0; 5285 } 5286 /* Sending marker isn't necessary, since we're called from ISR */ 5287 5288 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 5289 if (!pkt) { 5290 ql_dbg(ql_dbg_io, vha, 0x3063, 5291 "qla_target(%d): %s failed: unable to allocate " 5292 "request packet", vha->vp_idx, __func__); 5293 return -ENOMEM; 5294 } 5295 5296 qpair->tgt_counters.num_q_full_sent++; 5297 pkt->entry_count = 1; 5298 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5299 5300 ctio24 = (struct ctio7_to_24xx *)pkt; 5301 ctio24->entry_type = CTIO_TYPE7; 5302 ctio24->nport_handle = sess->loop_id; 5303 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5304 ctio24->vp_index = vha->vp_idx; 5305 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 5306 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5307 temp = (atio->u.isp24.attr << 9) | 5308 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5309 CTIO7_FLAGS_DONT_RET_CTIO; 5310 ctio24->u.status1.flags = cpu_to_le16(temp); 5311 /* 5312 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, 5313 * if explicit confirmation is used. 5314 */ 5315 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5316 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5317 5318 ctio24->u.status1.residual = get_datalen_for_atio(atio); 5319 5320 if (ctio24->u.status1.residual != 0) 5321 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 5322 5323 /* Memory Barrier */ 5324 wmb(); 5325 if (qpair->reqq_start_iocbs) 5326 qpair->reqq_start_iocbs(qpair); 5327 else 5328 qla2x00_start_iocbs(vha, qpair->req); 5329 return 0; 5330 } 5331 5332 /* 5333 * This routine is used to allocate a command for either a QFull condition 5334 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5335 * out previously.
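 * * The allocated command is parked on tgt.q_full_list and replayed later by qlt_free_qfull_cmds(); cmd->q_full selects the deferred BUSY reply and cmd->term_exchg the deferred terminate.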
5336 */ 5337 static void 5338 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5339 struct atio_from_isp *atio, uint16_t status, int qfull) 5340 { 5341 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5342 struct qla_hw_data *ha = vha->hw; 5343 struct fc_port *sess; 5344 struct se_session *se_sess; 5345 struct qla_tgt_cmd *cmd; 5346 int tag, cpu; 5347 unsigned long flags; 5348 5349 if (unlikely(tgt->tgt_stop)) { 5350 ql_dbg(ql_dbg_io, vha, 0x300a, 5351 "New command while device %p is shutting down\n", tgt); 5352 return; 5353 } 5354 5355 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5356 vha->hw->tgt.num_qfull_cmds_dropped++; 5357 if (vha->hw->tgt.num_qfull_cmds_dropped > 5358 vha->qla_stats.stat_max_qfull_cmds_dropped) 5359 vha->qla_stats.stat_max_qfull_cmds_dropped = 5360 vha->hw->tgt.num_qfull_cmds_dropped; 5361 5362 ql_dbg(ql_dbg_io, vha, 0x3068, 5363 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5364 vha->vp_idx, __func__, 5365 vha->hw->tgt.num_qfull_cmds_dropped); 5366 5367 qlt_chk_exch_leak_thresh_hold(vha); 5368 return; 5369 } 5370 5371 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5372 (vha, atio->u.isp24.fcp_hdr.s_id); 5373 if (!sess) 5374 return; 5375 5376 se_sess = sess->se_sess; 5377 5378 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 5379 if (tag < 0) { 5380 ql_dbg(ql_dbg_io, vha, 0x3009, 5381 "qla_target(%d): %s: Allocation of cmd failed\n", 5382 vha->vp_idx, __func__); 5383 5384 vha->hw->tgt.num_qfull_cmds_dropped++; 5385 if (vha->hw->tgt.num_qfull_cmds_dropped > 5386 vha->qla_stats.stat_max_qfull_cmds_dropped) 5387 vha->qla_stats.stat_max_qfull_cmds_dropped = 5388 vha->hw->tgt.num_qfull_cmds_dropped; 5389 5390 qlt_chk_exch_leak_thresh_hold(vha); 5391 return; 5392 } 5393 5394 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5395 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5396 5397 qlt_incr_num_pend_cmds(vha); 5398 INIT_LIST_HEAD(&cmd->cmd_list); 5399 memcpy(&cmd->atio, atio, sizeof(*atio)); 5400 5401 cmd->tgt = vha->vha_tgt.qla_tgt; 5402 cmd->vha = vha; 5403 cmd->reset_count = ha->base_qpair->chip_reset; 5404 cmd->q_full = 1; 5405 cmd->qpair = ha->base_qpair; 5406 cmd->se_cmd.map_cpu = cpu; 5407 5408 if (qfull) { 5409 cmd->q_full = 1; 5410 /* NOTE: borrowing the state field to carry the status */ 5411 cmd->state = status; 5412 } else 5413 cmd->term_exchg = 1; 5414 5415 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5416 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5417 5418 vha->hw->tgt.num_qfull_cmds_alloc++; 5419 if (vha->hw->tgt.num_qfull_cmds_alloc > 5420 vha->qla_stats.stat_max_qfull_cmds_alloc) 5421 vha->qla_stats.stat_max_qfull_cmds_alloc = 5422 vha->hw->tgt.num_qfull_cmds_alloc; 5423 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5424 } 5425 5426 int 5427 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5428 { 5429 struct scsi_qla_host *vha = qpair->vha; 5430 struct qla_hw_data *ha = vha->hw; 5431 unsigned long flags; 5432 struct qla_tgt_cmd *cmd, *tcmd; 5433 struct list_head free_list, q_full_list; 5434 int rc = 0; 5435 5436 if (list_empty(&ha->tgt.q_full_list)) 5437 return 0; 5438 5439 INIT_LIST_HEAD(&free_list); 5440 INIT_LIST_HEAD(&q_full_list); 5441 5442 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5443 if (list_empty(&ha->tgt.q_full_list)) { 5444 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5445 return 0; 5446 } 5447 5448 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5449 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5450 5451 
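/* Replay each parked command under the qpair lock: send the deferred BUSY or terminate the exchange, then collect the entries on free_list so they can be freed outside the lock. */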
spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5452 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5453 if (cmd->q_full) 5454 /* cmd->state is a borrowed field to hold status */ 5455 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5456 else if (cmd->term_exchg) 5457 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5458 5459 if (rc == -ENOMEM) 5460 break; 5461 5462 if (cmd->q_full) 5463 ql_dbg(ql_dbg_io, vha, 0x3006, 5464 "%s: busy sent for ox_id[%04x]\n", __func__, 5465 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5466 else if (cmd->term_exchg) 5467 ql_dbg(ql_dbg_io, vha, 0x3007, 5468 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5469 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5470 else 5471 ql_dbg(ql_dbg_io, vha, 0x3008, 5472 "%s: Unexpected cmd in QFull list %p\n", __func__, 5473 cmd); 5474 5475 list_del(&cmd->cmd_list); 5476 list_add_tail(&cmd->cmd_list, &free_list); 5477 5478 /* piggy back on hardware_lock for protection */ 5479 vha->hw->tgt.num_qfull_cmds_alloc--; 5480 } 5481 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5482 5483 cmd = NULL; 5484 5485 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5486 list_del(&cmd->cmd_list); 5487 /* This cmd was never sent to TCM. There is no need 5488 * to schedule free or call free_cmd 5489 */ 5490 qlt_free_cmd(cmd); 5491 } 5492 5493 if (!list_empty(&q_full_list)) { 5494 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5495 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5496 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5497 } 5498 5499 return rc; 5500 } 5501 5502 static void 5503 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5504 uint16_t status) 5505 { 5506 int rc = 0; 5507 struct scsi_qla_host *vha = qpair->vha; 5508 5509 rc = __qlt_send_busy(qpair, atio, status); 5510 if (rc == -ENOMEM) 5511 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5512 } 5513 5514 static int 5515 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5516 struct atio_from_isp *atio, uint8_t ha_locked) 5517 { 5518 struct qla_hw_data *ha = vha->hw; 5519 unsigned long flags; 5520 5521 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5522 return 0; 5523 5524 if (!ha_locked) 5525 spin_lock_irqsave(&ha->hardware_lock, flags); 5526 qlt_send_busy(qpair, atio, qla_sam_status); 5527 if (!ha_locked) 5528 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5529 5530 return 1; 5531 } 5532 5533 /* ha->hardware_lock supposed to be held on entry */ 5534 /* called via callback from qla2xxx */ 5535 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5536 struct atio_from_isp *atio, uint8_t ha_locked) 5537 { 5538 struct qla_hw_data *ha = vha->hw; 5539 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5540 int rc; 5541 unsigned long flags = 0; 5542 5543 if (unlikely(tgt == NULL)) { 5544 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5545 "ATIO pkt, but no tgt (ha %p)", ha); 5546 return; 5547 } 5548 /* 5549 * In tgt_stop mode we also should allow all requests to pass. 5550 * Otherwise, some commands can stuck. 
5551 */ 5552 5553 tgt->atio_irq_cmd_count++; 5554 5555 switch (atio->u.raw.entry_type) { 5556 case ATIO_TYPE7: 5557 if (unlikely(atio->u.isp24.exchange_addr == 5558 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5559 ql_dbg(ql_dbg_io, vha, 0x3065, 5560 "qla_target(%d): ATIO_TYPE7 " 5561 "received with UNKNOWN exchange address, " 5562 "sending QUEUE_FULL\n", vha->vp_idx); 5563 if (!ha_locked) 5564 spin_lock_irqsave(&ha->hardware_lock, flags); 5565 qlt_send_busy(ha->base_qpair, atio, qla_sam_status); 5566 if (!ha_locked) 5567 spin_unlock_irqrestore(&ha->hardware_lock, 5568 flags); 5569 break; 5570 } 5571 5572 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5573 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, 5574 atio, ha_locked); 5575 if (rc != 0) { 5576 tgt->atio_irq_cmd_count--; 5577 return; 5578 } 5579 rc = qlt_handle_cmd_for_atio(vha, atio); 5580 } else { 5581 rc = qlt_handle_task_mgmt(vha, atio); 5582 } 5583 if (unlikely(rc != 0)) { 5584 if (!ha_locked) 5585 spin_lock_irqsave(&ha->hardware_lock, flags); 5586 switch (rc) { 5587 case -ENODEV: 5588 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5589 "qla_target: Unable to send command to target\n"); 5590 break; 5591 case -EBADF: 5592 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5593 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5594 qlt_send_term_exchange(ha->base_qpair, NULL, 5595 atio, 1, 0); 5596 break; 5597 case -EBUSY: 5598 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5599 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5600 vha->vp_idx); 5601 qlt_send_busy(ha->base_qpair, atio, 5602 tc_sam_status); 5603 break; 5604 default: 5605 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5606 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5607 vha->vp_idx); 5608 qlt_send_busy(ha->base_qpair, atio, 5609 qla_sam_status); 5610 break; 5611 } 5612 if (!ha_locked) 5613 spin_unlock_irqrestore(&ha->hardware_lock, 5614 flags); 5615 } 5616 break; 5617 5618 case IMMED_NOTIFY_TYPE: 5619 { 5620 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5621 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5622 "qla_target(%d): Received ATIO packet %x " 5623 "with error status %x\n", vha->vp_idx, 5624 atio->u.raw.entry_type, 5625 atio->u.isp2x.entry_status); 5626 break; 5627 } 5628 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5629 5630 if (!ha_locked) 5631 spin_lock_irqsave(&ha->hardware_lock, flags); 5632 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5633 if (!ha_locked) 5634 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5635 break; 5636 } 5637 5638 default: 5639 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5640 "qla_target(%d): Received unknown ATIO packet " 5641 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5642 break; 5643 } 5644 5645 tgt->atio_irq_cmd_count--; 5646 } 5647 5648 /* 5649 * qpair lock is assumed to be held 5650 * rc = 0 : send terminate & ABTS response 5651 * rc != 0: do not send term & ABTS response 5652 */ 5653 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, 5654 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry) 5655 { 5656 struct qla_hw_data *ha = vha->hw; 5657 int rc = 0; 5658 5659 /* 5660 * Detect unresolved exchange. If the same ABTS is unable 5661 * to terminate an existing command and the same ABTS loops 5662 * between FW & Driver, then force a FW dump. Within one jiffy, 5663 * we should see multiple loops.
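 * The guard below amounts to (a sketch of the bookkeeping only): same exchange and same jiffy -> ++retry_term_cnt; retry_term_cnt >= 5 -> firmware dump plus ISP_ABORT_NEEDED; a new jiffy -> restart tracking at the current exchange.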
5664 */ 5665 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort && 5666 qpair->retry_term_jiff == jiffies) { 5667 /* found existing exchange */ 5668 qpair->retry_term_cnt++; 5669 if (qpair->retry_term_cnt >= 5) { 5670 rc = EIO; 5671 qpair->retry_term_cnt = 0; 5672 ql_log(ql_log_warn, vha, 0xffff, 5673 "Unable to send ABTS Respond. Dumping firmware.\n"); 5674 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer, 5675 vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); 5676 5677 if (qpair == ha->base_qpair) 5678 ha->isp_ops->fw_dump(vha, 1); 5679 else 5680 ha->isp_ops->fw_dump(vha, 0); 5681 5682 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5683 qla2xxx_wake_dpc(vha); 5684 } 5685 } else if (qpair->retry_term_jiff != jiffies) { 5686 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort; 5687 qpair->retry_term_cnt = 0; 5688 qpair->retry_term_jiff = jiffies; 5689 } 5690 5691 return rc; 5692 } 5693 5694 5695 static void qlt_handle_abts_completion(struct scsi_qla_host *vha, 5696 struct rsp_que *rsp, response_t *pkt) 5697 { 5698 struct abts_resp_from_24xx_fw *entry = 5699 (struct abts_resp_from_24xx_fw *)pkt; 5700 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; 5701 struct qla_tgt_mgmt_cmd *mcmd; 5702 struct qla_hw_data *ha = vha->hw; 5703 5704 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt); 5705 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { 5706 ql_dbg(ql_dbg_async, vha, 0xe064, 5707 "qla_target(%d): ABTS Comp without mcmd\n", 5708 vha->vp_idx); 5709 return; 5710 } 5711 5712 if (mcmd) 5713 vha = mcmd->vha; 5714 vha->vha_tgt.qla_tgt->abts_resp_expected--; 5715 5716 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5717 "ABTS_RESP_24XX: compl_status %x\n", 5718 entry->compl_status); 5719 5720 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { 5721 if ((entry->error_subcode1 == 0x1E) && 5722 (entry->error_subcode2 == 0)) { 5723 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { 5724 ha->tgt.tgt_ops->free_mcmd(mcmd); 5725 return; 5726 } 5727 qlt_24xx_retry_term_exchange(vha, rsp->qpair, 5728 pkt, mcmd); 5729 } else { 5730 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5731 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", 5732 vha->vp_idx, entry->compl_status, 5733 entry->error_subcode1, 5734 entry->error_subcode2); 5735 ha->tgt.tgt_ops->free_mcmd(mcmd); 5736 } 5737 } else if (mcmd) { 5738 ha->tgt.tgt_ops->free_mcmd(mcmd); 5739 } 5740 } 5741 5742 /* ha->hardware_lock supposed to be held on entry */ 5743 /* called via callback from qla2xxx */ 5744 static void qlt_response_pkt(struct scsi_qla_host *vha, 5745 struct rsp_que *rsp, response_t *pkt) 5746 { 5747 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5748 5749 if (unlikely(tgt == NULL)) { 5750 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5751 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5752 vha->vp_idx, pkt->entry_type, vha->hw); 5753 return; 5754 } 5755 5756 /* 5757 * In tgt_stop mode we also should allow all requests to pass. 5758 * Otherwise, some commands can stuck. 
5759 */ 5760 5761 switch (pkt->entry_type) { 5762 case CTIO_CRC2: 5763 case CTIO_TYPE7: 5764 { 5765 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5766 5767 qlt_do_ctio_completion(vha, rsp, entry->handle, 5768 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5769 entry); 5770 break; 5771 } 5772 5773 case ACCEPT_TGT_IO_TYPE: 5774 { 5775 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5776 int rc; 5777 5778 if (atio->u.isp2x.status != 5779 cpu_to_le16(ATIO_CDB_VALID)) { 5780 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5781 "qla_target(%d): ATIO with error " 5782 "status %x received\n", vha->vp_idx, 5783 le16_to_cpu(atio->u.isp2x.status)); 5784 break; 5785 } 5786 5787 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5788 if (rc != 0) 5789 return; 5790 5791 rc = qlt_handle_cmd_for_atio(vha, atio); 5792 if (unlikely(rc != 0)) { 5793 switch (rc) { 5794 case -ENODEV: 5795 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5796 "qla_target: Unable to send command to target\n"); 5797 break; 5798 case -EBADF: 5799 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5800 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5801 qlt_send_term_exchange(rsp->qpair, NULL, 5802 atio, 1, 0); 5803 break; 5804 case -EBUSY: 5805 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5806 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5807 vha->vp_idx); 5808 qlt_send_busy(rsp->qpair, atio, 5809 tc_sam_status); 5810 break; 5811 default: 5812 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5813 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5814 vha->vp_idx); 5815 qlt_send_busy(rsp->qpair, atio, 5816 qla_sam_status); 5817 break; 5818 } 5819 } 5820 } 5821 break; 5822 5823 case CONTINUE_TGT_IO_TYPE: 5824 { 5825 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5826 5827 qlt_do_ctio_completion(vha, rsp, entry->handle, 5828 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5829 entry); 5830 break; 5831 } 5832 5833 case CTIO_A64_TYPE: 5834 { 5835 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5836 5837 qlt_do_ctio_completion(vha, rsp, entry->handle, 5838 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5839 entry); 5840 break; 5841 } 5842 5843 case IMMED_NOTIFY_TYPE: 5844 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5845 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5846 break; 5847 5848 case NOTIFY_ACK_TYPE: 5849 if (tgt->notify_ack_expected > 0) { 5850 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5851 5852 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5853 "NOTIFY_ACK seq %08x status %x\n", 5854 le16_to_cpu(entry->u.isp2x.seq_id), 5855 le16_to_cpu(entry->u.isp2x.status)); 5856 tgt->notify_ack_expected--; 5857 if (entry->u.isp2x.status != 5858 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5859 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5860 "qla_target(%d): NOTIFY_ACK " 5861 "failed %x\n", vha->vp_idx, 5862 le16_to_cpu(entry->u.isp2x.status)); 5863 } 5864 } else { 5865 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5866 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5867 vha->vp_idx); 5868 } 5869 break; 5870 5871 case ABTS_RECV_24XX: 5872 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5873 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5874 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5875 break; 5876 5877 case ABTS_RESP_24XX: 5878 if (tgt->abts_resp_expected > 0) { 5879 qlt_handle_abts_completion(vha, rsp, pkt); 5880 } else { 5881 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5882 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5883 "received\n", vha->vp_idx); 5884 } 5885 break; 
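/* Entry types other than the cases above are not expected on a target-mode response ring; the default case below just logs them and drops the packet. */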
5886 5887 default: 5888 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5889 "qla_target(%d): Received unknown response pkt " 5890 "type %x\n", vha->vp_idx, pkt->entry_type); 5891 break; 5892 } 5893 5894 } 5895 5896 /* 5897 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 5898 */ 5899 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5900 uint16_t *mailbox) 5901 { 5902 struct qla_hw_data *ha = vha->hw; 5903 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5904 int login_code; 5905 5906 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 5907 return; 5908 5909 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5910 IS_QLA2100(ha)) 5911 return; 5912 /* 5913 * In tgt_stop mode we also should allow all requests to pass. 5914 * Otherwise, some commands can get stuck. 5915 */ 5916 5917 5918 switch (code) { 5919 case MBA_RESET: /* Reset */ 5920 case MBA_SYSTEM_ERR: /* System Error */ 5921 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5922 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5923 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5924 "qla_target(%d): System error async event %#x " 5925 "occurred", vha->vp_idx, code); 5926 break; 5927 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ 5928 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5929 break; 5930 5931 case MBA_LOOP_UP: 5932 { 5933 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5934 "qla_target(%d): Async LOOP_UP occurred " 5935 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5936 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5937 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5938 if (tgt->link_reinit_iocb_pending) { 5939 qlt_send_notify_ack(ha->base_qpair, 5940 (void *)&tgt->link_reinit_iocb, 5941 0, 0, 0, 0, 0, 0); 5942 tgt->link_reinit_iocb_pending = 0; 5943 } 5944 break; 5945 } 5946 5947 case MBA_LIP_OCCURRED: 5948 case MBA_LOOP_DOWN: 5949 case MBA_LIP_RESET: 5950 case MBA_RSCN_UPDATE: 5951 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5952 "qla_target(%d): Async event %#x occurred " 5953 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5954 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5955 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5956 break; 5957 5958 case MBA_REJECTED_FCP_CMD: 5959 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 5960 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 5961 vha->vp_idx, 5962 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5963 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5964 5965 if (le16_to_cpu(mailbox[3]) == 1) { 5966 /* exchange starvation. */ 5967 vha->hw->exch_starvation++; 5968 if (vha->hw->exch_starvation > 5) { 5969 ql_log(ql_log_warn, vha, 0xd03a, 5970 "Exchange starvation. 
Resetting RISC\n"); 5971 5972 vha->hw->exch_starvation = 0; 5973 if (IS_P3P_TYPE(vha->hw)) 5974 set_bit(FCOE_CTX_RESET_NEEDED, 5975 &vha->dpc_flags); 5976 else 5977 set_bit(ISP_ABORT_NEEDED, 5978 &vha->dpc_flags); 5979 qla2xxx_wake_dpc(vha); 5980 } 5981 } 5982 break; 5983 5984 case MBA_PORT_UPDATE: 5985 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 5986 "qla_target(%d): Port update async event %#x " 5987 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 5988 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5989 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5990 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5991 5992 login_code = le16_to_cpu(mailbox[2]); 5993 if (login_code == 0x4) { 5994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 5995 "Async MB 2: Got PLOGI Complete\n"); 5996 vha->hw->exch_starvation = 0; 5997 } else if (login_code == 0x7) 5998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 5999 "Async MB 2: Port Logged Out\n"); 6000 break; 6001 default: 6002 break; 6003 } 6004 6005 } 6006 6007 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 6008 uint16_t loop_id) 6009 { 6010 fc_port_t *fcport, *tfcp, *del; 6011 int rc; 6012 unsigned long flags; 6013 u8 newfcport = 0; 6014 6015 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 6016 if (!fcport) { 6017 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 6018 "qla_target(%d): Allocation of tmp FC port failed", 6019 vha->vp_idx); 6020 return NULL; 6021 } 6022 6023 fcport->loop_id = loop_id; 6024 6025 rc = qla24xx_gpdb_wait(vha, fcport, 0); 6026 if (rc != QLA_SUCCESS) { 6027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 6028 "qla_target(%d): Failed to retrieve fcport " 6029 "information -- get_port_database() returned %x " 6030 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 6031 qla2x00_free_fcport(fcport); 6032 return NULL; 6033 } 6034 6035 del = NULL; 6036 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 6037 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 6038 6039 if (tfcp) { 6040 tfcp->d_id = fcport->d_id; 6041 tfcp->port_type = fcport->port_type; 6042 tfcp->supported_classes = fcport->supported_classes; 6043 tfcp->flags |= fcport->flags; 6044 tfcp->scan_state = QLA_FCPORT_FOUND; 6045 6046 del = fcport; 6047 fcport = tfcp; 6048 } else { 6049 if (vha->hw->current_topology == ISP_CFG_F) 6050 fcport->flags |= FCF_FABRIC_DEVICE; 6051 6052 list_add_tail(&fcport->list, &vha->vp_fcports); 6053 if (!IS_SW_RESV_ADDR(fcport->d_id)) 6054 vha->fcport_count++; 6055 fcport->login_gen++; 6056 fcport->disc_state = DSC_LOGIN_COMPLETE; 6057 fcport->login_succ = 1; 6058 newfcport = 1; 6059 } 6060 6061 fcport->deleted = 0; 6062 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 6063 6064 switch (vha->host->active_mode) { 6065 case MODE_INITIATOR: 6066 case MODE_DUAL: 6067 if (newfcport) { 6068 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 6069 qla24xx_sched_upd_fcport(fcport); 6070 } else { 6071 ql_dbg(ql_dbg_disc, vha, 0x20ff, 6072 "%s %d %8phC post gpsc fcp_cnt %d\n", 6073 __func__, __LINE__, fcport->port_name, vha->fcport_count); 6074 qla24xx_post_gpsc_work(vha, fcport); 6075 } 6076 } 6077 break; 6078 6079 case MODE_TARGET: 6080 default: 6081 break; 6082 } 6083 if (del) 6084 qla2x00_free_fcport(del); 6085 6086 return fcport; 6087 } 6088 6089 /* Takes and releases vha->vha_tgt.tgt_mutex internally */ 6090 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 6091 be_id_t s_id) 6092 { 6093 struct fc_port *sess = NULL; 6094 fc_port_t *fcport = NULL; 6095 int rc, global_resets; 6096 uint16_t loop_id = 0; 6097 6098 if (s_id.domain == 0xFF && s_id.area == 0xFC) { 
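/* D_ID FF:FC:xx is the Fibre Channel well-known address range of a Domain Controller (FFFCxxh). */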
6099 /* 6100 * This is Domain Controller, so it should be 6101 * OK to drop SCSI commands from it. 6102 */ 6103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 6104 "Unable to find initiator with S_ID %x:%x:%x", 6105 s_id.domain, s_id.area, s_id.al_pa); 6106 return NULL; 6107 } 6108 6109 mutex_lock(&vha->vha_tgt.tgt_mutex); 6110 6111 retry: 6112 global_resets = 6113 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 6114 6115 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 6116 if (rc != 0) { 6117 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6118 6119 ql_log(ql_log_info, vha, 0xf071, 6120 "qla_target(%d): Unable to find " 6121 "initiator with S_ID %x:%x:%x", 6122 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa); 6123 6124 if (rc == -ENOENT) { 6125 qlt_port_logo_t logo; 6126 6127 logo.id = be_to_port_id(s_id); 6128 logo.cmd_count = 1; 6129 qlt_send_first_logo(vha, &logo); 6130 } 6131 6132 return NULL; 6133 } 6134 6135 fcport = qlt_get_port_database(vha, loop_id); 6136 if (!fcport) { 6137 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6138 return NULL; 6139 } 6140 6141 if (global_resets != 6142 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 6143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 6144 "qla_target(%d): global reset during session discovery " 6145 "(counter was %d, new %d), retrying", vha->vp_idx, 6146 global_resets, 6147 atomic_read(&vha->vha_tgt. 6148 qla_tgt->tgt_global_resets_count)); 6149 goto retry; 6150 } 6151 6152 sess = qlt_create_sess(vha, fcport, true); 6153 6154 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6155 6156 return sess; 6157 } 6158 6159 static void qlt_abort_work(struct qla_tgt *tgt, 6160 struct qla_tgt_sess_work_param *prm) 6161 { 6162 struct scsi_qla_host *vha = tgt->vha; 6163 struct qla_hw_data *ha = vha->hw; 6164 struct fc_port *sess = NULL; 6165 unsigned long flags = 0, flags2 = 0; 6166 be_id_t s_id; 6167 int rc; 6168 6169 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6170 6171 if (tgt->tgt_stop) 6172 goto out_term2; 6173 6174 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id); 6175 6176 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6177 if (!sess) { 6178 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6179 6180 sess = qlt_make_local_sess(vha, s_id); 6181 /* sess has got an extra creation ref */ 6182 6183 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6184 if (!sess) 6185 goto out_term2; 6186 } else { 6187 if (sess->deleted) { 6188 sess = NULL; 6189 goto out_term2; 6190 } 6191 6192 if (!kref_get_unless_zero(&sess->sess_kref)) { 6193 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, 6194 "%s: kref_get fail %8phC \n", 6195 __func__, sess->port_name); 6196 sess = NULL; 6197 goto out_term2; 6198 } 6199 } 6200 6201 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 6202 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6203 6204 ha->tgt.tgt_ops->put_sess(sess); 6205 6206 if (rc != 0) 6207 goto out_term; 6208 return; 6209 6210 out_term2: 6211 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6212 6213 out_term: 6214 spin_lock_irqsave(&ha->hardware_lock, flags); 6215 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6216 FCP_TMF_REJECTED, false); 6217 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6218 } 6219 6220 static void qlt_tmr_work(struct qla_tgt *tgt, 6221 struct qla_tgt_sess_work_param *prm) 6222 { 6223 struct atio_from_isp *a = &prm->tm_iocb2; 6224 struct scsi_qla_host *vha = tgt->vha; 6225 struct qla_hw_data *ha = vha->hw; 6226 struct fc_port *sess; 6227 unsigned long flags; 6228 be_id_t s_id; 6229 int rc; 6230 u64 unpacked_lun; 6231 int fn; 6232 void *iocb; 6233 
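/* Deferred TMR path: look up the session for the initiator S_ID under sess_lock (creating a local session if none exists), take a kref, then pass the unpacked LUN and task management flags to qlt_issue_task_mgmt(). */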
6234 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6235 6236 if (tgt->tgt_stop) 6237 goto out_term2; 6238 6239 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 6240 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6241 if (!sess) { 6242 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6243 6244 sess = qlt_make_local_sess(vha, s_id); 6245 /* sess has got an extra creation ref */ 6246 6247 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6248 if (!sess) 6249 goto out_term2; 6250 } else { 6251 if (sess->deleted) { 6252 goto out_term2; 6253 } 6254 6255 if (!kref_get_unless_zero(&sess->sess_kref)) { 6256 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, 6257 "%s: kref_get fail %8phC\n", 6258 __func__, sess->port_name); 6259 goto out_term2; 6260 } 6261 } 6262 6263 iocb = a; 6264 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 6265 unpacked_lun = 6266 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 6267 6268 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 6269 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6270 6271 ha->tgt.tgt_ops->put_sess(sess); 6272 6273 if (rc != 0) 6274 goto out_term; 6275 return; 6276 6277 out_term2: 6278 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6279 out_term: 6280 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); 6281 } 6282 6283 static void qlt_sess_work_fn(struct work_struct *work) 6284 { 6285 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 6286 struct scsi_qla_host *vha = tgt->vha; 6287 unsigned long flags; 6288 6289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 6290 6291 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6292 while (!list_empty(&tgt->sess_works_list)) { 6293 struct qla_tgt_sess_work_param *prm = list_entry( 6294 tgt->sess_works_list.next, typeof(*prm), 6295 sess_works_list_entry); 6296 6297 /* 6298 * This work can be scheduled on several CPUs at a time, so we 6299 * must delete the entry to eliminate double processing 6300 */ 6301 list_del(&prm->sess_works_list_entry); 6302 6303 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6304 6305 switch (prm->type) { 6306 case QLA_TGT_SESS_WORK_ABORT: 6307 qlt_abort_work(tgt, prm); 6308 break; 6309 case QLA_TGT_SESS_WORK_TM: 6310 qlt_tmr_work(tgt, prm); 6311 break; 6312 default: 6313 BUG_ON(1); 6314 break; 6315 } 6316 6317 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6318 6319 kfree(prm); 6320 } 6321 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6322 } 6323 6324 /* Must be called under tgt_host_action_mutex */ 6325 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6326 { 6327 struct qla_tgt *tgt; 6328 int rc, i; 6329 struct qla_qpair_hint *h; 6330 6331 if (!QLA_TGT_MODE_ENABLED()) 6332 return 0; 6333 6334 if (!IS_TGT_MODE_CAPABLE(ha)) { 6335 ql_log(ql_log_warn, base_vha, 0xe070, 6336 "This adapter does not support target mode.\n"); 6337 return 0; 6338 } 6339 6340 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6341 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6342 6343 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6344 6345 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6346 if (!tgt) { 6347 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6348 "Unable to allocate struct qla_tgt\n"); 6349 return -ENOMEM; 6350 } 6351 6352 tgt->qphints = kcalloc(ha->max_qpairs + 1, 6353 sizeof(struct qla_qpair_hint), 6354 GFP_KERNEL); 6355 if (!tgt->qphints) { 6356 kfree(tgt); 6357 ql_log(ql_log_warn, base_vha, 0x0197, 6358 "Unable to allocate qpair hints.\n"); 6359 return -ENOMEM; 6360 } 6361 6362 if 
(!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6363 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6364 6365 rc = btree_init64(&tgt->lun_qpair_map); 6366 if (rc) { 6367 kfree(tgt->qphints); 6368 kfree(tgt); 6369 ql_log(ql_log_info, base_vha, 0x0198, 6370 "Unable to initialize lun_qpair_map btree\n"); 6371 return -EIO; 6372 } 6373 h = &tgt->qphints[0]; 6374 h->qpair = ha->base_qpair; 6375 INIT_LIST_HEAD(&h->hint_elem); 6376 h->cpuid = ha->base_qpair->cpuid; 6377 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); 6378 6379 for (i = 0; i < ha->max_qpairs; i++) { 6380 unsigned long flags; 6381 6382 struct qla_qpair *qpair = ha->queue_pair_map[i]; 6383 6384 h = &tgt->qphints[i + 1]; 6385 INIT_LIST_HEAD(&h->hint_elem); 6386 if (qpair) { 6387 h->qpair = qpair; 6388 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 6389 list_add_tail(&h->hint_elem, &qpair->hints_list); 6390 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 6391 h->cpuid = qpair->cpuid; 6392 } 6393 } 6394 6395 tgt->ha = ha; 6396 tgt->vha = base_vha; 6397 init_waitqueue_head(&tgt->waitQ); 6398 INIT_LIST_HEAD(&tgt->del_sess_list); 6399 spin_lock_init(&tgt->sess_work_lock); 6400 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6401 INIT_LIST_HEAD(&tgt->sess_works_list); 6402 atomic_set(&tgt->tgt_global_resets_count, 0); 6403 6404 base_vha->vha_tgt.qla_tgt = tgt; 6405 6406 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6407 "qla_target(%d): using 64 Bit PCI addressing", 6408 base_vha->vp_idx); 6409 /* 3 is reserved */ 6410 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6411 6412 mutex_lock(&qla_tgt_mutex); 6413 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6414 mutex_unlock(&qla_tgt_mutex); 6415 6416 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6417 ha->tgt.tgt_ops->add_target(base_vha); 6418 6419 return 0; 6420 } 6421 6422 /* Must be called under tgt_host_action_mutex */ 6423 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6424 { 6425 if (!vha->vha_tgt.qla_tgt) 6426 return 0; 6427 6428 if (vha->fc_vport) { 6429 qlt_release(vha->vha_tgt.qla_tgt); 6430 return 0; 6431 } 6432 6433 /* free left over qfull cmds */ 6434 qlt_init_term_exchange(vha); 6435 6436 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6437 vha->host_no, ha); 6438 qlt_release(vha->vha_tgt.qla_tgt); 6439 6440 return 0; 6441 } 6442 6443 void qlt_remove_target_resources(struct qla_hw_data *ha) 6444 { 6445 struct scsi_qla_host *node; 6446 u32 key = 0; 6447 6448 btree_for_each_safe32(&ha->tgt.host_map, key, node) 6449 btree_remove32(&ha->tgt.host_map, key); 6450 6451 btree_destroy32(&ha->tgt.host_map); 6452 } 6453 6454 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6455 unsigned char *b) 6456 { 6457 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name); 6458 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name); 6459 put_unaligned_be64(wwpn, b); 6460 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b); 6461 } 6462 6463 /** 6464 * qlt_lport_register - register lport with external module 6465 * 6466 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6467 * @phys_wwpn: physical port WWPN 6468 * @npiv_wwpn: NPIV WWPN 6469 * @npiv_wwnn: NPIV WWNN 6470 * @callback: lport initialization callback for tcm_qla2xxx code 6471 */ 6472 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6473 u64 npiv_wwpn, u64 npiv_wwnn, 6474 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6475 { 6476 struct qla_tgt *tgt; 6477 struct 
scsi_qla_host *vha; 6478 struct qla_hw_data *ha; 6479 struct Scsi_Host *host; 6480 unsigned long flags; 6481 int rc; 6482 u8 b[WWN_SIZE]; 6483 6484 mutex_lock(&qla_tgt_mutex); 6485 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6486 vha = tgt->vha; 6487 ha = vha->hw; 6488 6489 host = vha->host; 6490 if (!host) 6491 continue; 6492 6493 if (!(host->hostt->supported_mode & MODE_TARGET)) 6494 continue; 6495 6496 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6497 continue; 6498 6499 spin_lock_irqsave(&ha->hardware_lock, flags); 6500 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6501 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6502 host->host_no); 6503 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6504 continue; 6505 } 6506 if (tgt->tgt_stop) { 6507 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 6508 host->host_no); 6509 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6510 continue; 6511 } 6512 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6513 6514 if (!scsi_host_get(host)) { 6515 ql_dbg(ql_dbg_tgt, vha, 0xe068, 6516 "Unable to scsi_host_get() for" 6517 " qla2xxx scsi_host\n"); 6518 continue; 6519 } 6520 qlt_lport_dump(vha, phys_wwpn, b); 6521 6522 if (memcmp(vha->port_name, b, WWN_SIZE)) { 6523 scsi_host_put(host); 6524 continue; 6525 } 6526 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 6527 if (rc != 0) 6528 scsi_host_put(host); 6529 6530 mutex_unlock(&qla_tgt_mutex); 6531 return rc; 6532 } 6533 mutex_unlock(&qla_tgt_mutex); 6534 6535 return -ENODEV; 6536 } 6537 EXPORT_SYMBOL(qlt_lport_register); 6538 6539 /** 6540 * qlt_lport_deregister - Deregister lport 6541 * 6542 * @vha: Registered scsi_qla_host pointer 6543 */ 6544 void qlt_lport_deregister(struct scsi_qla_host *vha) 6545 { 6546 struct qla_hw_data *ha = vha->hw; 6547 struct Scsi_Host *sh = vha->host; 6548 /* 6549 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 6550 */ 6551 vha->vha_tgt.target_lport_ptr = NULL; 6552 ha->tgt.tgt_ops = NULL; 6553 /* 6554 * Release the Scsi_Host reference for the underlying qla2xxx host 6555 */ 6556 scsi_host_put(sh); 6557 } 6558 EXPORT_SYMBOL(qlt_lport_deregister); 6559 6560 /* Must be called under HW lock */ 6561 void qlt_set_mode(struct scsi_qla_host *vha) 6562 { 6563 switch (vha->qlini_mode) { 6564 case QLA2XXX_INI_MODE_DISABLED: 6565 case QLA2XXX_INI_MODE_EXCLUSIVE: 6566 vha->host->active_mode = MODE_TARGET; 6567 break; 6568 case QLA2XXX_INI_MODE_ENABLED: 6569 vha->host->active_mode = MODE_INITIATOR; 6570 break; 6571 case QLA2XXX_INI_MODE_DUAL: 6572 vha->host->active_mode = MODE_DUAL; 6573 break; 6574 default: 6575 break; 6576 } 6577 } 6578 6579 /* Must be called under HW lock */ 6580 static void qlt_clear_mode(struct scsi_qla_host *vha) 6581 { 6582 switch (vha->qlini_mode) { 6583 case QLA2XXX_INI_MODE_DISABLED: 6584 vha->host->active_mode = MODE_UNKNOWN; 6585 break; 6586 case QLA2XXX_INI_MODE_EXCLUSIVE: 6587 vha->host->active_mode = MODE_INITIATOR; 6588 break; 6589 case QLA2XXX_INI_MODE_ENABLED: 6590 case QLA2XXX_INI_MODE_DUAL: 6591 vha->host->active_mode = MODE_INITIATOR; 6592 break; 6593 default: 6594 break; 6595 } 6596 } 6597 6598 /* 6599 * qlt_enable_vha - NO LOCK HELD 6600 * 6601 * host_reset, bring up w/ Target Mode Enabled 6602 */ 6603 void 6604 qlt_enable_vha(struct scsi_qla_host *vha) 6605 { 6606 struct qla_hw_data *ha = vha->hw; 6607 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6608 unsigned long flags; 6609 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6610 6611 if 
(!tgt) { 6612 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6613 "Unable to locate qla_tgt pointer from" 6614 " struct qla_hw_data\n"); 6615 dump_stack(); 6616 return; 6617 } 6618 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6619 return; 6620 6621 if (ha->tgt.num_act_qpairs > ha->max_qpairs) 6622 ha->tgt.num_act_qpairs = ha->max_qpairs; 6623 spin_lock_irqsave(&ha->hardware_lock, flags); 6624 tgt->tgt_stopped = 0; 6625 qlt_set_mode(vha); 6626 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6627 6628 mutex_lock(&ha->optrom_mutex); 6629 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 6630 "%s.\n", __func__); 6631 if (vha->vp_idx) { 6632 qla24xx_disable_vp(vha); 6633 qla24xx_enable_vp(vha); 6634 } else { 6635 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6636 qla2xxx_wake_dpc(base_vha); 6637 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != 6638 QLA_SUCCESS); 6639 } 6640 mutex_unlock(&ha->optrom_mutex); 6641 } 6642 EXPORT_SYMBOL(qlt_enable_vha); 6643 6644 /* 6645 * qlt_disable_vha - NO LOCK HELD 6646 * 6647 * Disable Target Mode and reset the adapter 6648 */ 6649 static void qlt_disable_vha(struct scsi_qla_host *vha) 6650 { 6651 struct qla_hw_data *ha = vha->hw; 6652 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6653 unsigned long flags; 6654 6655 if (!tgt) { 6656 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6657 "Unable to locate qla_tgt pointer from" 6658 " struct qla_hw_data\n"); 6659 dump_stack(); 6660 return; 6661 } 6662 6663 spin_lock_irqsave(&ha->hardware_lock, flags); 6664 qlt_clear_mode(vha); 6665 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6666 6667 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6668 qla2xxx_wake_dpc(vha); 6669 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 6670 ql_dbg(ql_dbg_tgt, vha, 0xe081, 6671 "qla2x00_wait_for_hba_online() failed\n"); 6672 } 6673 6674 /* 6675 * Called from qla_init.c:qla24xx_vport_create() context to setup 6676 * the target mode specific struct scsi_qla_host and struct qla_hw_data 6677 * members. 6678 */ 6679 void 6680 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6681 { 6682 vha->vha_tgt.qla_tgt = NULL; 6683 6684 mutex_init(&vha->vha_tgt.tgt_mutex); 6685 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6686 6687 qlt_clear_mode(vha); 6688 6689 /* 6690 * NOTE: Currently the value is kept the same for <24xx and 6691 * >=24xx ISPs. If it is necessary to change it, 6692 * the check should be added for specific ISPs, 6693 * assigning the value appropriately. 6694 */ 6695 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6696 6697 qlt_add_target(ha, vha); 6698 } 6699 6700 u8 6701 qlt_rff_id(struct scsi_qla_host *vha) 6702 { 6703 u8 fc4_feature = 0; 6704 /* 6705 * FC-4 Feature bit 0 indicates target functionality to the name server. 6706 */ 6707 if (qla_tgt_mode_enabled(vha)) { 6708 fc4_feature = BIT_0; 6709 } else if (qla_ini_mode_enabled(vha)) { 6710 fc4_feature = BIT_1; 6711 } else if (qla_dual_mode_enabled(vha)) 6712 fc4_feature = BIT_0 | BIT_1; 6713 6714 return fc4_feature; 6715 } 6716 6717 /* 6718 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6719 * @vha: HA context 6720 * 6721 * Beginning of ATIO ring has initialization control block already built 6722 * by nvram config routine. 6723 * 6724 * Returns nothing. 
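* Each entry's signature is preset to ATIO_PROCESSED so that qlt_24xx_process_atio_queue() can tell already-consumed slots from entries newly written by the firmware.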
6725 */ 6726 void 6727 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6728 { 6729 struct qla_hw_data *ha = vha->hw; 6730 uint16_t cnt; 6731 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6732 6733 if (qla_ini_mode_enabled(vha)) 6734 return; 6735 6736 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6737 pkt->u.raw.signature = ATIO_PROCESSED; 6738 pkt++; 6739 } 6740 6741 } 6742 6743 /* 6744 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6745 * @vha: SCSI driver HA context 6746 */ 6747 void 6748 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6749 { 6750 struct qla_hw_data *ha = vha->hw; 6751 struct atio_from_isp *pkt; 6752 int cnt, i; 6753 6754 if (!ha->flags.fw_started) 6755 return; 6756 6757 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6758 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6759 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6760 cnt = pkt->u.raw.entry_count; 6761 6762 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6763 /* 6764 * This packet is corrupted. The header + payload 6765 * can not be trusted. There is no point in passing 6766 * it further up. 6767 */ 6768 ql_log(ql_log_warn, vha, 0xd03c, 6769 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6770 &pkt->u.isp24.fcp_hdr.s_id, 6771 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6772 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6773 6774 adjust_corrupted_atio(pkt); 6775 qlt_send_term_exchange(ha->base_qpair, NULL, pkt, 6776 ha_locked, 0); 6777 } else { 6778 qlt_24xx_atio_pkt_all_vps(vha, 6779 (struct atio_from_isp *)pkt, ha_locked); 6780 } 6781 6782 for (i = 0; i < cnt; i++) { 6783 ha->tgt.atio_ring_index++; 6784 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6785 ha->tgt.atio_ring_index = 0; 6786 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6787 } else 6788 ha->tgt.atio_ring_ptr++; 6789 6790 pkt->u.raw.signature = ATIO_PROCESSED; 6791 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6792 } 6793 wmb(); 6794 } 6795 6796 /* Adjust ring index */ 6797 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6798 } 6799 6800 void 6801 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6802 { 6803 struct qla_hw_data *ha = vha->hw; 6804 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6805 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6806 6807 if (!QLA_TGT_MODE_ENABLED()) 6808 return; 6809 6810 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 6811 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6812 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6813 6814 if (ha->flags.msix_enabled) { 6815 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6816 if (IS_QLA2071(ha)) { 6817 /* 4 ports Baker: Enable Interrupt Handshake */ 6818 icb->msix_atio = 0; 6819 icb->firmware_options_2 |= BIT_26; 6820 } else { 6821 icb->msix_atio = cpu_to_le16(msix->entry); 6822 icb->firmware_options_2 &= ~BIT_26; 6823 } 6824 ql_dbg(ql_dbg_init, vha, 0xf072, 6825 "Registering ICB vector 0x%x for ATIO queue.\n", 6826 msix->entry); 6827 } 6828 } else { 6829 /* INTx|MSI */ 6830 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6831 icb->msix_atio = 0; 6832 icb->firmware_options_2 |= BIT_26; 6833 ql_dbg(ql_dbg_init, vha, 0xf072, 6834 "%s: Use INTx for ATIOQ.\n", __func__); 6835 } 6836 } 6837 } 6838 6839 void 6840 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6841 { 6842 struct qla_hw_data *ha = vha->hw; 6843 u32 tmp; 6844 6845 if (!QLA_TGT_MODE_ENABLED()) 6846 return; 6847 6848 if 
(qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6849 if (!ha->tgt.saved_set) { 6850 /* We save only once */ 6851 ha->tgt.saved_exchange_count = nv->exchange_count; 6852 ha->tgt.saved_firmware_options_1 = 6853 nv->firmware_options_1; 6854 ha->tgt.saved_firmware_options_2 = 6855 nv->firmware_options_2; 6856 ha->tgt.saved_firmware_options_3 = 6857 nv->firmware_options_3; 6858 ha->tgt.saved_set = 1; 6859 } 6860 6861 if (qla_tgt_mode_enabled(vha)) 6862 nv->exchange_count = cpu_to_le16(0xFFFF); 6863 else /* dual */ 6864 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 6865 6866 /* Enable target mode */ 6867 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6868 6869 /* Disable ini mode, if requested */ 6870 if (qla_tgt_mode_enabled(vha)) 6871 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6872 6873 /* Disable Full Login after LIP */ 6874 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6875 /* Enable initial LIP */ 6876 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6877 if (ql2xtgt_tape_enable) 6878 /* Enable FC Tape support */ 6879 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6880 else 6881 /* Disable FC Tape support */ 6882 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6883 6884 /* Disable Full Login after LIP */ 6885 nv->host_p &= cpu_to_le32(~BIT_10); 6886 6887 /* 6888 * clear BIT 15 explicitly as we have seen at least 6889 * a couple of instances where this was set and this 6890 * was causing the firmware to not be initialized. 6891 */ 6892 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6893 /* Enable target PRLI control */ 6894 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6895 6896 if (IS_QLA25XX(ha)) { 6897 /* Change Loop-prefer to Pt-Pt */ 6898 tmp = ~(BIT_4|BIT_5|BIT_6); 6899 nv->firmware_options_2 &= cpu_to_le32(tmp); 6900 tmp = P2P << 4; 6901 nv->firmware_options_2 |= cpu_to_le32(tmp); 6902 } 6903 } else { 6904 if (ha->tgt.saved_set) { 6905 nv->exchange_count = ha->tgt.saved_exchange_count; 6906 nv->firmware_options_1 = 6907 ha->tgt.saved_firmware_options_1; 6908 nv->firmware_options_2 = 6909 ha->tgt.saved_firmware_options_2; 6910 nv->firmware_options_3 = 6911 ha->tgt.saved_firmware_options_3; 6912 } 6913 return; 6914 } 6915 6916 if (ha->base_qpair->enable_class_2) { 6917 if (vha->flags.init_done) 6918 fc_host_supported_classes(vha->host) = 6919 FC_COS_CLASS2 | FC_COS_CLASS3; 6920 6921 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6922 } else { 6923 if (vha->flags.init_done) 6924 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6925 6926 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6927 } 6928 } 6929 6930 void 6931 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6932 struct init_cb_24xx *icb) 6933 { 6934 struct qla_hw_data *ha = vha->hw; 6935 6936 if (!QLA_TGT_MODE_ENABLED()) 6937 return; 6938 6939 if (ha->tgt.node_name_set) { 6940 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6941 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6942 } 6943 } 6944 6945 void 6946 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6947 { 6948 struct qla_hw_data *ha = vha->hw; 6949 u32 tmp; 6950 6951 if (!QLA_TGT_MODE_ENABLED()) 6952 return; 6953 6954 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6955 if (!ha->tgt.saved_set) { 6956 /* We save only once */ 6957 ha->tgt.saved_exchange_count = nv->exchange_count; 6958 ha->tgt.saved_firmware_options_1 = 6959 nv->firmware_options_1; 6960 ha->tgt.saved_firmware_options_2 = 6961 nv->firmware_options_2; 6962 ha->tgt.saved_firmware_options_3 = 6963 nv->firmware_options_3; 
6964 ha->tgt.saved_set = 1; 6965 } 6966 6967 if (qla_tgt_mode_enabled(vha)) 6968 nv->exchange_count = cpu_to_le16(0xFFFF); 6969 else /* dual */ 6970 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 6971 6972 /* Enable target mode */ 6973 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6974 6975 /* Disable ini mode, if requested */ 6976 if (qla_tgt_mode_enabled(vha)) 6977 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6978 /* Disable Full Login after LIP */ 6979 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6980 /* Enable initial LIP */ 6981 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6982 /* 6983 * clear BIT 15 explicitly as we have seen at 6984 * least a couple of instances where this was set 6985 * and this was causing the firmware to not be 6986 * initialized. 6987 */ 6988 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6989 if (ql2xtgt_tape_enable) 6990 /* Enable FC tape support */ 6991 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6992 else 6993 /* Disable FC tape support */ 6994 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6995 6996 /* Disable Full Login after LIP */ 6997 nv->host_p &= cpu_to_le32(~BIT_10); 6998 /* Enable target PRLI control */ 6999 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 7000 7001 /* Change Loop-prefer to Pt-Pt */ 7002 tmp = ~(BIT_4|BIT_5|BIT_6); 7003 nv->firmware_options_2 &= cpu_to_le32(tmp); 7004 tmp = P2P << 4; 7005 nv->firmware_options_2 |= cpu_to_le32(tmp); 7006 } else { 7007 if (ha->tgt.saved_set) { 7008 nv->exchange_count = ha->tgt.saved_exchange_count; 7009 nv->firmware_options_1 = 7010 ha->tgt.saved_firmware_options_1; 7011 nv->firmware_options_2 = 7012 ha->tgt.saved_firmware_options_2; 7013 nv->firmware_options_3 = 7014 ha->tgt.saved_firmware_options_3; 7015 } 7016 return; 7017 } 7018 7019 if (ha->base_qpair->enable_class_2) { 7020 if (vha->flags.init_done) 7021 fc_host_supported_classes(vha->host) = 7022 FC_COS_CLASS2 | FC_COS_CLASS3; 7023 7024 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 7025 } else { 7026 if (vha->flags.init_done) 7027 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 7028 7029 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 7030 } 7031 } 7032 7033 void 7034 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 7035 struct init_cb_81xx *icb) 7036 { 7037 struct qla_hw_data *ha = vha->hw; 7038 7039 if (!QLA_TGT_MODE_ENABLED()) 7040 return; 7041 7042 if (ha->tgt.node_name_set) { 7043 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 7044 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 7045 } 7046 } 7047 7048 void 7049 qlt_83xx_iospace_config(struct qla_hw_data *ha) 7050 { 7051 if (!QLA_TGT_MODE_ENABLED()) 7052 return; 7053 7054 ha->msix_count += 1; /* For ATIO Q */ 7055 } 7056 7057 7058 void 7059 qlt_modify_vp_config(struct scsi_qla_host *vha, 7060 struct vp_config_entry_24xx *vpmod) 7061 { 7062 /* enable target mode. Bit5 = 1 => disable */ 7063 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 7064 vpmod->options_idx1 &= ~BIT_5; 7065 7066 /* Disable ini mode, if requested. 
bit4 = 1 => disable */ 7067 if (qla_tgt_mode_enabled(vha)) 7068 vpmod->options_idx1 &= ~BIT_4; 7069 } 7070 7071 void 7072 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 7073 { 7074 int rc; 7075 7076 if (!QLA_TGT_MODE_ENABLED()) 7077 return; 7078 7079 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 7080 IS_QLA28XX(ha)) { 7081 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 7082 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 7083 } else { 7084 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 7085 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 7086 } 7087 7088 mutex_init(&base_vha->vha_tgt.tgt_mutex); 7089 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 7090 7091 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 7092 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 7093 qlt_unknown_atio_work_fn); 7094 7095 qlt_clear_mode(base_vha); 7096 7097 rc = btree_init32(&ha->tgt.host_map); 7098 if (rc) 7099 ql_log(ql_log_info, base_vha, 0xd03d, 7100 "Unable to initialize ha->host_map btree\n"); 7101 7102 qlt_update_vp_map(base_vha, SET_VP_IDX); 7103 } 7104 7105 irqreturn_t 7106 qla83xx_msix_atio_q(int irq, void *dev_id) 7107 { 7108 struct rsp_que *rsp; 7109 scsi_qla_host_t *vha; 7110 struct qla_hw_data *ha; 7111 unsigned long flags; 7112 7113 rsp = (struct rsp_que *) dev_id; 7114 ha = rsp->hw; 7115 vha = pci_get_drvdata(ha->pdev); 7116 7117 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7118 7119 qlt_24xx_process_atio_queue(vha, 0); 7120 7121 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7122 7123 return IRQ_HANDLED; 7124 } 7125 7126 static void 7127 qlt_handle_abts_recv_work(struct work_struct *work) 7128 { 7129 struct qla_tgt_sess_op *op = container_of(work, 7130 struct qla_tgt_sess_op, work); 7131 scsi_qla_host_t *vha = op->vha; 7132 struct qla_hw_data *ha = vha->hw; 7133 unsigned long flags; 7134 7135 if (qla2x00_reset_active(vha) || 7136 (op->chip_reset != ha->base_qpair->chip_reset)) 7137 return; 7138 7139 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7140 qlt_24xx_process_atio_queue(vha, 0); 7141 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7142 7143 spin_lock_irqsave(&ha->hardware_lock, flags); 7144 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 7145 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7146 7147 kfree(op); 7148 } 7149 7150 void 7151 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 7152 response_t *pkt) 7153 { 7154 struct qla_tgt_sess_op *op; 7155 7156 op = kzalloc(sizeof(*op), GFP_ATOMIC); 7157 7158 if (!op) { 7159 /* do not reach for ATIO queue here. This is best effort err 7160 * recovery at this point. 
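* If the allocation fails, the ABTS is processed inline via qlt_response_pkt_all_vps() below rather than deferred to qla_tgt_wq.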
7161 */ 7162 qlt_response_pkt_all_vps(vha, rsp, pkt); 7163 return; 7164 } 7165 7166 memcpy(&op->atio, pkt, sizeof(*pkt)); 7167 op->vha = vha; 7168 op->chip_reset = vha->hw->base_qpair->chip_reset; 7169 op->rsp = rsp; 7170 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7171 queue_work(qla_tgt_wq, &op->work); 7172 return; 7173 } 7174 7175 int 7176 qlt_mem_alloc(struct qla_hw_data *ha) 7177 { 7178 if (!QLA_TGT_MODE_ENABLED()) 7179 return 0; 7180 7181 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC, 7182 sizeof(struct qla_tgt_vp_map), 7183 GFP_KERNEL); 7184 if (!ha->tgt.tgt_vp_map) 7185 return -ENOMEM; 7186 7187 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7188 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7189 &ha->tgt.atio_dma, GFP_KERNEL); 7190 if (!ha->tgt.atio_ring) { 7191 kfree(ha->tgt.tgt_vp_map); 7192 return -ENOMEM; 7193 } 7194 return 0; 7195 } 7196 7197 void 7198 qlt_mem_free(struct qla_hw_data *ha) 7199 { 7200 if (!QLA_TGT_MODE_ENABLED()) 7201 return; 7202 7203 if (ha->tgt.atio_ring) { 7204 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7205 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7206 ha->tgt.atio_dma); 7207 } 7208 ha->tgt.atio_ring = NULL; 7209 ha->tgt.atio_dma = 0; 7210 kfree(ha->tgt.tgt_vp_map); 7211 ha->tgt.tgt_vp_map = NULL; 7212 } 7213 7214 /* vport_slock to be held by the caller */ 7215 void 7216 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 7217 { 7218 void *slot; 7219 u32 key; 7220 int rc; 7221 7222 if (!QLA_TGT_MODE_ENABLED()) 7223 return; 7224 7225 key = vha->d_id.b24; 7226 7227 switch (cmd) { 7228 case SET_VP_IDX: 7229 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 7230 break; 7231 case SET_AL_PA: 7232 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7233 if (!slot) { 7234 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, 7235 "Save vha in host_map %p %06x\n", vha, key); 7236 rc = btree_insert32(&vha->hw->tgt.host_map, 7237 key, vha, GFP_ATOMIC); 7238 if (rc) 7239 ql_log(ql_log_info, vha, 0xd03e, 7240 "Unable to insert s_id into host_map: %06x\n", 7241 key); 7242 return; 7243 } 7244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 7245 "replace existing vha in host_map %p %06x\n", vha, key); 7246 btree_update32(&vha->hw->tgt.host_map, key, vha); 7247 break; 7248 case RESET_VP_IDX: 7249 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 7250 break; 7251 case RESET_AL_PA: 7252 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 7253 "clear vha in host_map %p %06x\n", vha, key); 7254 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7255 if (slot) 7256 btree_remove32(&vha->hw->tgt.host_map, key); 7257 vha->d_id.b24 = 0; 7258 break; 7259 } 7260 } 7261 7262 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 7263 { 7264 7265 if (!vha->d_id.b24) { 7266 vha->d_id = id; 7267 qlt_update_vp_map(vha, SET_AL_PA); 7268 } else if (vha->d_id.b24 != id.b24) { 7269 qlt_update_vp_map(vha, RESET_AL_PA); 7270 vha->d_id = id; 7271 qlt_update_vp_map(vha, SET_AL_PA); 7272 } 7273 } 7274 7275 static int __init qlt_parse_ini_mode(void) 7276 { 7277 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7278 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7279 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7280 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7281 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7282 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 7283 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 7284 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 7285 else 7286 return false; 7287 7288 return true; 
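/* Usage sketch (assuming standard module-param handling): qlini_mode is parsed once at module load, e.g. "modprobe qla2xxx qlini_mode=dual ql_dm_tgt_ex_pct=50" selects dual mode and, per the ql_dm_tgt_ex_pct description, asks firmware to set aside about half of its exchange resources for target mode. */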
7289 } 7290 7291 int __init qlt_init(void) 7292 { 7293 int ret; 7294 7295 BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); 7296 BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); 7297 7298 if (!qlt_parse_ini_mode()) { 7299 ql_log(ql_log_fatal, NULL, 0xe06b, 7300 "qlt_parse_ini_mode() failed\n"); 7301 return -EINVAL; 7302 } 7303 7304 if (!QLA_TGT_MODE_ENABLED()) 7305 return 0; 7306 7307 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 7308 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 7309 qla_tgt_mgmt_cmd), 0, NULL); 7310 if (!qla_tgt_mgmt_cmd_cachep) { 7311 ql_log(ql_log_fatal, NULL, 0xd04b, 7312 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 7313 return -ENOMEM; 7314 } 7315 7316 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 7317 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 7318 0, NULL); 7319 7320 if (!qla_tgt_plogi_cachep) { 7321 ql_log(ql_log_fatal, NULL, 0xe06d, 7322 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 7323 ret = -ENOMEM; 7324 goto out_mgmt_cmd_cachep; 7325 } 7326 7327 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 7328 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 7329 if (!qla_tgt_mgmt_cmd_mempool) { 7330 ql_log(ql_log_fatal, NULL, 0xe06e, 7331 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 7332 ret = -ENOMEM; 7333 goto out_plogi_cachep; 7334 } 7335 7336 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 7337 if (!qla_tgt_wq) { 7338 ql_log(ql_log_fatal, NULL, 0xe06f, 7339 "alloc_workqueue for qla_tgt_wq failed\n"); 7340 ret = -ENOMEM; 7341 goto out_cmd_mempool; 7342 } 7343 /* 7344 * Return 1 to signal that initiator-mode is being disabled 7345 */ 7346 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 7347 7348 out_cmd_mempool: 7349 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7350 out_plogi_cachep: 7351 kmem_cache_destroy(qla_tgt_plogi_cachep); 7352 out_mgmt_cmd_cachep: 7353 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7354 return ret; 7355 } 7356 7357 void qlt_exit(void) 7358 { 7359 if (!QLA_TGT_MODE_ENABLED()) 7360 return; 7361 7362 destroy_workqueue(qla_tgt_wq); 7363 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7364 kmem_cache_destroy(qla_tgt_plogi_cachep); 7365 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7366 } 7367
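/*
 * Illustrative sketch of the qlt_lport_register() contract; the lport and
 * callback names below are hypothetical placeholders, not driver symbols.
 * A fabric module (tcm_qla2xxx in-tree) supplies a callback that binds its
 * private lport to the physical port whose WWPN matches phys_wwpn:
 *
 *	static int my_lport_cb(struct scsi_qla_host *vha, void *lport,
 *			       u64 npiv_wwpn, u64 npiv_wwnn)
 *	{
 *		// bind lport to vha and install ha->tgt.tgt_ops here;
 *		// returning 0 keeps the scsi_host reference taken by
 *		// qlt_lport_register() until qlt_lport_deregister()
 *		return 0;
 *	}
 *
 *	rc = qlt_lport_register(my_lport, wwpn, 0, 0, my_lport_cb);
 *
 * A non-zero callback return drops the host reference and is passed back
 * to the caller; -ENODEV means no registered target-capable host matched
 * the requested WWPN.
 */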