// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

/* Module parameter: enable Sequence Level Error Recovery (FC Tape). */
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

/* Module parameter: policy for when initiator mode is enabled. */
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

/* Module parameter: let the user steer IRQ placement via smp_affinity. */
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");

/* Effective initiator-mode setting (exclusive until changed elsewhere). */
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/* SAM status values returned on queue-full/busy conditions. */
static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL;	/* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation on the time when
 * those functions are called:
 *
 *  - Either context is IRQ and only IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions boundaries, except tgt_stop, which
 *    additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* Map a target-core protection op code to a printable name for logging. */
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

/*
 * Resolve a destination FC port id (d_id) to its scsi_qla_host.
 * Fast-path check against vha's own d_id, else look up the btree host map
 * keyed by the 24-bit port id.  Returns NULL when no host matches.
 */
struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

/* Bump the pending-command count under q_full_lock. */
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	/* Track the high-water mark of concurrently pending commands. */
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

/* Drop the pending-command count under q_full_lock. */
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}


/*
 * Park an ATIO whose destination host could not be resolved yet on
 * vha->unknown_atio_list and schedule delayed reprocessing.  If the target
 * is stopping or allocation fails, the exchange is terminated instead.
 */
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

/*
 * Walk unknown_atio_list and retry delivery of each queued ATIO: requeue it
 * to its (now resolvable) host, terminate it if aborted or the target is
 * stopping, or reschedule the worker once if the host is still unknown.
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

/* Delayed-work entry point for draining the unknown-ATIO list (unlocked). */
void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

/*
 * Dispatch an inbound ATIO to the scsi_qla_host it belongs to, based on the
 * IOCB entry type (ATIO7 by d_id, immediate notify / ABTS by vp_index).
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);


			/* Host not known yet: park the ATIO for later retry. */
			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		/* A host became resolvable; flush any parked ATIOs first. */
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		/* Route to the vport named by vp_index, unless wildcarded. */
		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		/* ABTS handling requires the hardware lock. */
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

/*
 * Dispatch a response-queue entry to the scsi_qla_host it belongs to,
 * selecting by entry type and the vp_index carried in the entry.
 */
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		/* 0xFF means "not vport-specific"; deliver to vha itself. */
		if (0xFF != entry->u.isp24.vp_index) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
"(ABTS_RECV_24XX) received, with unknown " 488 "vp_index %d\n", vha->vp_idx, entry->vp_index); 489 break; 490 } 491 qlt_response_pkt(host, rsp, pkt); 492 break; 493 } 494 495 case ABTS_RESP_24XX: 496 { 497 struct abts_resp_to_24xx *entry = 498 (struct abts_resp_to_24xx *)pkt; 499 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, 500 entry->vp_index); 501 if (unlikely(!host)) { 502 ql_dbg(ql_dbg_tgt, vha, 0xe045, 503 "qla_target(%d): Response pkt " 504 "(ABTS_RECV_24XX) received, with unknown " 505 "vp_index %d\n", vha->vp_idx, entry->vp_index); 506 break; 507 } 508 qlt_response_pkt(host, rsp, pkt); 509 break; 510 } 511 default: 512 qlt_response_pkt(vha, rsp, pkt); 513 break; 514 } 515 516 } 517 518 /* 519 * All qlt_plogi_ack_t operations are protected by hardware_lock 520 */ 521 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, 522 struct imm_ntfy_from_isp *ntfy, int type) 523 { 524 struct qla_work_evt *e; 525 526 e = qla2x00_alloc_work(vha, QLA_EVT_NACK); 527 if (!e) 528 return QLA_FUNCTION_FAILED; 529 530 e->u.nack.fcport = fcport; 531 e->u.nack.type = type; 532 memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); 533 return qla2x00_post_work(vha, e); 534 } 535 536 static void qla2x00_async_nack_sp_done(srb_t *sp, int res) 537 { 538 struct scsi_qla_host *vha = sp->vha; 539 unsigned long flags; 540 541 ql_dbg(ql_dbg_disc, vha, 0x20f2, 542 "Async done-%s res %x %8phC type %d\n", 543 sp->name, res, sp->fcport->port_name, sp->type); 544 545 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 546 sp->fcport->flags &= ~FCF_ASYNC_SENT; 547 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 548 549 switch (sp->type) { 550 case SRB_NACK_PLOGI: 551 sp->fcport->login_gen++; 552 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 553 sp->fcport->logout_on_delete = 1; 554 sp->fcport->plogi_nack_done_deadline = jiffies + HZ; 555 sp->fcport->send_els_logo = 0; 556 557 if (sp->fcport->flags & FCF_FCSP_DEVICE) { 558 
ql_dbg(ql_dbg_edif, vha, 0x20ef, 559 "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__, 560 sp->fcport->port_name); 561 qla2x00_set_fcport_disc_state(sp->fcport, 562 DSC_LOGIN_AUTH_PEND); 563 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 564 sp->fcport->d_id.b24); 565 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24, 566 0, sp->fcport); 567 } 568 break; 569 570 case SRB_NACK_PRLI: 571 sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; 572 sp->fcport->deleted = 0; 573 sp->fcport->send_els_logo = 0; 574 575 if (!sp->fcport->login_succ && 576 !IS_SW_RESV_ADDR(sp->fcport->d_id)) { 577 sp->fcport->login_succ = 1; 578 579 vha->fcport_count++; 580 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 581 qla24xx_sched_upd_fcport(sp->fcport); 582 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 583 } else { 584 sp->fcport->login_retry = 0; 585 qla2x00_set_fcport_disc_state(sp->fcport, 586 DSC_LOGIN_COMPLETE); 587 sp->fcport->deleted = 0; 588 sp->fcport->logout_on_delete = 1; 589 } 590 break; 591 592 case SRB_NACK_LOGO: 593 sp->fcport->login_gen++; 594 sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 595 qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); 596 break; 597 } 598 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 599 600 kref_put(&sp->cmd_kref, qla2x00_sp_release); 601 } 602 603 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, 604 struct imm_ntfy_from_isp *ntfy, int type) 605 { 606 int rval = QLA_FUNCTION_FAILED; 607 srb_t *sp; 608 char *c = NULL; 609 610 fcport->flags |= FCF_ASYNC_SENT; 611 switch (type) { 612 case SRB_NACK_PLOGI: 613 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 614 c = "PLOGI"; 615 if (vha->hw->flags.edif_enabled && 616 (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) 617 fcport->flags |= FCF_FCSP_DEVICE; 618 break; 619 case SRB_NACK_PRLI: 620 fcport->fw_login_state = DSC_LS_PRLI_PEND; 621 fcport->deleted = 0; 622 c = "PRLI"; 623 break; 624 case SRB_NACK_LOGO: 625 
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_nack_sp_done);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	/* SRB never started: clear the in-flight marker again. */
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * Work handler for QLA_EVT_NACK: for PRLI, (re)create the target session
 * first, then send the notify-ack for the queued IOCB.
 */
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		/* Wait for any in-flight delete/free of this port. */
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

/*
 * del_work handler: shut down and release an fc_port's target session, or
 * unregister it directly when no se_sess was ever created.
 */
void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = NULL;

	if (!fcport || !fcport->vha || !fcport->vha->hw)
		return;

	ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* Nothing to do when target mode has never been configured. */
	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		/* No session yet: create one (tgt_mutex, not sess_lock). */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ?
		    "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	/* A port that reappeared via the fabric is no longer "local". */
	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	/* Reuse an existing entry for this port id, superseding its IOCB. */
	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

/*
 * Drop one reference on a pending PLOGI ACK.  When the count reaches zero,
 * send the deferred NACK (PLOGI or PRLI), clear every fcport link pointing
 * at this entry, and free it.
 */
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
	struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	/* port_id bytes in the IOCB are stored little-end first. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

/*
 * Link a session to a pending PLOGI ACK, taking a reference.  Conflict
 * links are skipped for sessions already being deleted.
 */
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
		   struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link ==
	    QLT_PLOGI_LINK_CONFLICT) {
		/* Don't link to a session that is already going away. */
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	/* Replacing an existing link drops its reference first. */
	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

/*
 * Issue a LOGO ELS to the given port unless one is already pending for the
 * same port id, in which case only the dropped-command count is merged.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		res = 0;
		goto out;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			/* LOGO already in flight; just merge the count. */
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

/*
 * free_work handler: tear down a target session.  Sends LOGO/PRLO as
 * needed, releases the se_sess via the fabric module, waits for logout
 * completion, and finally clears all discovery/PLOGI-ack state under
 * tgt.sess_lock.
 */
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			INIT_LIST_HEAD(&logo.list);
			/* Skip explicit LOGO when our own PLOGI is pending. */
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
				sess->logout_completed = 0;
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}

		/* Tear down EDIF (FC-SP) security state for this port. */
		if (ha->flags.edif_enabled &&
		    (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
			sess->edif.authok = 0;
			if (!ha->flags.host_shutting_down) {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
				    __func__, sess->port_name);
				qla2x00_release_all_sadb(vha, sess);
			} else {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s bypassing release_all_sadb\n",
				    __func__);
			}

			qla_edif_clear_appdata(vha, sess);
			qla_edif_sess_down(vha, sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		/* Poll for logout completion (bounded busy-wait). */
		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * Driver timeout is set to 22 Sec, update count value to loop
			 * long enough for log-out to complete before advancing. Otherwise,
			 * straddling logout can interfere with re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	/* Ack a pending LOGO from the initiator, if one was deferred. */
	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	/* Unblock any port whose login was paused by this session. */
	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ?
own->ref_count : -1); 1120 } 1121 1122 if (own) { 1123 sess->fw_login_state = DSC_LS_PLOGI_PEND; 1124 qlt_plogi_ack_unref(vha, own); 1125 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 1126 } 1127 } 1128 1129 sess->explicit_logout = 0; 1130 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1131 1132 qla2x00_dfs_remove_rport(vha, sess); 1133 1134 spin_lock_irqsave(&vha->work_lock, flags); 1135 sess->flags &= ~FCF_ASYNC_SENT; 1136 sess->deleted = QLA_SESS_DELETED; 1137 sess->free_pending = 0; 1138 spin_unlock_irqrestore(&vha->work_lock, flags); 1139 1140 ql_dbg(ql_dbg_disc, vha, 0xf001, 1141 "Unregistration of sess %p %8phC finished fcp_cnt %d\n", 1142 sess, sess->port_name, vha->fcport_count); 1143 1144 if (tgt && (tgt->sess_count == 0)) 1145 wake_up_all(&tgt->waitQ); 1146 1147 if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && 1148 !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) && 1149 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { 1150 switch (vha->host->active_mode) { 1151 case MODE_INITIATOR: 1152 case MODE_DUAL: 1153 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1154 qla2xxx_wake_dpc(vha); 1155 break; 1156 case MODE_TARGET: 1157 default: 1158 /* no-op */ 1159 break; 1160 } 1161 } 1162 1163 if (vha->fcport_count == 0) 1164 wake_up_all(&vha->fcport_waitQ); 1165 } 1166 1167 /* ha->tgt.sess_lock supposed to be held on entry */ 1168 void qlt_unreg_sess(struct fc_port *sess) 1169 { 1170 struct scsi_qla_host *vha = sess->vha; 1171 unsigned long flags; 1172 1173 ql_dbg(ql_dbg_disc, sess->vha, 0x210a, 1174 "%s sess %p for deletion %8phC\n", 1175 __func__, sess, sess->port_name); 1176 1177 spin_lock_irqsave(&sess->vha->work_lock, flags); 1178 if (sess->free_pending) { 1179 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1180 return; 1181 } 1182 sess->free_pending = 1; 1183 /* 1184 * Use FCF_ASYNC_SENT flag to block other cmds used in sess 1185 * management from being sent. 
	 */
	sess->flags |= FCF_ASYNC_SENT;
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/*
 * Handle a reset notification from the firmware: on a global event
 * (nport_handle 0xFFFF) schedule deletion of the whole session DB,
 * otherwise issue the requested task-management function (mcmd) for the
 * addressed session. Returns -ESRCH when no session matches.
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

/*
 * If a chip reset happened since this session logged in, its firmware
 * state is gone: skip the logout on delete and mark the FW login state
 * unavailable.
 */
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

/*
 * Arrange for a session to be torn down from the del_work work item.
 * Safe to call repeatedly: DSC_DELETE_PEND and deletion-in-progress
 * (checked under work_lock) make it idempotent.
 */
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		/* Already deleted and no PLOGI ACKs pending: just wake waiters. */
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		/* Warn at most once per 5 seconds about a slow registration. */
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_log_warn, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC fc4_type %x\n",
	    sess, sess->port_name, sess->fc4_type);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

/* Schedule deletion of every fcport that has a target-core session. */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

/*
 * Translate a port ID (s_id) into a firmware loop_id by scanning the
 * firmware's list of logged-in ports.
 * Returns 0 on success, -ENOMEM on DMA alloc failure, -EBUSY when the
 * mailbox query fails, -ENOENT when the port ID is not found.
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		/* Entries are variable-sized; advance by the reported stride. */
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* Refuse new sessions while the target is being stopped. */
	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	/* Existing target-core session: just take a reference and reuse it. */
	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reaquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion requestion is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* Target mode not configured on this host: nothing to do. */
	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	/* Session was re-created after this request was generated: ignore. */
	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

/* Returns non-zero when all sessions are gone; used as a wait condition. */
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * Phase 1 of target stop: mark the target stopping, delete all sessions
 * and wait for them (and pending sess works) to drain.
 * Returns 0, or -EPERM when a stop is already in progress/complete.
 * Called by tcm_qla2xxx configfs code
 */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha
= tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	/* Re-flush until no new sess work was queued while we flushed. */
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	do {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_work(&tgt->sess_work);
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	} while (!list_empty(&tgt->sess_works_list));
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/*
 * Phase 2 of target stop: flip tgt_stop -> tgt_stopped. Must follow a
 * completed phase 1; logs and bails out otherwise.
 * Called by tcm_qla2xxx configfs code
 */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	/* In exclusive mode, re-enable the initiator via an ISP abort. */
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/*
 * Final teardown: run any outstanding stop phases, unlink qpair hints,
 * destroy the LUN->qpair btree and free the qla_tgt.
 * Called from qlt_remove_target() -> qla2x00_remove_one()
 */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	/* Detach every qpair hint under its queue-pair lock. */
	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/*
 * Queue a session work item of the given type; GFP_ATOMIC because we are
 * under the hardware lock. Returns 0 or -ENOMEM.
 * ha->hardware_lock supposed to be held on entry
 */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire.
 * Send a NOTIFY_ACK IOCB to the firmware, echoing the relevant fields of
 * the received immediate notify plus the caller-supplied SRR parameters.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	/* No point talking to the chip before firmware is up. */
	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	/* Only the PUREX flag is carried over for ELS notifies. */
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/* TODO qualify this with EDIF enable */
	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * Build and queue an ABTS response IOCB for mcmd: BA_ACC when
 * fc_tm_rsp == FCP_TMF_CMPL, otherwise BA_RJT. The response is tracked
 * in qpair->req->outstanding_cmds under a fresh handle.
 * Returns 0 on success or -EAGAIN when no ring space/handle is available.
 */
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* Copy the 24-bit F_CTL byte-by-byte into the LE header. */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	/* We respond to the originator, so swap the received S_ID/D_ID. */
	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire.
 * Fire-and-forget ABTS response (no completion tracking, handle is
 * QLA_TGT_SKIP_HANDLE): BA_ACC for FCP_TMF_CMPL, BA_RJT otherwise.
 * ids_reversed indicates the incoming frame already carries our IDs.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* Copy the 24-bit F_CTL byte-by-byte into the LE header. */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		/* Bits 9-11 carry the task attribute for the terminate. */
		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	/* Re-issue the ABTS response after terminating the exchange. */
	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);

}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was recieved
 * XXX does not go through the list of other port (which may have cmds
 * for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	/* Mark not-yet-dispatched ATIOs from this initiator/LUN aborted. */
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	/* Same for commands already in flight. */
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}

/*
 * Map a LUN to its queue-pair hint; falls back to hint 0 when queue
 * pairs are unavailable or the LUN has no mapping yet.
 */
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
	uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h = NULL;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (!h)
			h = &tgt->qphints[0];
	} else {
		h = &tgt->qphints[0];
	}

	return h;
}

/*
 * Work item: hand a task-management command to the target core. On
 * failure, send the response appropriate to the TMR type (ABTS reject,
 * SAM BUSY, or a notify-ack) and free the mcmd.
 */
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}

/*
 * Allocate an ABTS management command for the aborted exchange and queue
 * it to qla_tgt_wq, preferring the qpair/CPU of the command being aborted.
 * Returns 0, -ENOMEM on allocation failure, or -EIO when the tag does not
 * match an outstanding command.
 * ha->hardware_lock supposed to be held on entry
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
	struct qla_tgt_cmd *abort_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
	    le32_to_cpu(abts->exchange_addr_to_abort));
	if (!abort_cmd) {
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EIO;
	}
	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

	/* Run the abort on the same qpair/CPU as the command it targets. */
	if (abort_cmd->qpair) {
		mcmd->qpair = abort_cmd->qpair;
		mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
		mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
		mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}

/*
 * Validate an incoming ABTS and dispatch it; any invalid/unmatchable
 * request is answered immediately with FCP_TMF_REJECTED.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
	be_id_t s_id;
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);


	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}

/*
 * Send a CTIO7 carrying the FCP_RSP for a completed task-management
 * function; resp_code lands in the first response-info byte.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);


	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Task attribute in bits 9-11, status mode 1 with SEND_STATUS. */
	temp = (atio->u.isp24.attr << 9)|
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) 2278 { 2279 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2280 } 2281 EXPORT_SYMBOL(qlt_free_mcmd); 2282 2283 /* 2284 * ha->hardware_lock supposed to be held on entry. Might drop it, then 2285 * reacquire 2286 */ 2287 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 2288 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2289 { 2290 struct atio_from_isp *atio = &cmd->atio; 2291 struct ctio7_to_24xx *ctio; 2292 uint16_t temp; 2293 struct scsi_qla_host *vha = cmd->vha; 2294 2295 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2296 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2297 "sense_key=%02x, asc=%02x, ascq=%02x", 2298 vha, atio, scsi_status, sense_key, asc, ascq); 2299 2300 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2301 if (!ctio) { 2302 ql_dbg(ql_dbg_async, vha, 0x3067, 2303 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2304 vha->host_no, __func__); 2305 goto out; 2306 } 2307 2308 ctio->entry_type = CTIO_TYPE7; 2309 ctio->entry_count = 1; 2310 ctio->handle = QLA_TGT_SKIP_HANDLE; 2311 ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id); 2312 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2313 ctio->vp_index = vha->vp_idx; 2314 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 2315 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2316 temp = (atio->u.isp24.attr << 9) | 2317 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2318 ctio->u.status1.flags = cpu_to_le16(temp); 2319 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2320 ctio->u.status1.ox_id = cpu_to_le16(temp); 2321 ctio->u.status1.scsi_status = 2322 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2323 ctio->u.status1.response_len = cpu_to_le16(18); 2324 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2325 2326 if (ctio->u.status1.residual != 0) 2327 ctio->u.status1.scsi_status |= 2328 cpu_to_le16(SS_RESIDUAL_UNDER); 2329 2330 /* 
Fixed format sense data. */ 2331 ctio->u.status1.sense_data[0] = 0x70; 2332 ctio->u.status1.sense_data[2] = sense_key; 2333 /* Additional sense length */ 2334 ctio->u.status1.sense_data[7] = 0xa; 2335 /* ASC and ASCQ */ 2336 ctio->u.status1.sense_data[12] = asc; 2337 ctio->u.status1.sense_data[13] = ascq; 2338 2339 /* Memory Barrier */ 2340 wmb(); 2341 2342 if (qpair->reqq_start_iocbs) 2343 qpair->reqq_start_iocbs(qpair); 2344 else 2345 qla2x00_start_iocbs(vha, qpair->req); 2346 2347 out: 2348 return; 2349 } 2350 2351 /* callback from target fabric module code */ 2352 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2353 { 2354 struct scsi_qla_host *vha = mcmd->sess->vha; 2355 struct qla_hw_data *ha = vha->hw; 2356 unsigned long flags; 2357 struct qla_qpair *qpair = mcmd->qpair; 2358 bool free_mcmd = true; 2359 2360 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2361 "TM response mcmd (%p) status %#x state %#x", 2362 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2363 2364 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2365 2366 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { 2367 /* 2368 * Either the port is not online or this request was from 2369 * previous life, just abort the processing. 
2370 */ 2371 ql_dbg(ql_dbg_async, vha, 0xe100, 2372 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2373 vha->flags.online, qla2x00_reset_active(vha), 2374 mcmd->reset_count, qpair->chip_reset); 2375 ha->tgt.tgt_ops->free_mcmd(mcmd); 2376 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2377 return; 2378 } 2379 2380 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2381 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) { 2382 case ELS_LOGO: 2383 case ELS_PRLO: 2384 case ELS_TPRLO: 2385 ql_dbg(ql_dbg_disc, vha, 0x2106, 2386 "TM response logo %8phC status %#x state %#x", 2387 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2388 mcmd->flags); 2389 qlt_schedule_sess_for_deletion(mcmd->sess); 2390 break; 2391 default: 2392 qlt_send_notify_ack(vha->hw->base_qpair, 2393 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2394 break; 2395 } 2396 } else { 2397 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { 2398 qlt_build_abts_resp_iocb(mcmd); 2399 free_mcmd = false; 2400 } else 2401 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2402 mcmd->fc_tm_rsp); 2403 } 2404 /* 2405 * Make the callback for ->free_mcmd() to queue_work() and invoke 2406 * target_put_sess_cmd() to drop cmd_kref to 1. The final 2407 * target_put_sess_cmd() call will be made from TFO->check_stop_free() 2408 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd 2409 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2410 * qlt_xmit_tm_rsp() returns here.. 
2411 */ 2412 if (free_mcmd) 2413 ha->tgt.tgt_ops->free_mcmd(mcmd); 2414 2415 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2416 } 2417 EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2418 2419 /* No locks */ 2420 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) 2421 { 2422 struct qla_tgt_cmd *cmd = prm->cmd; 2423 2424 BUG_ON(cmd->sg_cnt == 0); 2425 2426 prm->sg = (struct scatterlist *)cmd->sg; 2427 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg, 2428 cmd->sg_cnt, cmd->dma_data_direction); 2429 if (unlikely(prm->seg_cnt == 0)) 2430 goto out_err; 2431 2432 prm->cmd->sg_mapped = 1; 2433 2434 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { 2435 /* 2436 * If greater than four sg entries then we need to allocate 2437 * the continuation entries 2438 */ 2439 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) 2440 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 2441 QLA_TGT_DATASEGS_PER_CMD_24XX, 2442 QLA_TGT_DATASEGS_PER_CONT_24XX); 2443 } else { 2444 /* DIF */ 2445 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2446 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2447 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); 2448 prm->tot_dsds = prm->seg_cnt; 2449 } else 2450 prm->tot_dsds = prm->seg_cnt; 2451 2452 if (cmd->prot_sg_cnt) { 2453 prm->prot_sg = cmd->prot_sg; 2454 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, 2455 cmd->prot_sg, cmd->prot_sg_cnt, 2456 cmd->dma_data_direction); 2457 if (unlikely(prm->prot_seg_cnt == 0)) 2458 goto out_err; 2459 2460 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2461 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2462 /* Dif Bundling not support here */ 2463 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, 2464 cmd->blk_sz); 2465 prm->tot_dsds += prm->prot_seg_cnt; 2466 } else 2467 prm->tot_dsds += prm->prot_seg_cnt; 2468 } 2469 } 2470 2471 return 0; 2472 2473 out_err: 2474 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, 2475 "qla_target(%d): PCI mapping failed: sg_cnt=%d", 2476 0, 
prm->cmd->sg_cnt); 2477 return -1; 2478 } 2479 2480 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2481 { 2482 struct qla_hw_data *ha; 2483 struct qla_qpair *qpair; 2484 2485 if (!cmd->sg_mapped) 2486 return; 2487 2488 qpair = cmd->qpair; 2489 2490 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt, 2491 cmd->dma_data_direction); 2492 cmd->sg_mapped = 0; 2493 2494 if (cmd->prot_sg_cnt) 2495 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, 2496 cmd->dma_data_direction); 2497 2498 if (!cmd->ctx) 2499 return; 2500 ha = vha->hw; 2501 if (cmd->ctx_dsd_alloced) 2502 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2503 2504 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2505 } 2506 2507 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, 2508 uint32_t req_cnt) 2509 { 2510 uint32_t cnt; 2511 struct req_que *req = qpair->req; 2512 2513 if (req->cnt < (req_cnt + 2)) { 2514 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : 2515 rd_reg_dword_relaxed(req->req_q_out)); 2516 2517 if (req->ring_index < cnt) 2518 req->cnt = cnt - req->ring_index; 2519 else 2520 req->cnt = req->length - (req->ring_index - cnt); 2521 2522 if (unlikely(req->cnt < (req_cnt + 2))) 2523 return -EAGAIN; 2524 } 2525 2526 req->cnt -= req_cnt; 2527 2528 return 0; 2529 } 2530 2531 /* 2532 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2533 */ 2534 static inline void *qlt_get_req_pkt(struct req_que *req) 2535 { 2536 /* Adjust ring index. 
*/ 2537 req->ring_index++; 2538 if (req->ring_index == req->length) { 2539 req->ring_index = 0; 2540 req->ring_ptr = req->ring; 2541 } else { 2542 req->ring_ptr++; 2543 } 2544 return (cont_entry_t *)req->ring_ptr; 2545 } 2546 2547 /* ha->hardware_lock supposed to be held on entry */ 2548 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) 2549 { 2550 uint32_t h; 2551 int index; 2552 uint8_t found = 0; 2553 struct req_que *req = qpair->req; 2554 2555 h = req->current_outstanding_cmd; 2556 2557 for (index = 1; index < req->num_outstanding_cmds; index++) { 2558 h++; 2559 if (h == req->num_outstanding_cmds) 2560 h = 1; 2561 2562 if (h == QLA_TGT_SKIP_HANDLE) 2563 continue; 2564 2565 if (!req->outstanding_cmds[h]) { 2566 found = 1; 2567 break; 2568 } 2569 } 2570 2571 if (found) { 2572 req->current_outstanding_cmd = h; 2573 } else { 2574 ql_dbg(ql_dbg_io, qpair->vha, 0x305b, 2575 "qla_target(%d): Ran out of empty cmd slots\n", 2576 qpair->vha->vp_idx); 2577 h = QLA_TGT_NULL_HANDLE; 2578 } 2579 2580 return h; 2581 } 2582 2583 /* ha->hardware_lock supposed to be held on entry */ 2584 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, 2585 struct qla_tgt_prm *prm) 2586 { 2587 uint32_t h; 2588 struct ctio7_to_24xx *pkt; 2589 struct atio_from_isp *atio = &prm->cmd->atio; 2590 uint16_t temp; 2591 struct qla_tgt_cmd *cmd = prm->cmd; 2592 2593 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; 2594 prm->pkt = pkt; 2595 memset(pkt, 0, sizeof(*pkt)); 2596 2597 pkt->entry_type = CTIO_TYPE7; 2598 pkt->entry_count = (uint8_t)prm->req_cnt; 2599 pkt->vp_index = prm->cmd->vp_idx; 2600 2601 h = qlt_make_handle(qpair); 2602 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2603 /* 2604 * CTIO type 7 from the firmware doesn't provide a way to 2605 * know the initiator's LOOP ID, hence we can't find 2606 * the session and, so, the command. 
2607 */ 2608 return -EAGAIN; 2609 } else 2610 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 2611 2612 pkt->handle = make_handle(qpair->req->id, h); 2613 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2614 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2615 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2616 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 2617 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2618 temp = atio->u.isp24.attr << 9; 2619 pkt->u.status0.flags |= cpu_to_le16(temp); 2620 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2621 pkt->u.status0.ox_id = cpu_to_le16(temp); 2622 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 2623 2624 if (cmd->edif) { 2625 if (cmd->dma_data_direction == DMA_TO_DEVICE) 2626 prm->cmd->sess->edif.rx_bytes += cmd->bufflen; 2627 if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2628 prm->cmd->sess->edif.tx_bytes += cmd->bufflen; 2629 2630 pkt->u.status0.edif_flags |= EF_EN_EDIF; 2631 } 2632 2633 return 0; 2634 } 2635 2636 /* 2637 * ha->hardware_lock supposed to be held on entry. We have already made sure 2638 * that there is sufficient amount of request entries to not drop it. 2639 */ 2640 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) 2641 { 2642 int cnt; 2643 struct dsd64 *cur_dsd; 2644 2645 /* Build continuation packets */ 2646 while (prm->seg_cnt > 0) { 2647 cont_a64_entry_t *cont_pkt64 = 2648 (cont_a64_entry_t *)qlt_get_req_pkt( 2649 prm->cmd->qpair->req); 2650 2651 /* 2652 * Make sure that from cont_pkt64 none of 2653 * 64-bit specific fields used for 32-bit 2654 * addressing. Cast to (cont_entry_t *) for 2655 * that. 
2656 */ 2657 2658 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2659 2660 cont_pkt64->entry_count = 1; 2661 cont_pkt64->sys_define = 0; 2662 2663 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2664 cur_dsd = cont_pkt64->dsd; 2665 2666 /* Load continuation entry data segments */ 2667 for (cnt = 0; 2668 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2669 cnt++, prm->seg_cnt--) { 2670 append_dsd64(&cur_dsd, prm->sg); 2671 prm->sg = sg_next(prm->sg); 2672 } 2673 } 2674 } 2675 2676 /* 2677 * ha->hardware_lock supposed to be held on entry. We have already made sure 2678 * that there is sufficient amount of request entries to not drop it. 2679 */ 2680 static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2681 { 2682 int cnt; 2683 struct dsd64 *cur_dsd; 2684 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2685 2686 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2687 2688 /* Setup packet address segment pointer */ 2689 cur_dsd = &pkt24->u.status0.dsd; 2690 2691 /* Set total data segment count */ 2692 if (prm->seg_cnt) 2693 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2694 2695 if (prm->seg_cnt == 0) { 2696 /* No data transfer */ 2697 cur_dsd->address = 0; 2698 cur_dsd->length = 0; 2699 return; 2700 } 2701 2702 /* If scatter gather */ 2703 2704 /* Load command entry data segments */ 2705 for (cnt = 0; 2706 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2707 cnt++, prm->seg_cnt--) { 2708 append_dsd64(&cur_dsd, prm->sg); 2709 prm->sg = sg_next(prm->sg); 2710 } 2711 2712 qlt_load_cont_data_segments(prm); 2713 } 2714 2715 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2716 { 2717 return cmd->bufflen > 0; 2718 } 2719 2720 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2721 { 2722 struct qla_tgt_cmd *cmd; 2723 struct scsi_qla_host *vha; 2724 2725 /* asc 0x10=dif error */ 2726 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2727 cmd = prm->cmd; 2728 vha = cmd->vha; 2729 /* ASCQ */ 2730 switch 
(prm->sense_buffer[13]) { 2731 case 1: 2732 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, 2733 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2734 "se_cmd=%p tag[%x]", 2735 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2736 cmd->atio.u.isp24.exchange_addr); 2737 break; 2738 case 2: 2739 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, 2740 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2741 "se_cmd=%p tag[%x]", 2742 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2743 cmd->atio.u.isp24.exchange_addr); 2744 break; 2745 case 3: 2746 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, 2747 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2748 "se_cmd=%p tag[%x]", 2749 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2750 cmd->atio.u.isp24.exchange_addr); 2751 break; 2752 default: 2753 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, 2754 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2755 "se_cmd=%p tag[%x]", 2756 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2757 cmd->atio.u.isp24.exchange_addr); 2758 break; 2759 } 2760 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); 2761 } 2762 } 2763 2764 /* 2765 * Called without ha->hardware_lock held 2766 */ 2767 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2768 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2769 uint32_t *full_req_cnt) 2770 { 2771 struct se_cmd *se_cmd = &cmd->se_cmd; 2772 struct qla_qpair *qpair = cmd->qpair; 2773 2774 prm->cmd = cmd; 2775 prm->tgt = cmd->tgt; 2776 prm->pkt = NULL; 2777 prm->rq_result = scsi_status; 2778 prm->sense_buffer = &cmd->sense_buffer[0]; 2779 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2780 prm->sg = NULL; 2781 prm->seg_cnt = -1; 2782 prm->req_cnt = 1; 2783 prm->residual = 0; 2784 prm->add_status_pkt = 0; 2785 prm->prot_sg = NULL; 2786 prm->prot_seg_cnt = 0; 2787 prm->tot_dsds = 0; 2788 2789 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2790 if (qlt_pci_map_calc_cnt(prm) != 0) 2791 return -EAGAIN; 2792 } 2793 2794 *full_req_cnt = prm->req_cnt; 2795 
2796 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2797 prm->residual = se_cmd->residual_count; 2798 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, 2799 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2800 prm->residual, se_cmd->tag, 2801 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2802 cmd->bufflen, prm->rq_result); 2803 prm->rq_result |= SS_RESIDUAL_UNDER; 2804 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2805 prm->residual = se_cmd->residual_count; 2806 ql_dbg_qp(ql_dbg_io, qpair, 0x305d, 2807 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2808 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2809 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2810 prm->rq_result |= SS_RESIDUAL_OVER; 2811 } 2812 2813 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2814 /* 2815 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2816 * ignored in *xmit_response() below 2817 */ 2818 if (qlt_has_data(cmd)) { 2819 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2820 (IS_FWI2_CAPABLE(cmd->vha->hw) && 2821 (prm->rq_result != 0))) { 2822 prm->add_status_pkt = 1; 2823 (*full_req_cnt)++; 2824 } 2825 } 2826 } 2827 2828 return 0; 2829 } 2830 2831 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, 2832 int sending_sense) 2833 { 2834 if (cmd->qpair->enable_class_2) 2835 return 0; 2836 2837 if (sending_sense) 2838 return cmd->conf_compl_supported; 2839 else 2840 return cmd->qpair->enable_explicit_conf && 2841 cmd->conf_compl_supported; 2842 } 2843 2844 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2845 struct qla_tgt_prm *prm) 2846 { 2847 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2848 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2849 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2850 if (qlt_need_explicit_conf(prm->cmd, 0)) { 2851 ctio->u.status0.flags |= cpu_to_le16( 2852 CTIO7_FLAGS_EXPLICIT_CONFORM | 2853 CTIO7_FLAGS_CONFORM_REQ); 2854 } 2855 
ctio->u.status0.residual = cpu_to_le32(prm->residual); 2856 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2857 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2858 int i; 2859 2860 if (qlt_need_explicit_conf(prm->cmd, 1)) { 2861 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2862 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, 2863 "Skipping EXPLICIT_CONFORM and " 2864 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2865 "non GOOD status\n"); 2866 goto skip_explict_conf; 2867 } 2868 ctio->u.status1.flags |= cpu_to_le16( 2869 CTIO7_FLAGS_EXPLICIT_CONFORM | 2870 CTIO7_FLAGS_CONFORM_REQ); 2871 } 2872 skip_explict_conf: 2873 ctio->u.status1.flags &= 2874 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2875 ctio->u.status1.flags |= 2876 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2877 ctio->u.status1.scsi_status |= 2878 cpu_to_le16(SS_SENSE_LEN_VALID); 2879 ctio->u.status1.sense_length = 2880 cpu_to_le16(prm->sense_buffer_len); 2881 for (i = 0; i < prm->sense_buffer_len/4; i++) { 2882 uint32_t v; 2883 2884 v = get_unaligned_be32( 2885 &((uint32_t *)prm->sense_buffer)[i]); 2886 put_unaligned_le32(v, 2887 &((uint32_t *)ctio->u.status1.sense_data)[i]); 2888 } 2889 qlt_print_dif_err(prm); 2890 2891 } else { 2892 ctio->u.status1.flags &= 2893 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2894 ctio->u.status1.flags |= 2895 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2896 ctio->u.status1.sense_length = 0; 2897 memset(ctio->u.status1.sense_data, 0, 2898 sizeof(ctio->u.status1.sense_data)); 2899 } 2900 2901 /* Sense with len > 24, is it possible ??? 
*/ 2902 } 2903 2904 static inline int 2905 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2906 { 2907 switch (se_cmd->prot_op) { 2908 case TARGET_PROT_DOUT_INSERT: 2909 case TARGET_PROT_DIN_STRIP: 2910 if (ql2xenablehba_err_chk >= 1) 2911 return 1; 2912 break; 2913 case TARGET_PROT_DOUT_PASS: 2914 case TARGET_PROT_DIN_PASS: 2915 if (ql2xenablehba_err_chk >= 2) 2916 return 1; 2917 break; 2918 case TARGET_PROT_DIN_INSERT: 2919 case TARGET_PROT_DOUT_STRIP: 2920 return 1; 2921 default: 2922 break; 2923 } 2924 return 0; 2925 } 2926 2927 static inline int 2928 qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2929 { 2930 switch (se_cmd->prot_op) { 2931 case TARGET_PROT_DIN_INSERT: 2932 case TARGET_PROT_DOUT_INSERT: 2933 case TARGET_PROT_DIN_STRIP: 2934 case TARGET_PROT_DOUT_STRIP: 2935 case TARGET_PROT_DIN_PASS: 2936 case TARGET_PROT_DOUT_PASS: 2937 return 1; 2938 default: 2939 return 0; 2940 } 2941 return 0; 2942 } 2943 2944 /* 2945 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2946 */ 2947 static void 2948 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2949 uint16_t *pfw_prot_opts) 2950 { 2951 struct se_cmd *se_cmd = &cmd->se_cmd; 2952 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2953 scsi_qla_host_t *vha = cmd->tgt->vha; 2954 struct qla_hw_data *ha = vha->hw; 2955 uint32_t t32 = 0; 2956 2957 /* 2958 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2959 * have been immplemented by TCM, before AppTag is avail. 
2960 * Look for modesense_handlers[] 2961 */ 2962 ctx->app_tag = 0; 2963 ctx->app_tag_mask[0] = 0x0; 2964 ctx->app_tag_mask[1] = 0x0; 2965 2966 if (IS_PI_UNINIT_CAPABLE(ha)) { 2967 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2968 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2969 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2970 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2971 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2972 } 2973 2974 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2975 2976 switch (se_cmd->prot_type) { 2977 case TARGET_DIF_TYPE0_PROT: 2978 /* 2979 * No check for ql2xenablehba_err_chk, as it 2980 * would be an I/O error if hba tag generation 2981 * is not done. 2982 */ 2983 ctx->ref_tag = cpu_to_le32(lba); 2984 /* enable ALL bytes of the ref tag */ 2985 ctx->ref_tag_mask[0] = 0xff; 2986 ctx->ref_tag_mask[1] = 0xff; 2987 ctx->ref_tag_mask[2] = 0xff; 2988 ctx->ref_tag_mask[3] = 0xff; 2989 break; 2990 case TARGET_DIF_TYPE1_PROT: 2991 /* 2992 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2993 * REF tag, and 16 bit app tag. 
2994 */ 2995 ctx->ref_tag = cpu_to_le32(lba); 2996 if (!qla_tgt_ref_mask_check(se_cmd) || 2997 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2998 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2999 break; 3000 } 3001 /* enable ALL bytes of the ref tag */ 3002 ctx->ref_tag_mask[0] = 0xff; 3003 ctx->ref_tag_mask[1] = 0xff; 3004 ctx->ref_tag_mask[2] = 0xff; 3005 ctx->ref_tag_mask[3] = 0xff; 3006 break; 3007 case TARGET_DIF_TYPE2_PROT: 3008 /* 3009 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 3010 * tag has to match LBA in CDB + N 3011 */ 3012 ctx->ref_tag = cpu_to_le32(lba); 3013 if (!qla_tgt_ref_mask_check(se_cmd) || 3014 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 3015 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 3016 break; 3017 } 3018 /* enable ALL bytes of the ref tag */ 3019 ctx->ref_tag_mask[0] = 0xff; 3020 ctx->ref_tag_mask[1] = 0xff; 3021 ctx->ref_tag_mask[2] = 0xff; 3022 ctx->ref_tag_mask[3] = 0xff; 3023 break; 3024 case TARGET_DIF_TYPE3_PROT: 3025 /* For TYPE 3 protection: 16 bit GUARD only */ 3026 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 3027 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 3028 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 3029 break; 3030 } 3031 } 3032 3033 static inline int 3034 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 3035 { 3036 struct dsd64 *cur_dsd; 3037 uint32_t transfer_length = 0; 3038 uint32_t data_bytes; 3039 uint32_t dif_bytes; 3040 uint8_t bundling = 1; 3041 struct crc_context *crc_ctx_pkt = NULL; 3042 struct qla_hw_data *ha; 3043 struct ctio_crc2_to_fw *pkt; 3044 dma_addr_t crc_ctx_dma; 3045 uint16_t fw_prot_opts = 0; 3046 struct qla_tgt_cmd *cmd = prm->cmd; 3047 struct se_cmd *se_cmd = &cmd->se_cmd; 3048 uint32_t h; 3049 struct atio_from_isp *atio = &prm->cmd->atio; 3050 struct qla_tc_param tc; 3051 uint16_t t16; 3052 scsi_qla_host_t *vha = cmd->vha; 3053 3054 ha = vha->hw; 3055 3056 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; 3057 prm->pkt = pkt; 3058 memset(pkt, 0, sizeof(*pkt)); 3059 3060 
ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 3061 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 3062 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 3063 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 3064 3065 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 3066 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 3067 bundling = 0; 3068 3069 /* Compute dif len and adjust data len to incude protection */ 3070 data_bytes = cmd->bufflen; 3071 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 3072 3073 switch (se_cmd->prot_op) { 3074 case TARGET_PROT_DIN_INSERT: 3075 case TARGET_PROT_DOUT_STRIP: 3076 transfer_length = data_bytes; 3077 if (cmd->prot_sg_cnt) 3078 data_bytes += dif_bytes; 3079 break; 3080 case TARGET_PROT_DIN_STRIP: 3081 case TARGET_PROT_DOUT_INSERT: 3082 case TARGET_PROT_DIN_PASS: 3083 case TARGET_PROT_DOUT_PASS: 3084 transfer_length = data_bytes + dif_bytes; 3085 break; 3086 default: 3087 BUG(); 3088 break; 3089 } 3090 3091 if (!qlt_hba_err_chk_enabled(se_cmd)) 3092 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 3093 /* HBA error checking enabled */ 3094 else if (IS_PI_UNINIT_CAPABLE(ha)) { 3095 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 3096 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 3097 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 3098 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 3099 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 3100 } 3101 3102 switch (se_cmd->prot_op) { 3103 case TARGET_PROT_DIN_INSERT: 3104 case TARGET_PROT_DOUT_INSERT: 3105 fw_prot_opts |= PO_MODE_DIF_INSERT; 3106 break; 3107 case TARGET_PROT_DIN_STRIP: 3108 case TARGET_PROT_DOUT_STRIP: 3109 fw_prot_opts |= PO_MODE_DIF_REMOVE; 3110 break; 3111 case TARGET_PROT_DIN_PASS: 3112 case TARGET_PROT_DOUT_PASS: 3113 fw_prot_opts |= PO_MODE_DIF_PASS; 3114 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? 
*/ 3115 break; 3116 default:/* Normal Request */ 3117 fw_prot_opts |= PO_MODE_DIF_PASS; 3118 break; 3119 } 3120 3121 /* ---- PKT ---- */ 3122 /* Update entry type to indicate Command Type CRC_2 IOCB */ 3123 pkt->entry_type = CTIO_CRC2; 3124 pkt->entry_count = 1; 3125 pkt->vp_index = cmd->vp_idx; 3126 3127 h = qlt_make_handle(qpair); 3128 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 3129 /* 3130 * CTIO type 7 from the firmware doesn't provide a way to 3131 * know the initiator's LOOP ID, hence we can't find 3132 * the session and, so, the command. 3133 */ 3134 return -EAGAIN; 3135 } else 3136 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 3137 3138 pkt->handle = make_handle(qpair->req->id, h); 3139 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3140 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3141 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3142 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 3143 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3144 3145 /* silence compile warning */ 3146 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3147 pkt->ox_id = cpu_to_le16(t16); 3148 3149 t16 = (atio->u.isp24.attr << 9); 3150 pkt->flags |= cpu_to_le16(t16); 3151 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 3152 3153 /* Set transfer direction */ 3154 if (cmd->dma_data_direction == DMA_TO_DEVICE) 3155 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 3156 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 3157 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 3158 3159 pkt->dseg_count = cpu_to_le16(prm->tot_dsds); 3160 /* Fibre channel byte count */ 3161 pkt->transfer_length = cpu_to_le32(transfer_length); 3162 3163 /* ----- CRC context -------- */ 3164 3165 /* Allocate CRC context from global pool */ 3166 crc_ctx_pkt = cmd->ctx = 3167 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 3168 3169 if (!crc_ctx_pkt) 3170 goto crc_queuing_error; 3171 3172 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3173 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 
3174 3175 /* Set handle */ 3176 crc_ctx_pkt->handle = pkt->handle; 3177 3178 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3179 3180 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); 3181 pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); 3182 3183 if (!bundling) { 3184 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; 3185 } else { 3186 /* 3187 * Configure Bundling if we need to fetch interlaving 3188 * protection PCI accesses 3189 */ 3190 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3191 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3192 crc_ctx_pkt->u.bundling.dseg_count = 3193 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3194 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; 3195 } 3196 3197 /* Finish the common fields of CRC pkt */ 3198 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3199 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3200 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3201 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3202 3203 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3204 tc.vha = vha; 3205 tc.blk_sz = cmd->blk_sz; 3206 tc.bufflen = cmd->bufflen; 3207 tc.sg = cmd->sg; 3208 tc.prot_sg = cmd->prot_sg; 3209 tc.ctx = crc_ctx_pkt; 3210 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 3211 3212 /* Walks data segments */ 3213 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3214 3215 if (!bundling && prm->prot_seg_cnt) { 3216 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3217 prm->tot_dsds, &tc)) 3218 goto crc_queuing_error; 3219 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3220 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3221 goto crc_queuing_error; 3222 3223 if (bundling && prm->prot_seg_cnt) { 3224 /* Walks dif segments */ 3225 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3226 3227 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; 3228 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3229 prm->prot_seg_cnt, cmd)) 3230 goto crc_queuing_error; 3231 } 3232 return QLA_SUCCESS; 
crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	/*
	 * Firmware not running, chip was reset since the command was built,
	 * or the session is going away: complete locally without posting
	 * an IOCB to the hardware.
	 */
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	/* Re-check under the qpair lock: chip may have reset meanwhile. */
	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
		    "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		res = 0;
		goto out_unmap_unlock;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	/* DIF-protected data phases go out as a CTIO CRC_2 IOCB. */
	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		/* Give the reserved ring entries back before bailing out. */
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			/* Status (if requested) rides in the same CTIO. */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				if (!cmd->edif)
					pkt->u.status0.residual =
						cpu_to_le32(prm.residual);

				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all neccessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

/*
 * Post a CTIO requesting write data (DATA_OUT) from the initiator;
 * completion is delivered later via tgt_ops->handle_data().
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg =
	    NULL;
	prm.req_cnt = 1;

	/* Stale command or dead firmware: fail the data phase locally. */
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
		    "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved ring entries before unwinding. */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);


/*
 * it is assumed either hardware_lock or qpair lock is held.
 *
 * Decode a CTIO_DIF_ERROR completion: extract actual vs. expected
 * T10-DIF tags from the firmware status, pick the matching sense data,
 * and either hand the command back to the data path or send a
 * CHECK CONDITION response.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t *ap = &sts->actual_dif[0];
	uint8_t *ep = &sts->expected_dif[0];
	uint64_t lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	/* DIF tuple layout in the status IOCB: guard(2) app(2) ref(4), BE. */
	cmd->a_guard = get_unaligned_be16(ap + 0);
	cmd->a_app_tag = get_unaligned_be16(ap + 2);
	cmd->a_ref_tag = get_unaligned_be32(ap + 4);

	cmd->e_guard = get_unaligned_be16(ep + 0);
	cmd->e_app_tag = get_unaligned_be16(ep + 2);
	cmd->e_ref_tag = get_unaligned_be32(ep + 4);

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		/*
		 * No goto here: a simultaneous ref-tag or guard mismatch
		 * below overrides the app-tag sense data.
		 */
		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}

/* If hardware_lock held on entry, might drop it, then reaquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	/* No completion handling wanted for this NACK. */
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	/* Echo the original immediate-notify fields back to the firmware. */
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

/* Wrapper for the above; caller must already hold the hardware lock. */
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	int rc;

	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}

/*
 * If hardware_lock held on entry, might drop it, then reaquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	/* NPIV: use the command's own vha when one is attached. */
	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;	/* cmd was already processed */
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

/*
 * Terminate an exchange, taking the qpair lock if the caller does not
 * already hold the HW lock; frees the command unless it belongs to an
 * upper-layer abort or is already marked aborted.
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

/*
 * Drop any queued QUEUE FULL commands and (re)compute the exchange-leak
 * threshold as a percentage of the firmware exchange count.
 */
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.  There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

/*
 * If too many exchanges have leaked (commands dropped for lack of
 * resources), schedule a chip reset to reclaim them.
 */
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

}

/*
 * Mark a command aborted and terminate its exchange with the firmware.
 * Returns -EIO if the command was already aborted (a normal, expected
 * second call on the abort path).
 */
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);

		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return -EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);

/*
 * Release a target command back to the fabric module's command pool.
 * Must not be called while the command is still on a workqueue or has
 * mapped SG (both are BUG_ON'd below).
 */
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	/* NOTE(review): jiffies_at_free is assigned twice; the second
	 * assignment below is redundant but harmless.
	 */
	cmd->jiffies_at_free = get_jiffies_64();
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire
 *
 * Decide whether a CTIO completion still needs an explicit TERM
 * EXCHANGE (firmware did not already terminate it) and send one.
 * Returns nonzero if a terminate was issued.
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		/* OF_TERM_EXCH set means firmware already terminated it. */
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}


/*
 * ha->hardware_lock supposed to be held on entry.
 *
 * Map a firmware completion handle back to the driver command: resolve
 * the request queue from the handle's QID, look up the outstanding-cmd
 * slot and clear it. Returns NULL for skip/NULL handles or bad input.
 */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		/* Slot is consumed: completion owns the command now. */
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 *
 * Handle a CTIO completion from the firmware: classify the status,
 * terminate the exchange when needed, then route the command to the
 * data-in path or back to the fabric module for final free.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	/* EDIF: a completed DATA_OUT may unblock a pending SA delete. */
	if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
	    cmd->sess) {
		qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
		    (struct ctio7_from_24xx *)ctio);
	}

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* DIF path frees or re-queues the cmd itself. */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}

		case CTIO_FAST_AUTH_ERR:
		case CTIO_FAST_INCOMP_PAD_LEN:
		case CTIO_FAST_INVALID_REQ:
		case CTIO_FAST_SPI_ERR:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* Write data arrived; hand it to the fabric module. */
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    !cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

/* Translate an FCP task attribute code from the ATIO into a TCM tag. */
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case
	     ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	/* Derive DMA direction from the FCP read/write data bits. */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/* Workqueue entry point: dequeue the command and run the I/O path. */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

/*
 * Reset the LUN->qpair mapping so it is rebuilt with the new number of
 * active qpairs (triggered by a user configuration change).
 */
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}

/*
 * Pick the qpair for a command by its LUN: reuse the cached mapping,
 * otherwise assign the least-loaded qpair and cache it in lun_qpair_map.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evently */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			/* Prefer the base qpair when it has no LUNs yet. */
			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			/* Otherwise scan for an idle or least-loaded qpair. */
			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
						cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
						    "Unable to insert lun %llx into lun_qpair_map\n",
						    cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				    "Unable to insert lun %llx into lun_qpair_map\n",
				    cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}

/*
 * Allocate and initialize a target command for an incoming ATIO,
 * binding it to the session and a qpair.
 */
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct fc_port *sess,
	struct atio_from_isp *atio)
{
	struct qla_tgt_cmd *cmd;

	cmd =
 vha->hw->tgt.tgt_ops->get_cmd(sess);
	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;
	cmd->edif = sess->edif.enable;

	return cmd;
}

/*
 * Build a qla_tgt_cmd for an incoming ATIO and queue it to qla_tgt_wq.
 * Returns 0 on success, negative errno on failure.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		/* run on the CPU chosen by qlt_assign_qpair() */
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work(qla_tgt_wq, &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

/*
 * Allocate a management command, fill it out for the requested TMF and
 * queue it to qlt_do_tmr_work().  Returns 0 on success, -ENOMEM if the
 * mempool is exhausted.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		/* route LUN-scoped TMFs to the qpair hint for that LUN */
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}

/*
 * Dispatch a task management function carried in an incoming ATIO.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/*
 * Build a 2G-style ABORT TASK mgmt command and hand it to the target
 * core via tgt_ops->handle_tmr().  Frees the mcmd itself on failure.
 * ha->hardware_lock supposed to be held on entry.
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * Handle an ABORT TASK immediate notify.  If no session matches the
 * loop id yet, defer the abort to the tgt session work queue instead.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

void
/* Mailbox completion callback for an explicit LOGO; records completion. */
qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from"
		    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
		    " LOGO failed: %#x\n",
		    __func__,
		    fcport->se_sess,
		    fcport,
		    fcport->port_name, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns existing session with matching wwn if present.
 * Null otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
	port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	/* pack the 24-bit port id into the same key format as sid_to_key() */
	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

/*
 * Handle an incoming PLOGI/PRLI immediate notify: invalidate stale
 * sessions with the same port id or loop id, then either terminate the
 * exchange, post creation of a new session, or link the login to the
 * existing session.  Returns 1 when the caller should ack the IOCB now,
 * 0 when the ack is sent asynchronously (see qlt_24xx_handle_els()).
 */
static int qlt_handle_login(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int
 res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	/* edif: reject a non-secure PLOGI while the app doorbell is down */
	if (vha->hw->flags.edif_enabled &&
	    !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
	    iocb->u.isp24.status_subcode == ELS_PLOGI &&
	    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_INACTIVE(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		} else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
		    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		}
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		/* only a PLOGI carries the remote node name */
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		/* rate-limit the warning to once every 5 seconds */
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;
	sess->loop_id = loop_id;

	if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
		/* remote port has assigned Port ID */
		if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
			vha->d_id = sess->d_id;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC - send port online\n",
		    __func__, sess->port_name);

		qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
		    sess->d_id.b24);
	}

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		/*
		 * NOTE(review): loop_id, d_id and fw_login_state are
		 * re-assigned below although they were already set just
		 * above; the duplicates appear redundant.
		 */
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;


	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);


		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 Might drop it, then reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		/* N2N: PRLI is handled like a login */
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize prli\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				/* rport registration still in progress */
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
/*
 * Demultiplex an immediate-notify IOCB from the ISP and ack it, unless
 * the specific handler took ownership of sending the ack.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* ack any previously stashed LINK REINIT before replacing it */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		/* no session: terminate the exchange instead of sending busy */
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
	    CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if
the explicit conformation is used. 5427 */ 5428 ctio24->u.status1.ox_id = 5429 cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 5430 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5431 5432 ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 5433 5434 if (ctio24->u.status1.residual != 0) 5435 ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); 5436 5437 /* Memory Barrier */ 5438 wmb(); 5439 if (qpair->reqq_start_iocbs) 5440 qpair->reqq_start_iocbs(qpair); 5441 else 5442 qla2x00_start_iocbs(vha, qpair->req); 5443 return 0; 5444 } 5445 5446 /* 5447 * This routine is used to allocate a command for either a QFull condition 5448 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5449 * out previously. 5450 */ 5451 static void 5452 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5453 struct atio_from_isp *atio, uint16_t status, int qfull) 5454 { 5455 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5456 struct qla_hw_data *ha = vha->hw; 5457 struct fc_port *sess; 5458 struct qla_tgt_cmd *cmd; 5459 unsigned long flags; 5460 5461 if (unlikely(tgt->tgt_stop)) { 5462 ql_dbg(ql_dbg_io, vha, 0x300a, 5463 "New command while device %p is shutting down\n", tgt); 5464 return; 5465 } 5466 5467 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5468 vha->hw->tgt.num_qfull_cmds_dropped++; 5469 if (vha->hw->tgt.num_qfull_cmds_dropped > 5470 vha->qla_stats.stat_max_qfull_cmds_dropped) 5471 vha->qla_stats.stat_max_qfull_cmds_dropped = 5472 vha->hw->tgt.num_qfull_cmds_dropped; 5473 5474 ql_dbg(ql_dbg_io, vha, 0x3068, 5475 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5476 vha->vp_idx, __func__, 5477 vha->hw->tgt.num_qfull_cmds_dropped); 5478 5479 qlt_chk_exch_leak_thresh_hold(vha); 5480 return; 5481 } 5482 5483 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5484 (vha, atio->u.isp24.fcp_hdr.s_id); 5485 if (!sess) 5486 return; 5487 5488 cmd = ha->tgt.tgt_ops->get_cmd(sess); 5489 if (!cmd) { 5490 ql_dbg(ql_dbg_io, vha, 
0x3009, 5491 "qla_target(%d): %s: Allocation of cmd failed\n", 5492 vha->vp_idx, __func__); 5493 5494 vha->hw->tgt.num_qfull_cmds_dropped++; 5495 if (vha->hw->tgt.num_qfull_cmds_dropped > 5496 vha->qla_stats.stat_max_qfull_cmds_dropped) 5497 vha->qla_stats.stat_max_qfull_cmds_dropped = 5498 vha->hw->tgt.num_qfull_cmds_dropped; 5499 5500 qlt_chk_exch_leak_thresh_hold(vha); 5501 return; 5502 } 5503 5504 qlt_incr_num_pend_cmds(vha); 5505 INIT_LIST_HEAD(&cmd->cmd_list); 5506 memcpy(&cmd->atio, atio, sizeof(*atio)); 5507 5508 cmd->tgt = vha->vha_tgt.qla_tgt; 5509 cmd->vha = vha; 5510 cmd->reset_count = ha->base_qpair->chip_reset; 5511 cmd->q_full = 1; 5512 cmd->qpair = ha->base_qpair; 5513 5514 if (qfull) { 5515 cmd->q_full = 1; 5516 /* NOTE: borrowing the state field to carry the status */ 5517 cmd->state = status; 5518 } else 5519 cmd->term_exchg = 1; 5520 5521 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5522 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5523 5524 vha->hw->tgt.num_qfull_cmds_alloc++; 5525 if (vha->hw->tgt.num_qfull_cmds_alloc > 5526 vha->qla_stats.stat_max_qfull_cmds_alloc) 5527 vha->qla_stats.stat_max_qfull_cmds_alloc = 5528 vha->hw->tgt.num_qfull_cmds_alloc; 5529 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5530 } 5531 5532 int 5533 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5534 { 5535 struct scsi_qla_host *vha = qpair->vha; 5536 struct qla_hw_data *ha = vha->hw; 5537 unsigned long flags; 5538 struct qla_tgt_cmd *cmd, *tcmd; 5539 struct list_head free_list, q_full_list; 5540 int rc = 0; 5541 5542 if (list_empty(&ha->tgt.q_full_list)) 5543 return 0; 5544 5545 INIT_LIST_HEAD(&free_list); 5546 INIT_LIST_HEAD(&q_full_list); 5547 5548 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5549 if (list_empty(&ha->tgt.q_full_list)) { 5550 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5551 return 0; 5552 } 5553 5554 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5555 
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5556 5557 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5558 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5559 if (cmd->q_full) 5560 /* cmd->state is a borrowed field to hold status */ 5561 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5562 else if (cmd->term_exchg) 5563 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5564 5565 if (rc == -ENOMEM) 5566 break; 5567 5568 if (cmd->q_full) 5569 ql_dbg(ql_dbg_io, vha, 0x3006, 5570 "%s: busy sent for ox_id[%04x]\n", __func__, 5571 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5572 else if (cmd->term_exchg) 5573 ql_dbg(ql_dbg_io, vha, 0x3007, 5574 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5575 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5576 else 5577 ql_dbg(ql_dbg_io, vha, 0x3008, 5578 "%s: Unexpected cmd in QFull list %p\n", __func__, 5579 cmd); 5580 5581 list_move_tail(&cmd->cmd_list, &free_list); 5582 5583 /* piggy back on hardware_lock for protection */ 5584 vha->hw->tgt.num_qfull_cmds_alloc--; 5585 } 5586 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5587 5588 cmd = NULL; 5589 5590 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5591 list_del(&cmd->cmd_list); 5592 /* This cmd was never sent to TCM. 
There is no need 5593 * to schedule free or call free_cmd 5594 */ 5595 qlt_free_cmd(cmd); 5596 } 5597 5598 if (!list_empty(&q_full_list)) { 5599 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5600 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5601 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5602 } 5603 5604 return rc; 5605 } 5606 5607 static void 5608 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5609 uint16_t status) 5610 { 5611 int rc = 0; 5612 struct scsi_qla_host *vha = qpair->vha; 5613 5614 rc = __qlt_send_busy(qpair, atio, status); 5615 if (rc == -ENOMEM) 5616 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5617 } 5618 5619 static int 5620 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5621 struct atio_from_isp *atio, uint8_t ha_locked) 5622 { 5623 struct qla_hw_data *ha = vha->hw; 5624 unsigned long flags; 5625 5626 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5627 return 0; 5628 5629 if (!ha_locked) 5630 spin_lock_irqsave(&ha->hardware_lock, flags); 5631 qlt_send_busy(qpair, atio, qla_sam_status); 5632 if (!ha_locked) 5633 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5634 5635 return 1; 5636 } 5637 5638 /* ha->hardware_lock supposed to be held on entry */ 5639 /* called via callback from qla2xxx */ 5640 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5641 struct atio_from_isp *atio, uint8_t ha_locked) 5642 { 5643 struct qla_hw_data *ha = vha->hw; 5644 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5645 int rc; 5646 unsigned long flags = 0; 5647 5648 if (unlikely(tgt == NULL)) { 5649 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5650 "ATIO pkt, but no tgt (ha %p)", ha); 5651 return; 5652 } 5653 /* 5654 * In tgt_stop mode we also should allow all requests to pass. 5655 * Otherwise, some commands can stuck. 
5656 */ 5657 5658 tgt->atio_irq_cmd_count++; 5659 5660 switch (atio->u.raw.entry_type) { 5661 case ATIO_TYPE7: 5662 if (unlikely(atio->u.isp24.exchange_addr == 5663 cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) { 5664 ql_dbg(ql_dbg_io, vha, 0x3065, 5665 "qla_target(%d): ATIO_TYPE7 " 5666 "received with UNKNOWN exchange address, " 5667 "sending QUEUE_FULL\n", vha->vp_idx); 5668 if (!ha_locked) 5669 spin_lock_irqsave(&ha->hardware_lock, flags); 5670 qlt_send_busy(ha->base_qpair, atio, qla_sam_status); 5671 if (!ha_locked) 5672 spin_unlock_irqrestore(&ha->hardware_lock, 5673 flags); 5674 break; 5675 } 5676 5677 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5678 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, 5679 atio, ha_locked); 5680 if (rc != 0) { 5681 tgt->atio_irq_cmd_count--; 5682 return; 5683 } 5684 rc = qlt_handle_cmd_for_atio(vha, atio); 5685 } else { 5686 rc = qlt_handle_task_mgmt(vha, atio); 5687 } 5688 if (unlikely(rc != 0)) { 5689 if (!ha_locked) 5690 spin_lock_irqsave(&ha->hardware_lock, flags); 5691 switch (rc) { 5692 case -ENODEV: 5693 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5694 "qla_target: Unable to send command to target\n"); 5695 break; 5696 case -EBADF: 5697 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5698 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5699 qlt_send_term_exchange(ha->base_qpair, NULL, 5700 atio, 1, 0); 5701 break; 5702 case -EBUSY: 5703 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5704 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5705 vha->vp_idx); 5706 qlt_send_busy(ha->base_qpair, atio, 5707 tc_sam_status); 5708 break; 5709 default: 5710 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5711 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5712 vha->vp_idx); 5713 qlt_send_busy(ha->base_qpair, atio, 5714 qla_sam_status); 5715 break; 5716 } 5717 if (!ha_locked) 5718 spin_unlock_irqrestore(&ha->hardware_lock, 5719 flags); 5720 } 5721 break; 5722 5723 case IMMED_NOTIFY_TYPE: 
5724 { 5725 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5726 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5727 "qla_target(%d): Received ATIO packet %x " 5728 "with error status %x\n", vha->vp_idx, 5729 atio->u.raw.entry_type, 5730 atio->u.isp2x.entry_status); 5731 break; 5732 } 5733 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5734 5735 if (!ha_locked) 5736 spin_lock_irqsave(&ha->hardware_lock, flags); 5737 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5738 if (!ha_locked) 5739 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5740 break; 5741 } 5742 5743 default: 5744 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5745 "qla_target(%d): Received unknown ATIO atio " 5746 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5747 break; 5748 } 5749 5750 tgt->atio_irq_cmd_count--; 5751 } 5752 5753 /* 5754 * qpair lock is assume to be held 5755 * rc = 0 : send terminate & abts respond 5756 * rc != 0: do not send term & abts respond 5757 */ 5758 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, 5759 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry) 5760 { 5761 struct qla_hw_data *ha = vha->hw; 5762 int rc = 0; 5763 5764 /* 5765 * Detect unresolved exchange. If the same ABTS is unable 5766 * to terminate an existing command and the same ABTS loops 5767 * between FW & Driver, then force FW dump. Under 1 jiff, 5768 * we should see multiple loops. 5769 */ 5770 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort && 5771 qpair->retry_term_jiff == jiffies) { 5772 /* found existing exchange */ 5773 qpair->retry_term_cnt++; 5774 if (qpair->retry_term_cnt >= 5) { 5775 rc = -EIO; 5776 qpair->retry_term_cnt = 0; 5777 ql_log(ql_log_warn, vha, 0xffff, 5778 "Unable to send ABTS Respond. 
Dumping firmware.\n"); 5779 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer, 5780 vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); 5781 5782 if (qpair == ha->base_qpair) 5783 ha->isp_ops->fw_dump(vha); 5784 else 5785 qla2xxx_dump_fw(vha); 5786 5787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5788 qla2xxx_wake_dpc(vha); 5789 } 5790 } else if (qpair->retry_term_jiff != jiffies) { 5791 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort; 5792 qpair->retry_term_cnt = 0; 5793 qpair->retry_term_jiff = jiffies; 5794 } 5795 5796 return rc; 5797 } 5798 5799 5800 static void qlt_handle_abts_completion(struct scsi_qla_host *vha, 5801 struct rsp_que *rsp, response_t *pkt) 5802 { 5803 struct abts_resp_from_24xx_fw *entry = 5804 (struct abts_resp_from_24xx_fw *)pkt; 5805 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; 5806 struct qla_tgt_mgmt_cmd *mcmd; 5807 struct qla_hw_data *ha = vha->hw; 5808 5809 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt); 5810 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { 5811 ql_dbg(ql_dbg_async, vha, 0xe064, 5812 "qla_target(%d): ABTS Comp without mcmd\n", 5813 vha->vp_idx); 5814 return; 5815 } 5816 5817 if (mcmd) 5818 vha = mcmd->vha; 5819 vha->vha_tgt.qla_tgt->abts_resp_expected--; 5820 5821 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5822 "ABTS_RESP_24XX: compl_status %x\n", 5823 entry->compl_status); 5824 5825 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { 5826 if (le32_to_cpu(entry->error_subcode1) == 0x1E && 5827 le32_to_cpu(entry->error_subcode2) == 0) { 5828 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { 5829 ha->tgt.tgt_ops->free_mcmd(mcmd); 5830 return; 5831 } 5832 qlt_24xx_retry_term_exchange(vha, rsp->qpair, 5833 pkt, mcmd); 5834 } else { 5835 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5836 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", 5837 vha->vp_idx, entry->compl_status, 5838 entry->error_subcode1, 5839 entry->error_subcode2); 5840 ha->tgt.tgt_ops->free_mcmd(mcmd); 5841 } 5842 } else if (mcmd) { 5843 
ha->tgt.tgt_ops->free_mcmd(mcmd); 5844 } 5845 } 5846 5847 /* ha->hardware_lock supposed to be held on entry */ 5848 /* called via callback from qla2xxx */ 5849 static void qlt_response_pkt(struct scsi_qla_host *vha, 5850 struct rsp_que *rsp, response_t *pkt) 5851 { 5852 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5853 5854 if (unlikely(tgt == NULL)) { 5855 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5856 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5857 vha->vp_idx, pkt->entry_type, vha->hw); 5858 return; 5859 } 5860 5861 /* 5862 * In tgt_stop mode we also should allow all requests to pass. 5863 * Otherwise, some commands can stuck. 5864 */ 5865 5866 switch (pkt->entry_type) { 5867 case CTIO_CRC2: 5868 case CTIO_TYPE7: 5869 { 5870 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5871 5872 qlt_do_ctio_completion(vha, rsp, entry->handle, 5873 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5874 entry); 5875 break; 5876 } 5877 5878 case ACCEPT_TGT_IO_TYPE: 5879 { 5880 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5881 int rc; 5882 5883 if (atio->u.isp2x.status != 5884 cpu_to_le16(ATIO_CDB_VALID)) { 5885 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5886 "qla_target(%d): ATIO with error " 5887 "status %x received\n", vha->vp_idx, 5888 le16_to_cpu(atio->u.isp2x.status)); 5889 break; 5890 } 5891 5892 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5893 if (rc != 0) 5894 return; 5895 5896 rc = qlt_handle_cmd_for_atio(vha, atio); 5897 if (unlikely(rc != 0)) { 5898 switch (rc) { 5899 case -ENODEV: 5900 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5901 "qla_target: Unable to send command to target\n"); 5902 break; 5903 case -EBADF: 5904 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5905 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5906 qlt_send_term_exchange(rsp->qpair, NULL, 5907 atio, 1, 0); 5908 break; 5909 case -EBUSY: 5910 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5911 "qla_target(%d): Unable to send command to target, sending BUSY 
status\n", 5912 vha->vp_idx); 5913 qlt_send_busy(rsp->qpair, atio, 5914 tc_sam_status); 5915 break; 5916 default: 5917 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5918 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5919 vha->vp_idx); 5920 qlt_send_busy(rsp->qpair, atio, 5921 qla_sam_status); 5922 break; 5923 } 5924 } 5925 } 5926 break; 5927 5928 case CONTINUE_TGT_IO_TYPE: 5929 { 5930 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5931 5932 qlt_do_ctio_completion(vha, rsp, entry->handle, 5933 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5934 entry); 5935 break; 5936 } 5937 5938 case CTIO_A64_TYPE: 5939 { 5940 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5941 5942 qlt_do_ctio_completion(vha, rsp, entry->handle, 5943 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5944 entry); 5945 break; 5946 } 5947 5948 case IMMED_NOTIFY_TYPE: 5949 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5950 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5951 break; 5952 5953 case NOTIFY_ACK_TYPE: 5954 if (tgt->notify_ack_expected > 0) { 5955 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5956 5957 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5958 "NOTIFY_ACK seq %08x status %x\n", 5959 le16_to_cpu(entry->u.isp2x.seq_id), 5960 le16_to_cpu(entry->u.isp2x.status)); 5961 tgt->notify_ack_expected--; 5962 if (entry->u.isp2x.status != 5963 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5964 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5965 "qla_target(%d): NOTIFY_ACK " 5966 "failed %x\n", vha->vp_idx, 5967 le16_to_cpu(entry->u.isp2x.status)); 5968 } 5969 } else { 5970 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5971 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5972 vha->vp_idx); 5973 } 5974 break; 5975 5976 case ABTS_RECV_24XX: 5977 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5978 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5979 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5980 break; 5981 5982 case ABTS_RESP_24XX: 5983 if (tgt->abts_resp_expected 
> 0) { 5984 qlt_handle_abts_completion(vha, rsp, pkt); 5985 } else { 5986 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5987 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5988 "received\n", vha->vp_idx); 5989 } 5990 break; 5991 5992 default: 5993 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5994 "qla_target(%d): Received unknown response pkt " 5995 "type %x\n", vha->vp_idx, pkt->entry_type); 5996 break; 5997 } 5998 5999 } 6000 6001 /* 6002 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 6003 */ 6004 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 6005 uint16_t *mailbox) 6006 { 6007 struct qla_hw_data *ha = vha->hw; 6008 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6009 int login_code; 6010 6011 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 6012 return; 6013 6014 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 6015 IS_QLA2100(ha)) 6016 return; 6017 /* 6018 * In tgt_stop mode we also should allow all requests to pass. 6019 * Otherwise, some commands can stuck. 6020 */ 6021 6022 6023 switch (code) { 6024 case MBA_RESET: /* Reset */ 6025 case MBA_SYSTEM_ERR: /* System Error */ 6026 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 6027 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 6028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 6029 "qla_target(%d): System error async event %#x " 6030 "occurred", vha->vp_idx, code); 6031 break; 6032 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. 
*/ 6033 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6034 break; 6035 6036 case MBA_LOOP_UP: 6037 { 6038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 6039 "qla_target(%d): Async LOOP_UP occurred " 6040 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 6041 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 6042 if (tgt->link_reinit_iocb_pending) { 6043 qlt_send_notify_ack(ha->base_qpair, 6044 &tgt->link_reinit_iocb, 6045 0, 0, 0, 0, 0, 0); 6046 tgt->link_reinit_iocb_pending = 0; 6047 } 6048 break; 6049 } 6050 6051 case MBA_LIP_OCCURRED: 6052 case MBA_LOOP_DOWN: 6053 case MBA_LIP_RESET: 6054 case MBA_RSCN_UPDATE: 6055 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 6056 "qla_target(%d): Async event %#x occurred " 6057 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 6058 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 6059 break; 6060 6061 case MBA_REJECTED_FCP_CMD: 6062 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 6063 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 6064 vha->vp_idx, 6065 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 6066 6067 if (mailbox[3] == 1) { 6068 /* exchange starvation. */ 6069 vha->hw->exch_starvation++; 6070 if (vha->hw->exch_starvation > 5) { 6071 ql_log(ql_log_warn, vha, 0xd03a, 6072 "Exchange starvation-. 
Resetting RISC\n"); 6073 6074 vha->hw->exch_starvation = 0; 6075 if (IS_P3P_TYPE(vha->hw)) 6076 set_bit(FCOE_CTX_RESET_NEEDED, 6077 &vha->dpc_flags); 6078 else 6079 set_bit(ISP_ABORT_NEEDED, 6080 &vha->dpc_flags); 6081 qla2xxx_wake_dpc(vha); 6082 } 6083 } 6084 break; 6085 6086 case MBA_PORT_UPDATE: 6087 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 6088 "qla_target(%d): Port update async event %#x " 6089 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 6090 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 6091 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 6092 6093 login_code = mailbox[2]; 6094 if (login_code == 0x4) { 6095 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 6096 "Async MB 2: Got PLOGI Complete\n"); 6097 vha->hw->exch_starvation = 0; 6098 } else if (login_code == 0x7) 6099 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 6100 "Async MB 2: Port Logged Out\n"); 6101 break; 6102 default: 6103 break; 6104 } 6105 6106 } 6107 6108 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 6109 uint16_t loop_id) 6110 { 6111 fc_port_t *fcport, *tfcp, *del; 6112 int rc; 6113 unsigned long flags; 6114 u8 newfcport = 0; 6115 6116 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 6117 if (!fcport) { 6118 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 6119 "qla_target(%d): Allocation of tmp FC port failed", 6120 vha->vp_idx); 6121 return NULL; 6122 } 6123 6124 fcport->loop_id = loop_id; 6125 6126 rc = qla24xx_gpdb_wait(vha, fcport, 0); 6127 if (rc != QLA_SUCCESS) { 6128 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 6129 "qla_target(%d): Failed to retrieve fcport " 6130 "information -- get_port_database() returned %x " 6131 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 6132 kfree(fcport); 6133 return NULL; 6134 } 6135 6136 del = NULL; 6137 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 6138 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 6139 6140 if (tfcp) { 6141 tfcp->d_id = fcport->d_id; 6142 tfcp->port_type = fcport->port_type; 6143 tfcp->supported_classes = fcport->supported_classes; 
6144 tfcp->flags |= fcport->flags; 6145 tfcp->scan_state = QLA_FCPORT_FOUND; 6146 6147 del = fcport; 6148 fcport = tfcp; 6149 } else { 6150 if (vha->hw->current_topology == ISP_CFG_F) 6151 fcport->flags |= FCF_FABRIC_DEVICE; 6152 6153 list_add_tail(&fcport->list, &vha->vp_fcports); 6154 if (!IS_SW_RESV_ADDR(fcport->d_id)) 6155 vha->fcport_count++; 6156 fcport->login_gen++; 6157 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); 6158 fcport->login_succ = 1; 6159 newfcport = 1; 6160 } 6161 6162 fcport->deleted = 0; 6163 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 6164 6165 switch (vha->host->active_mode) { 6166 case MODE_INITIATOR: 6167 case MODE_DUAL: 6168 if (newfcport) { 6169 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 6170 qla24xx_sched_upd_fcport(fcport); 6171 } else { 6172 ql_dbg(ql_dbg_disc, vha, 0x20ff, 6173 "%s %d %8phC post gpsc fcp_cnt %d\n", 6174 __func__, __LINE__, fcport->port_name, vha->fcport_count); 6175 qla24xx_post_gpsc_work(vha, fcport); 6176 } 6177 } 6178 break; 6179 6180 case MODE_TARGET: 6181 default: 6182 break; 6183 } 6184 if (del) 6185 qla2x00_free_fcport(del); 6186 6187 return fcport; 6188 } 6189 6190 /* Must be called under tgt_mutex */ 6191 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 6192 be_id_t s_id) 6193 { 6194 struct fc_port *sess = NULL; 6195 fc_port_t *fcport = NULL; 6196 int rc, global_resets; 6197 uint16_t loop_id = 0; 6198 6199 if (s_id.domain == 0xFF && s_id.area == 0xFC) { 6200 /* 6201 * This is Domain Controller, so it should be 6202 * OK to drop SCSI commands from it. 
6203 */ 6204 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 6205 "Unable to find initiator with S_ID %x:%x:%x", 6206 s_id.domain, s_id.area, s_id.al_pa); 6207 return NULL; 6208 } 6209 6210 mutex_lock(&vha->vha_tgt.tgt_mutex); 6211 6212 retry: 6213 global_resets = 6214 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 6215 6216 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 6217 if (rc != 0) { 6218 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6219 6220 ql_log(ql_log_info, vha, 0xf071, 6221 "qla_target(%d): Unable to find " 6222 "initiator with S_ID %x:%x:%x", 6223 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa); 6224 6225 if (rc == -ENOENT) { 6226 qlt_port_logo_t logo; 6227 6228 logo.id = be_to_port_id(s_id); 6229 logo.cmd_count = 1; 6230 qlt_send_first_logo(vha, &logo); 6231 } 6232 6233 return NULL; 6234 } 6235 6236 fcport = qlt_get_port_database(vha, loop_id); 6237 if (!fcport) { 6238 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6239 return NULL; 6240 } 6241 6242 if (global_resets != 6243 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 6244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 6245 "qla_target(%d): global reset during session discovery " 6246 "(counter was %d, new %d), retrying", vha->vp_idx, 6247 global_resets, 6248 atomic_read(&vha->vha_tgt. 
6249 qla_tgt->tgt_global_resets_count)); 6250 goto retry; 6251 } 6252 6253 sess = qlt_create_sess(vha, fcport, true); 6254 6255 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6256 6257 return sess; 6258 } 6259 6260 static void qlt_abort_work(struct qla_tgt *tgt, 6261 struct qla_tgt_sess_work_param *prm) 6262 { 6263 struct scsi_qla_host *vha = tgt->vha; 6264 struct qla_hw_data *ha = vha->hw; 6265 struct fc_port *sess = NULL; 6266 unsigned long flags = 0, flags2 = 0; 6267 be_id_t s_id; 6268 int rc; 6269 6270 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6271 6272 if (tgt->tgt_stop) 6273 goto out_term2; 6274 6275 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id); 6276 6277 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6278 if (!sess) { 6279 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6280 6281 sess = qlt_make_local_sess(vha, s_id); 6282 /* sess has got an extra creation ref */ 6283 6284 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6285 if (!sess) 6286 goto out_term2; 6287 } else { 6288 if (sess->deleted) { 6289 sess = NULL; 6290 goto out_term2; 6291 } 6292 6293 if (!kref_get_unless_zero(&sess->sess_kref)) { 6294 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, 6295 "%s: kref_get fail %8phC \n", 6296 __func__, sess->port_name); 6297 sess = NULL; 6298 goto out_term2; 6299 } 6300 } 6301 6302 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 6303 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6304 6305 ha->tgt.tgt_ops->put_sess(sess); 6306 6307 if (rc != 0) 6308 goto out_term; 6309 return; 6310 6311 out_term2: 6312 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6313 6314 out_term: 6315 spin_lock_irqsave(&ha->hardware_lock, flags); 6316 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6317 FCP_TMF_REJECTED, false); 6318 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6319 } 6320 6321 static void qlt_sess_work_fn(struct work_struct *work) 6322 { 6323 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 6324 struct scsi_qla_host *vha = tgt->vha; 
6325 unsigned long flags; 6326 6327 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 6328 6329 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6330 while (!list_empty(&tgt->sess_works_list)) { 6331 struct qla_tgt_sess_work_param *prm = list_entry( 6332 tgt->sess_works_list.next, typeof(*prm), 6333 sess_works_list_entry); 6334 6335 /* 6336 * This work can be scheduled on several CPUs at time, so we 6337 * must delete the entry to eliminate double processing 6338 */ 6339 list_del(&prm->sess_works_list_entry); 6340 6341 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6342 6343 switch (prm->type) { 6344 case QLA_TGT_SESS_WORK_ABORT: 6345 qlt_abort_work(tgt, prm); 6346 break; 6347 default: 6348 BUG_ON(1); 6349 break; 6350 } 6351 6352 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6353 6354 kfree(prm); 6355 } 6356 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6357 } 6358 6359 /* Must be called under tgt_host_action_mutex */ 6360 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6361 { 6362 struct qla_tgt *tgt; 6363 int rc, i; 6364 struct qla_qpair_hint *h; 6365 6366 if (!QLA_TGT_MODE_ENABLED()) 6367 return 0; 6368 6369 if (!IS_TGT_MODE_CAPABLE(ha)) { 6370 ql_log(ql_log_warn, base_vha, 0xe070, 6371 "This adapter does not support target mode.\n"); 6372 return 0; 6373 } 6374 6375 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6376 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6377 6378 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6379 6380 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6381 if (!tgt) { 6382 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6383 "Unable to allocate struct qla_tgt\n"); 6384 return -ENOMEM; 6385 } 6386 6387 tgt->qphints = kcalloc(ha->max_qpairs + 1, 6388 sizeof(struct qla_qpair_hint), 6389 GFP_KERNEL); 6390 if (!tgt->qphints) { 6391 kfree(tgt); 6392 ql_log(ql_log_warn, base_vha, 0x0197, 6393 "Unable to allocate qpair hints.\n"); 6394 return -ENOMEM; 6395 } 6396 6397 
qla2xxx_driver_template.supported_mode |= MODE_TARGET; 6398 6399 rc = btree_init64(&tgt->lun_qpair_map); 6400 if (rc) { 6401 kfree(tgt->qphints); 6402 kfree(tgt); 6403 ql_log(ql_log_info, base_vha, 0x0198, 6404 "Unable to initialize lun_qpair_map btree\n"); 6405 return -EIO; 6406 } 6407 h = &tgt->qphints[0]; 6408 h->qpair = ha->base_qpair; 6409 INIT_LIST_HEAD(&h->hint_elem); 6410 h->cpuid = ha->base_qpair->cpuid; 6411 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); 6412 6413 for (i = 0; i < ha->max_qpairs; i++) { 6414 unsigned long flags; 6415 6416 struct qla_qpair *qpair = ha->queue_pair_map[i]; 6417 6418 h = &tgt->qphints[i + 1]; 6419 INIT_LIST_HEAD(&h->hint_elem); 6420 if (qpair) { 6421 h->qpair = qpair; 6422 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 6423 list_add_tail(&h->hint_elem, &qpair->hints_list); 6424 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 6425 h->cpuid = qpair->cpuid; 6426 } 6427 } 6428 6429 tgt->ha = ha; 6430 tgt->vha = base_vha; 6431 init_waitqueue_head(&tgt->waitQ); 6432 spin_lock_init(&tgt->sess_work_lock); 6433 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6434 INIT_LIST_HEAD(&tgt->sess_works_list); 6435 atomic_set(&tgt->tgt_global_resets_count, 0); 6436 6437 base_vha->vha_tgt.qla_tgt = tgt; 6438 6439 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6440 "qla_target(%d): using 64 Bit PCI addressing", 6441 base_vha->vp_idx); 6442 /* 3 is reserved */ 6443 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6444 6445 mutex_lock(&qla_tgt_mutex); 6446 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6447 mutex_unlock(&qla_tgt_mutex); 6448 6449 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6450 ha->tgt.tgt_ops->add_target(base_vha); 6451 6452 return 0; 6453 } 6454 6455 /* Must be called under tgt_host_action_mutex */ 6456 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6457 { 6458 if (!vha->vha_tgt.qla_tgt) 6459 return 0; 6460 6461 if (vha->fc_vport) { 6462 
qlt_release(vha->vha_tgt.qla_tgt); 6463 return 0; 6464 } 6465 6466 /* free left over qfull cmds */ 6467 qlt_init_term_exchange(vha); 6468 6469 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6470 vha->host_no, ha); 6471 qlt_release(vha->vha_tgt.qla_tgt); 6472 6473 return 0; 6474 } 6475 6476 void qla_remove_hostmap(struct qla_hw_data *ha) 6477 { 6478 struct scsi_qla_host *node; 6479 u32 key = 0; 6480 6481 btree_for_each_safe32(&ha->host_map, key, node) 6482 btree_remove32(&ha->host_map, key); 6483 6484 btree_destroy32(&ha->host_map); 6485 } 6486 6487 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6488 unsigned char *b) 6489 { 6490 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name); 6491 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name); 6492 put_unaligned_be64(wwpn, b); 6493 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b); 6494 } 6495 6496 /** 6497 * qlt_lport_register - register lport with external module 6498 * 6499 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6500 * @phys_wwpn: physical port WWPN 6501 * @npiv_wwpn: NPIV WWPN 6502 * @npiv_wwnn: NPIV WWNN 6503 * @callback: lport initialization callback for tcm_qla2xxx code 6504 */ 6505 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6506 u64 npiv_wwpn, u64 npiv_wwnn, 6507 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6508 { 6509 struct qla_tgt *tgt; 6510 struct scsi_qla_host *vha; 6511 struct qla_hw_data *ha; 6512 struct Scsi_Host *host; 6513 unsigned long flags; 6514 int rc; 6515 u8 b[WWN_SIZE]; 6516 6517 mutex_lock(&qla_tgt_mutex); 6518 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6519 vha = tgt->vha; 6520 ha = vha->hw; 6521 6522 host = vha->host; 6523 if (!host) 6524 continue; 6525 6526 if (!(host->hostt->supported_mode & MODE_TARGET)) 6527 continue; 6528 6529 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6530 continue; 6531 6532 spin_lock_irqsave(&ha->hardware_lock, flags); 
		/* For a physical-port registration, target mode must be off. */
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* Pin the host while the callback runs. */
		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		/* Only the host whose port name matches phys_wwpn qualifies. */
		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		/* On callback failure drop the host reference taken above. */
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	/* Translate the configured ini-mode policy into the active mode. */
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	/* Revert active_mode to the non-target state for the policy. */
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	/* "enabled" initiator policy excludes target mode entirely. */
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	/* Clamp the requested active qpair count to what the HW provides. */
	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		/* NPIV port: bounce just this vport. */
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		/* Physical port: full ISP abort, then wait for it to return. */
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK
 HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Drop back to the non-target active mode under the HW lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Kick the DPC thread so the adapter resets with the new mode. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		    "adapter is offline\n");
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&vha->unknown_atio_list);
	INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn);

	/* Start the vport with target mode deasserted. */
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

/* Report the FC-4 feature bits for name-server (RFF_ID) registration. */
u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;

	return fc4_feature;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI host context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	/* Pure initiator hosts never consume the ATIO ring. */
	if (qla_ini_mode_enabled(vha))
		return;

	/* Stamp every ring entry as already consumed. */
	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: nonzero if the caller already holds the hardware lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	/* Consume entries until one still carries the processed marker. */
	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		/* Retire every entry of this (possibly multi-entry) packet. */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				/* Wrap the ring. */
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

/* Program the ATIO queue registers and its interrupt routing in the ICB. */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Reset the ATIO in/out pointers; read back to flush the write. */
	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			/* Route ATIO interrupts to their dedicated vector. */
			icb->msix_atio = cpu_to_le16(msix->entry);
			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		/* Target mode off: restore the NVRAM values saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Use the user-configured target node name, if one was set. */
	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

/* 81xx counterpart of qlt_24xx_config_nvram_stage1(). */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		/* Target mode off: restore the NVRAM values saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Use the user-configured target node name, if one was set. */
	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

/* Reserve one extra MSI-X vector for the dedicated ATIO queue. */
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}


/* Apply target/initiator mode bits to a vport-config IOCB. */
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.
bit4 = 1 => disable */ 7102 if (qla_tgt_mode_enabled(vha)) 7103 vpmod->options_idx1 &= ~BIT_4; 7104 } 7105 7106 void 7107 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 7108 { 7109 mutex_init(&base_vha->vha_tgt.tgt_mutex); 7110 if (!QLA_TGT_MODE_ENABLED()) 7111 return; 7112 7113 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 7114 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 7115 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 7116 } else { 7117 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 7118 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 7119 } 7120 7121 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 7122 7123 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 7124 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 7125 qlt_unknown_atio_work_fn); 7126 7127 qlt_clear_mode(base_vha); 7128 7129 qla_update_vp_map(base_vha, SET_VP_IDX); 7130 } 7131 7132 irqreturn_t 7133 qla83xx_msix_atio_q(int irq, void *dev_id) 7134 { 7135 struct rsp_que *rsp; 7136 scsi_qla_host_t *vha; 7137 struct qla_hw_data *ha; 7138 unsigned long flags; 7139 7140 rsp = (struct rsp_que *) dev_id; 7141 ha = rsp->hw; 7142 vha = pci_get_drvdata(ha->pdev); 7143 7144 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7145 7146 qlt_24xx_process_atio_queue(vha, 0); 7147 7148 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7149 7150 return IRQ_HANDLED; 7151 } 7152 7153 static void 7154 qlt_handle_abts_recv_work(struct work_struct *work) 7155 { 7156 struct qla_tgt_sess_op *op = container_of(work, 7157 struct qla_tgt_sess_op, work); 7158 scsi_qla_host_t *vha = op->vha; 7159 struct qla_hw_data *ha = vha->hw; 7160 unsigned long flags; 7161 7162 if (qla2x00_reset_active(vha) || 7163 (op->chip_reset != ha->base_qpair->chip_reset)) 7164 return; 7165 7166 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7167 qlt_24xx_process_atio_queue(vha, 0); 7168 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7169 7170 
spin_lock_irqsave(&ha->hardware_lock, flags); 7171 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 7172 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7173 7174 kfree(op); 7175 } 7176 7177 void 7178 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 7179 response_t *pkt) 7180 { 7181 struct qla_tgt_sess_op *op; 7182 7183 op = kzalloc(sizeof(*op), GFP_ATOMIC); 7184 7185 if (!op) { 7186 /* do not reach for ATIO queue here. This is best effort err 7187 * recovery at this point. 7188 */ 7189 qlt_response_pkt_all_vps(vha, rsp, pkt); 7190 return; 7191 } 7192 7193 memcpy(&op->atio, pkt, sizeof(*pkt)); 7194 op->vha = vha; 7195 op->chip_reset = vha->hw->base_qpair->chip_reset; 7196 op->rsp = rsp; 7197 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7198 queue_work(qla_tgt_wq, &op->work); 7199 return; 7200 } 7201 7202 int 7203 qlt_mem_alloc(struct qla_hw_data *ha) 7204 { 7205 if (!QLA_TGT_MODE_ENABLED()) 7206 return 0; 7207 7208 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7209 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7210 &ha->tgt.atio_dma, GFP_KERNEL); 7211 if (!ha->tgt.atio_ring) { 7212 return -ENOMEM; 7213 } 7214 return 0; 7215 } 7216 7217 void 7218 qlt_mem_free(struct qla_hw_data *ha) 7219 { 7220 if (!QLA_TGT_MODE_ENABLED()) 7221 return; 7222 7223 if (ha->tgt.atio_ring) { 7224 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7225 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7226 ha->tgt.atio_dma); 7227 } 7228 ha->tgt.atio_ring = NULL; 7229 ha->tgt.atio_dma = 0; 7230 } 7231 7232 static int __init qlt_parse_ini_mode(void) 7233 { 7234 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7235 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7236 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7237 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7238 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7239 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

/*
 * Module-level target-mode initialization: parse the ini-mode parameter
 * and set up the slab caches, mempool and workqueue used by the target
 * core.  Returns 1 when initiator mode is being disabled, 0 on plain
 * success, negative errno on failure.
 */
int __init qlt_init(void)
{
	int ret;

	/* IOCBs are exactly one 64-byte ring entry; catch layout drift. */
	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	/* Mempool guarantees forward progress for TMF allocations. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

/* Tear down everything created by qlt_init(). */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}