// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"User to control IRQ placement via smp_affinity."
	"Valid with qlini_mode=disabled."
	"1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/* SAM status returned when the driver (not the target core) must throttle */
static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation on the time when
 * those functions are called:
 *
 *   - Either context is IRQ and only IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions boundaries, except tgt_stop, which
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* Map a target-core protection operation to a printable name for debug logs */
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Generation counter lives on the base (physical) vha */
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reaquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

/*
 * Resolve a destination FC port id (d_id) to the scsi_qla_host that owns it.
 * Fast path: the given vha itself; otherwise a btree lookup in the per-HBA
 * host map.  Returns NULL (after a verbose debug message) when no host
 * matches.
 */
struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

/*
 * Bump the count of pending target commands under q_full_lock and track the
 * high-water mark in qla_stats.
 */
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

/* Counterpart of qlt_incr_num_pend_cmds() */
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}


/*
 * Park an ATIO whose d_id could not be resolved to a host on the
 * unknown_atio_list and schedule delayed work to retry; if the target is
 * stopping or the allocation fails, terminate the exchange instead.
 */
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	/* GFP_ATOMIC: may be called from IRQ/atomic context */
	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

/*
 * Walk unknown_atio_list and, per entry: terminate if aborted, re-dispatch if
 * the d_id now resolves, terminate if the target is stopping, otherwise leave
 * it queued and reschedule the delayed work (at most once per pass).
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

/* Delayed-work entry point for retrying queued unknown ATIOs (unlocked) */
void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

/*
 * Demultiplex an incoming ATIO to the vport that owns it, keyed on the
 * entry type (d_id for ATIO_TYPE7, vp_index otherwise).
 * NOTE(review): the bool return is currently always false — callers appear
 * not to distinguish; confirm before relying on the return value.
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);


			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		/* A now-known d_id may unblock previously-parked ATIOs */
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		/* qlt_24xx_handle_abts() requires the hardware lock */
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

/*
 * Demultiplex a response-queue packet to the owning vport (by vp_index) and
 * hand it to qlt_response_pkt().  Packets whose vp_index cannot be resolved
 * are dropped with a debug message.
 */
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		/* 0xFF means "no specific vport": deliver to this vha */
		if (0xFF != entry->u.isp24.vp_index) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			/* NOTE(review): message says ABTS_RECV_24XX but this
			 * is the ABTS_RESP_24XX case — likely copy/paste */
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */

/* Queue a QLA_EVT_NACK work item carrying a copy of the immediate notify */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

/*
 * Completion callback for NACK srbs: updates fcport login state machine
 * per srb type (PLOGI/PRLI/LOGO) under tgt.sess_lock and drops the srb ref.
 */
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;

		if (sp->fcport->flags & FCF_FCSP_DEVICE) {
			/* edif (FC-SP) ports must authenticate before login
			 * completes */
			ql_dbg(ql_dbg_edif, vha, 0x20ef,
			    "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
			    sp->fcport->port_name);
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_AUTH_PEND);
			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    sp->fcport->d_id.b24);
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
			    0, sp->fcport);
		}
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			/* Drop the lock: qla24xx_sched_upd_fcport() may not
			 * be called with sess_lock held */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

/*
 * Build and fire an async notify-ack srb for a PLOGI/PRLI/LOGO immediate
 * notify.  Sets FCF_ASYNC_SENT for the duration; cleared on failure here or
 * in qla2x00_async_nack_sp_done() on completion.
 */
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		if (vha->hw->flags.edif_enabled &&
		    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
			fcport->flags |= FCF_FCSP_DEVICE;
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_nack_sp_done);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/*
 * QLA_EVT_NACK work handler.  For PRLI NACKs, (re)creates the target session
 * first (after flushing any pending delete/free work); then issues the
 * notify-ack for the queued iocb.
 */
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

/*
 * Workqueue handler for fc_port->del_work: shut down and release the target
 * session if one exists, otherwise unregister the fcport directly.
 */
void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = NULL;

	if (!fcport || !fcport->vha || !fcport->vha->hw)
		return;

	ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* Target mode not configured on this host */
	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		/* No session yet — create one (tgt_mutex must be taken
		 * without sess_lock held) */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			/* Existing entry for this port id: terminate the old
			 * INOT and take over with the new one */
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

/*
 * Drop one reference on a pending PLOGI ack.  When the last ref goes, send
 * the deferred NACK (PLOGI or PRLI), clear all fcport links pointing at this
 * pla, and free it.
 */
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	/* port_id bytes are stored little-endian-ish in the iocb: [2]=domain */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

/*
 * Link a session to a pending PLOGI ack (taking a ref).  A CONFLICT link to
 * a session already being deleted is a no-op; an existing link of the same
 * kind is unref'd first.
 */
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

/*
 * Send an explicit LOGO to the given port id, unless one is already in
 * flight for that id (in which case just fold in the dropped-cmd count).
 * Skipped entirely while the driver is being removed.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		res = 0;
		goto out;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

/*
 * Workqueue handler for fc_port->free_work: full teardown of a target
 * session — optional ELS LOGO / async logout / PRLO, NVMe unregistration,
 * edif cleanup, fabric-module session release, pending PLOGI-ack resolution,
 * and final bookkeeping under tgt.sess_lock.  Scheduled by qlt_unreg_sess().
 */
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			INIT_LIST_HEAD(&logo.list);
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
				sess->logout_completed = 0;
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}

		if (ha->flags.edif_enabled &&
		    (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
			sess->edif.authok = 0;
			if (!ha->flags.host_shutting_down) {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
				    __func__, sess->port_name);
				qla2x00_release_all_sadb(vha, sess);
			} else {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s bypassing release_all_sadb\n",
				    __func__);
			}

			qla_edif_clear_appdata(vha, sess);
			qla_edif_sess_down(vha, sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		/* Poll for async logout completion (set by the logout path) */
		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * Driver timeout is set to 22 Sec, update count value to loop
			 * long enough for log-out to complete before advancing. Otherwise,
			 * straddling logout can interfere with re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		/* Re-read: may have changed while we slept above */
		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	/* free_pending guards against scheduling free_work twice */
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	/* Actual teardown happens in qlt_free_session_done() */
	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/*
 * Handle a reset request carried in an immediate notify: nport_handle 0xFFFF
 * means a global event (clear the whole target session DB), otherwise locate
 * the session by loop id and issue the requested task management function.
 * Returns -ESRCH if no session matches.
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset
!= sess->vha->hw->base_qpair->chip_reset) { 1242 sess->logout_on_delete = 0; 1243 sess->logo_ack_needed = 0; 1244 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1245 } 1246 } 1247 1248 void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1249 { 1250 struct qla_tgt *tgt = sess->tgt; 1251 unsigned long flags; 1252 u16 sec; 1253 1254 switch (sess->disc_state) { 1255 case DSC_DELETE_PEND: 1256 return; 1257 case DSC_DELETED: 1258 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && 1259 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { 1260 if (tgt && tgt->tgt_stop && tgt->sess_count == 0) 1261 wake_up_all(&tgt->waitQ); 1262 1263 if (sess->vha->fcport_count == 0) 1264 wake_up_all(&sess->vha->fcport_waitQ); 1265 return; 1266 } 1267 break; 1268 case DSC_UPD_FCPORT: 1269 /* 1270 * This port is not done reporting to upper layer. 1271 * let it finish 1272 */ 1273 sess->next_disc_state = DSC_DELETE_PEND; 1274 sec = jiffies_to_msecs(jiffies - 1275 sess->jiffies_at_registration)/1000; 1276 if (sess->sec_since_registration < sec && sec && !(sec % 5)) { 1277 sess->sec_since_registration = sec; 1278 ql_dbg(ql_dbg_disc, sess->vha, 0xffff, 1279 "%s %8phC : Slow Rport registration(%d Sec)\n", 1280 __func__, sess->port_name, sec); 1281 } 1282 return; 1283 default: 1284 break; 1285 } 1286 1287 spin_lock_irqsave(&sess->vha->work_lock, flags); 1288 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1289 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1290 return; 1291 } 1292 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1293 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1294 1295 sess->prli_pend_timer = 0; 1296 qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); 1297 1298 qla24xx_chk_fcp_state(sess); 1299 1300 ql_dbg(ql_log_warn, sess->vha, 0xe001, 1301 "Scheduling sess %p for deletion %8phC fc4_type %x\n", 1302 sess, sess->port_name, sess->fc4_type); 1303 1304 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); 1305 } 1306 1307 static void qlt_clear_tgt_db(struct 
qla_tgt *tgt) 1308 { 1309 struct fc_port *sess; 1310 scsi_qla_host_t *vha = tgt->vha; 1311 1312 list_for_each_entry(sess, &vha->vp_fcports, list) { 1313 if (sess->se_sess) 1314 qlt_schedule_sess_for_deletion(sess); 1315 } 1316 1317 /* At this point tgt could be already dead */ 1318 } 1319 1320 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id, 1321 uint16_t *loop_id) 1322 { 1323 struct qla_hw_data *ha = vha->hw; 1324 dma_addr_t gid_list_dma; 1325 struct gid_list_info *gid_list, *gid; 1326 int res, rc, i; 1327 uint16_t entries; 1328 1329 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1330 &gid_list_dma, GFP_KERNEL); 1331 if (!gid_list) { 1332 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, 1333 "qla_target(%d): DMA Alloc failed of %u\n", 1334 vha->vp_idx, qla2x00_gid_list_size(ha)); 1335 return -ENOMEM; 1336 } 1337 1338 /* Get list of logged in devices */ 1339 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); 1340 if (rc != QLA_SUCCESS) { 1341 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 1342 "qla_target(%d): get_id_list() failed: %x\n", 1343 vha->vp_idx, rc); 1344 res = -EBUSY; 1345 goto out_free_id_list; 1346 } 1347 1348 gid = gid_list; 1349 res = -ENOENT; 1350 for (i = 0; i < entries; i++) { 1351 if (gid->al_pa == s_id.al_pa && 1352 gid->area == s_id.area && 1353 gid->domain == s_id.domain) { 1354 *loop_id = le16_to_cpu(gid->loop_id); 1355 res = 0; 1356 break; 1357 } 1358 gid = (void *)gid + ha->gid_list_info_size; 1359 } 1360 1361 out_free_id_list: 1362 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1363 gid_list, gid_list_dma); 1364 return res; 1365 } 1366 1367 /* 1368 * Adds an extra ref to allow to drop hw lock after adding sess to the list. 1369 * Caller must put it. 
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* No new sessions while the target is stopping. */
	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		/* Session already exists -- just take another reference. */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reaquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * Schedule deletion of a session on behalf of the initiator-mode code,
 * unless the request is stale (session generation newer than max_gen)
 * or the target is already stopping.
 *
 * max_gen - specifies maximum session generation
 * at which this deletion requestion is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

/*
 * Return non-zero when all sessions are gone; used as the condition for
 * the wait_event_timeout() calls in qlt_stop_phase1().
 */
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/*
 * Phase 1 of target shutdown: mark the target stopping, schedule all
 * sessions for deletion, flush outstanding session works and wait
 * (bounded) for the session count to drain.  Returns 0 on success or
 * -EPERM if a stop is already in progress/finished.
 *
 * Called by tcm_qla2xxx configfs code
 */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	/* Re-flush until the works list is observed empty under the lock. */
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	do {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_work(&tgt->sess_work);
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	} while (!list_empty(&tgt->sess_works_list));
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/*
 * Phase 2 of target shutdown: flip tgt_stop -> tgt_stopped.  Requires
 * phase 1 to have completed; logs and bails out otherwise.  In exclusive
 * initiator mode, re-enables the host and requests an ISP abort so
 * initiator mode comes back.
 *
 * Called by tcm_qla2xxx configfs code
 */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/*
 * Final teardown of a qla_tgt: runs both stop phases if needed, unhooks
 * the per-qpair hints, empties and destroys the LUN->qpair btree,
 * notifies the fabric module for vports and frees the tgt.
 *
 * Called from qlt_remove_target() -> qla2x00_remove_one()
 */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	/* Detach every qpair hint from its qpair's list. */
	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/*
 * Queue a deferred "find session and process" work item of the given
 * type, copying param (param_size bytes) into the work parameter.
 * Returns 0 on success, -ENOMEM if the atomic allocation fails.
 *
 * ha->hardware_lock supposed to be held on entry
 */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	/* Nothing to ack if the firmware isn't up. */
	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	/* Echo the fields of the original immediate notify back to fw. */
	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/* TODO qualify this with EDIF enable */
	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * Build and post an ABTS response IOCB for a task-management command,
 * BA_ACC when mcmd->fc_tm_rsp == FCP_TMF_CMPL, BA_RJT otherwise.
 * Returns 0 on success, -EAGAIN when no ring entry/handle is available.
 */
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		/* Track mcmd so the completion path can find it by handle. */
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	/* Responding to the initiator: swap source and destination ids. */
	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	/*
	 * ids_reversed is set when responding to an ABTS we generated
	 * ourselves (ids already swapped); otherwise swap s_id/d_id here.
	 */
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	/* Task attribute goes in bits 11:9 of the CTIO flags. */
	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	/* Follow the terminate with a fresh ABTS response. */
	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);

}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was recieved
 * XXX does not go through the list of other port (which may have cmds
 * for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	/* Mark matching not-yet-dispatched ATIOs aborted. */
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	/* Mark matching in-flight commands aborted. */
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}

/*
 * Map an unpacked LUN to its qpair hint; falls back to qphints[0] when
 * no mapping exists or multiple qpairs are not available.
 */
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
	uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h = NULL;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (!h)
			h = &tgt->qphints[0];
	} else {
		h = &tgt->qphints[0];
	}

	return h;
}

/*
 * Workqueue handler: hand a task-management command to the fabric
 * module.  If handle_tmr() fails, answer the initiator directly
 * (ABTS reject / SAM BUSY / notify ack, per tmr_func) and free mcmd.
 */
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}

/*
 * Turn a received ABTS into a QLA_TGT_ABTS management command and queue
 * it for qlt_do_tmr_work() on the CPU of the command being aborted.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when the
 * command to abort cannot be found by tag.
 *
 * ha->hardware_lock supposed to be held on entry
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
	struct qla_tgt_cmd *abort_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
	    le32_to_cpu(abts->exchange_addr_to_abort));
	if (!abort_cmd) {
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EIO;
	}
	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

	/* Prefer the qpair/cpu the aborted command is running on. */
	if (abort_cmd->qpair) {
		mcmd->qpair = abort_cmd->qpair;
		mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
		mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
		mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire 2158 */ 2159 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2160 struct abts_recv_from_24xx *abts) 2161 { 2162 struct qla_hw_data *ha = vha->hw; 2163 struct fc_port *sess; 2164 uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); 2165 be_id_t s_id; 2166 int rc; 2167 unsigned long flags; 2168 2169 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 2170 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 2171 "qla_target(%d): ABTS: Abort Sequence not " 2172 "supported\n", vha->vp_idx); 2173 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2174 false); 2175 return; 2176 } 2177 2178 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { 2179 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, 2180 "qla_target(%d): ABTS: Unknown Exchange " 2181 "Address received\n", vha->vp_idx); 2182 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2183 false); 2184 return; 2185 } 2186 2187 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2188 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2189 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain, 2190 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag, 2191 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2192 2193 s_id = le_id_to_be(abts->fcp_hdr_le.s_id); 2194 2195 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2196 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2197 if (!sess) { 2198 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2199 "qla_target(%d): task abort for non-existent session\n", 2200 vha->vp_idx); 2201 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2202 2203 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2204 false); 2205 return; 2206 } 2207 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2208 2209 2210 if (sess->deleted) { 2211 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2212 false); 2213 return; 2214 } 2215 2216 rc = __qlt_24xx_handle_abts(vha, abts, sess); 2217 if (rc != 0) { 2218 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 2219 
"qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", 2220 vha->vp_idx, rc); 2221 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2222 false); 2223 return; 2224 } 2225 } 2226 2227 /* 2228 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2229 */ 2230 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, 2231 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) 2232 { 2233 struct scsi_qla_host *ha = mcmd->vha; 2234 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 2235 struct ctio7_to_24xx *ctio; 2236 uint16_t temp; 2237 2238 ql_dbg(ql_dbg_tgt, ha, 0xe008, 2239 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 2240 ha, atio, resp_code); 2241 2242 2243 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); 2244 if (ctio == NULL) { 2245 ql_dbg(ql_dbg_tgt, ha, 0xe04c, 2246 "qla_target(%d): %s failed: unable to allocate " 2247 "request packet\n", ha->vp_idx, __func__); 2248 return; 2249 } 2250 2251 ctio->entry_type = CTIO_TYPE7; 2252 ctio->entry_count = 1; 2253 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2254 ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); 2255 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2256 ctio->vp_index = ha->vp_idx; 2257 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 2258 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2259 temp = (atio->u.isp24.attr << 9)| 2260 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2261 ctio->u.status1.flags = cpu_to_le16(temp); 2262 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2263 ctio->u.status1.ox_id = cpu_to_le16(temp); 2264 ctio->u.status1.scsi_status = 2265 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 2266 ctio->u.status1.response_len = cpu_to_le16(8); 2267 ctio->u.status1.sense_data[0] = resp_code; 2268 2269 /* Memory Barrier */ 2270 wmb(); 2271 if (qpair->reqq_start_iocbs) 2272 qpair->reqq_start_iocbs(qpair); 2273 else 2274 qla2x00_start_iocbs(ha, qpair->req); 2275 } 2276 2277 
/* Return a management command to its mempool. */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * Send a check-condition style response CTIO7 for @cmd, carrying
 * fixed-format (0x70) sense data built from @sense_key/@asc/@ascq.
 * Used for DIF errors detected by the driver itself.
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	/* No data was transferred: report the full expected length. */
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}

/*
 * callback from target fabric module code
 *
 * Transmit the TM response for @mcmd: a notify-ack (or session
 * teardown for LOGO/PRLO) when the TMF arrived as an immediate notify,
 * an ABTS response IOCB for ABTS, or a task-mgmt CTIO otherwise.
 * Frees the mcmd via tgt_ops->free_mcmd() except on the ABTS-response
 * path, which keeps it until the response completes.
 */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			/* Session-terminating ELS: tear down the session
			 * instead of just acknowledging. */
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			qlt_build_abts_resp_iocb(mcmd);
			/* mcmd is still referenced by the ABTS response;
			 * it is released on response completion. */
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/*
 * DMA-map the command's data (and protection) scatterlists and compute
 * the number of request-queue entries / DSDs the transfer will need.
 * Returns 0 on success, -1 on mapping failure.
 *
 * No locks.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0,
	    prm->cmd->sg_cnt);
	return -1;
}

/*
 * Undo qlt_pci_map_calc_cnt(): unmap the data and protection
 * scatterlists and release the DIF CRC context, if one was allocated.
 * Safe to call when nothing is mapped (returns early).
 */
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

/*
 * Reserve @req_cnt entries on the qpair's request ring, refreshing the
 * cached free count from the hardware out-pointer when it looks short.
 * Returns 0 on success, -EAGAIN when the ring is too full (the +2 keeps
 * a safety margin so the ring never appears completely consumed).
 */
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out));

		if  (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}

/*
 * Advance the request ring and return the next packet slot, wrapping at
 * the end of the ring.  Caller must have reserved the entry.
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_pt r == NULL ? NULL : req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Find a free outstanding-command handle on the qpair's request queue,
 * scanning circularly from the last one used and skipping the reserved
 * QLA_TGT_SKIP_HANDLE.  Returns QLA_TGT_NULL_HANDLE when all slots are
 * busy.
 */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}

/*
 * Fill in a CTIO7 IOCB for @prm->cmd on the current ring slot and claim
 * an outstanding-command handle for it.  Returns 0 on success, -EAGAIN
 * if no handle is available.
 *
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;
	struct qla_tgt_cmd *cmd = prm->cmd;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* Task attribute in bits 9+, per CTIO7 flags layout. */
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	if (cmd->edif) {
		/* Account encrypted I/O bytes per session direction. */
		if (cmd->dma_data_direction == DMA_TO_DEVICE)
			prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
		if (cmd->dma_data_direction == DMA_FROM_DEVICE)
			prm->cmd->sess->edif.tx_bytes += cmd->bufflen;

		pkt->u.status0.edif_flags |= EF_EN_EDIF;
	}

	return 0;
}

/*
 * Emit CONTINUE_A64 IOCBs holding the data segments that did not fit in
 * the initial CTIO, consuming prm->sg/prm->seg_cnt as it goes.
 *
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			   prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		cur_dsd = cont_pkt64->dsd;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			append_dsd64(&cur_dsd, prm->sg);
			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * Load the data segments of a non-DIF transfer into the CTIO7 (up to
 * QLA_TGT_DATASEGS_PER_CMD_24XX inline), then chain the rest into
 * continuation IOCBs.
 *
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	cur_dsd = &pkt24->u.status0.dsd;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		cur_dsd->address = 0;
		cur_dsd->length = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		append_dsd64(&cur_dsd, prm->sg);
		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm);
}

/* True when the command carries a data phase. */
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

/*
 * Log a decoded DIF (T10-PI) error when the sense data carries
 * ASC 0x10; the ASCQ selects guard/app/ref tag flavor.
 */
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}

/*
 * Prepare the transmit-parameter block for xmit_response: map data if
 * needed, fold residual status into rq_result and compute the total
 * number of request entries required (including an extra status IOCB
 * when sense/status must follow the data).
 *
 * Called without ha->hardware_lock held.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

/*
 * Decide whether an explicit confirmation must be requested from the
 * initiator for this CTIO.  Class-2 service never needs it; otherwise
 * it depends on the initiator's conf-completion support and (for
 * non-sense responses) the qpair's explicit-conf setting.
 */
static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
    int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

/*
 * Fill status, residual and (if valid) sense data into a CTIO7.
 * With sense data the IOCB is switched to status mode 1, the sense
 * bytes are copied 32 bits at a time with BE->LE conversion for the
 * firmware, and any DIF error is logged.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++) {
			uint32_t v;

			v = get_unaligned_be32(
			    &((uint32_t *)prm->sense_buffer)[i]);
			put_unaligned_le32(v,
			    &((uint32_t *)ctio->u.status1.sense_data)[i]);
		}
		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}

/*
 * Whether HBA-side T10-PI error checking applies to this protection
 * op, gated on the ql2xenablehba_err_chk module parameter level
 * (>=1 for insert/strip on the wire side, >=2 for pass-through).
 */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

/* 1 for any real protection op (ref tag is meaningful), else 0. */
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		return 1;
	default:
		return 0;
	}
	/* not reached: every case above returns */
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
	uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/*
		 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
		 * tag has to match LBA in CDB + N
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE3_PROT:
		/* For TYPE 3 protection: 16 bit GUARD only */
		*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}

/*
 * Build a CTIO CRC_2 IOCB for a T10-PI (DIF) transfer: compute the
 * transfer length including protection bytes, select the firmware
 * protection mode, allocate and populate the CRC context from the DMA
 * pool, and walk the data/protection scatterlists into DSD lists.
 * Returns QLA_SUCCESS, -EAGAIN when no command handle is free, or
 * QLA_FUNCTION_FAILED on allocation/sglist errors (handle is released;
 * further cleanup is the caller's responsibility).
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	struct dsd64 *cur_dsd;
	uint32_t transfer_length = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	struct ctio_crc2_to_fw *pkt;
	dma_addr_t crc_ctx_dma;
	uint16_t fw_prot_opts = 0;
	struct qla_tgt_cmd *cmd = prm->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	/* Insert/strip on the wire side keeps data and PI separate. */
	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection;
	 * 8 bytes of PI per logical block. */
	data_bytes = cmd->bufflen;
	dif_bytes = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
	pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
		    cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;
crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 *
 * Builds and queues the CTIO7 IOCB(s) that return data and/or SCSI status
 * to the initiator.  Returns 0 on success or a negative error from the
 * pkt-building helpers; on any failure the SG list is unmapped before
 * returning.  Runs under qpair->qp_lock_ptr for the queuing phase.
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	/* Firmware down, chip reset since dispatch, or session being torn
	 * down: silently complete — nothing can be sent to the wire. */
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	/* Re-check under the lock: state may have changed since the
	 * unlocked check above. */
	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		res = 0;
		goto out_unmap_unlock;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		/* Return the reserved ring entries before bailing out */
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			/* Data and status fit in a single CTIO */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				if (!cmd->edif)
					pkt->u.status0.residual =
						cpu_to_le32(prm.residual);

				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier: ensure IOCB writes are visible before ringing */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

/*
 * Queue a CTIO requesting write data (XFER_RDY equivalent) from the
 * initiator.  On reset/logout races, fakes a failed data-in completion
 * via tgt_ops->handle_data() and returns 0.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved ring entries before bailing out */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier: ensure IOCB writes are visible before ringing */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

return res; 3479 } 3480 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3481 3482 3483 /* 3484 * it is assumed either hardware_lock or qpair lock is held. 3485 */ 3486 static void 3487 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 3488 struct ctio_crc_from_fw *sts) 3489 { 3490 uint8_t *ap = &sts->actual_dif[0]; 3491 uint8_t *ep = &sts->expected_dif[0]; 3492 uint64_t lba = cmd->se_cmd.t_task_lba; 3493 uint8_t scsi_status, sense_key, asc, ascq; 3494 unsigned long flags; 3495 struct scsi_qla_host *vha = cmd->vha; 3496 3497 cmd->trc_flags |= TRC_DIF_ERR; 3498 3499 cmd->a_guard = get_unaligned_be16(ap + 0); 3500 cmd->a_app_tag = get_unaligned_be16(ap + 2); 3501 cmd->a_ref_tag = get_unaligned_be32(ap + 4); 3502 3503 cmd->e_guard = get_unaligned_be16(ep + 0); 3504 cmd->e_app_tag = get_unaligned_be16(ep + 2); 3505 cmd->e_ref_tag = get_unaligned_be32(ep + 4); 3506 3507 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3508 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3509 3510 scsi_status = sense_key = asc = ascq = 0; 3511 3512 /* check appl tag */ 3513 if (cmd->e_app_tag != cmd->a_app_tag) { 3514 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, 3515 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3516 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3517 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3518 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3519 cmd->atio.u.isp24.fcp_hdr.ox_id); 3520 3521 cmd->dif_err_code = DIF_ERR_APP; 3522 scsi_status = SAM_STAT_CHECK_CONDITION; 3523 sense_key = ABORTED_COMMAND; 3524 asc = 0x10; 3525 ascq = 0x2; 3526 } 3527 3528 /* check ref tag */ 3529 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3530 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, 3531 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", 3532 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3533 cmd->a_ref_tag, cmd->e_ref_tag, 
cmd->a_app_tag, 3534 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3535 cmd->atio.u.isp24.fcp_hdr.ox_id); 3536 3537 cmd->dif_err_code = DIF_ERR_REF; 3538 scsi_status = SAM_STAT_CHECK_CONDITION; 3539 sense_key = ABORTED_COMMAND; 3540 asc = 0x10; 3541 ascq = 0x3; 3542 goto out; 3543 } 3544 3545 /* check guard */ 3546 if (cmd->e_guard != cmd->a_guard) { 3547 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3548 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3549 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3550 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3551 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3552 cmd->atio.u.isp24.fcp_hdr.ox_id); 3553 3554 cmd->dif_err_code = DIF_ERR_GRD; 3555 scsi_status = SAM_STAT_CHECK_CONDITION; 3556 sense_key = ABORTED_COMMAND; 3557 asc = 0x10; 3558 ascq = 0x1; 3559 } 3560 out: 3561 switch (cmd->state) { 3562 case QLA_TGT_STATE_NEED_DATA: 3563 /* handle_data will load DIF error code */ 3564 cmd->state = QLA_TGT_STATE_DATA_IN; 3565 vha->hw->tgt.tgt_ops->handle_data(cmd); 3566 break; 3567 default: 3568 spin_lock_irqsave(&cmd->cmd_lock, flags); 3569 if (cmd->aborted) { 3570 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3571 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3572 break; 3573 } 3574 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3575 3576 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3577 ascq); 3578 /* assume scsi status gets out on the wire. 3579 * Will not wait for completion. 
3580 */ 3581 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3582 break; 3583 } 3584 } 3585 3586 /* If hardware_lock held on entry, might drop it, then reaquire */ 3587 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3588 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3589 struct imm_ntfy_from_isp *ntfy) 3590 { 3591 struct nack_to_isp *nack; 3592 struct qla_hw_data *ha = vha->hw; 3593 request_t *pkt; 3594 int ret = 0; 3595 3596 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3597 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3598 3599 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3600 if (pkt == NULL) { 3601 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3602 "qla_target(%d): %s failed: unable to allocate " 3603 "request packet\n", vha->vp_idx, __func__); 3604 return -ENOMEM; 3605 } 3606 3607 pkt->entry_type = NOTIFY_ACK_TYPE; 3608 pkt->entry_count = 1; 3609 pkt->handle = QLA_TGT_SKIP_HANDLE; 3610 3611 nack = (struct nack_to_isp *)pkt; 3612 nack->ox_id = ntfy->ox_id; 3613 3614 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3615 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3616 nack->u.isp24.flags = ntfy->u.isp24.flags & 3617 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3618 } 3619 3620 /* terminate */ 3621 nack->u.isp24.flags |= 3622 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3623 3624 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3625 nack->u.isp24.status = ntfy->u.isp24.status; 3626 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3627 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3628 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3629 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3630 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3631 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3632 3633 qla2x00_start_iocbs(vha, vha->req); 3634 return ret; 3635 } 3636 3637 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3638 struct imm_ntfy_from_isp *imm, int ha_locked) 3639 { 3640 
	int rc;

	/* Caller must already hold the hardware lock */
	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}

/*
 * If hardware_lock held on entry, might drop it, then reacquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 *
 * Builds a TERMINATE EXCHANGE CTIO7 for the given ATIO.  Returns
 * -ENOMEM if no IOCB could be allocated, 1 if a command in state
 * >= PROCESSED was terminated, 0 otherwise.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	/* NPIV: use the command's own vha when one is attached */
	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	/* No completion matching needed for a terminate */
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier: ensure IOCB writes are visible before ringing */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

/*
 * Locked/unlocked wrapper around __qlt_send_term_exchange().  Falls back
 * to queuing a qfull command when IOCB allocation fails, and frees the
 * command unless the upper layer initiated the abort (ul_abort).
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

/*
 * Recompute the exchange-leak threshold and drop any queued qfull
 * commands that were never handed to the target core.
 */
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.
There is no need 3765 * to schedule free or call free_cmd 3766 */ 3767 qlt_free_cmd(cmd); 3768 vha->hw->tgt.num_qfull_cmds_alloc--; 3769 } 3770 } 3771 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3772 } 3773 3774 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3775 { 3776 uint32_t total_leaked; 3777 3778 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3779 3780 if (vha->hw->tgt.leak_exchg_thresh_hold && 3781 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3782 3783 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3784 "Chip reset due to exchange starvation: %d/%d.\n", 3785 total_leaked, vha->hw->cur_fw_xcb_count); 3786 3787 if (IS_P3P_TYPE(vha->hw)) 3788 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3789 else 3790 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3791 qla2xxx_wake_dpc(vha); 3792 } 3793 3794 } 3795 3796 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3797 { 3798 struct qla_tgt *tgt = cmd->tgt; 3799 struct scsi_qla_host *vha = tgt->vha; 3800 struct se_cmd *se_cmd = &cmd->se_cmd; 3801 unsigned long flags; 3802 3803 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3804 "qla_target(%d): terminating exchange for aborted cmd=%p " 3805 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3806 se_cmd->tag); 3807 3808 spin_lock_irqsave(&cmd->cmd_lock, flags); 3809 if (cmd->aborted) { 3810 if (cmd->sg_mapped) 3811 qlt_unmap_sg(vha, cmd); 3812 3813 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3814 /* 3815 * It's normal to see 2 calls in this path: 3816 * 1) XFER Rdy completion + CMD_T_ABORT 3817 * 2) TCM TMR - drain_state_list 3818 */ 3819 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3820 "multiple abort. 
%p transport_state %x, t_state %x, " 3821 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3822 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3823 return -EIO; 3824 } 3825 cmd->aborted = 1; 3826 cmd->trc_flags |= TRC_ABORT; 3827 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3828 3829 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3830 return 0; 3831 } 3832 EXPORT_SYMBOL(qlt_abort_cmd); 3833 3834 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3835 { 3836 struct fc_port *sess = cmd->sess; 3837 3838 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3839 "%s: se_cmd[%p] ox_id %04x\n", 3840 __func__, &cmd->se_cmd, 3841 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3842 3843 BUG_ON(cmd->cmd_in_wq); 3844 3845 if (!cmd->q_full) 3846 qlt_decr_num_pend_cmds(cmd->vha); 3847 3848 BUG_ON(cmd->sg_mapped); 3849 cmd->jiffies_at_free = get_jiffies_64(); 3850 3851 if (!sess || !sess->se_sess) { 3852 WARN_ON(1); 3853 return; 3854 } 3855 cmd->jiffies_at_free = get_jiffies_64(); 3856 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); 3857 } 3858 EXPORT_SYMBOL(qlt_free_cmd); 3859 3860 /* 3861 * ha->hardware_lock supposed to be held on entry. 
 * Might drop it, then reacquire.
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		     cmd->lba, cmd->lba,
		     cmd->num_blks, &cmd->se_cmd,
		     cmd->atio.u.isp24.exchange_addr,
		     cmd->se_cmd.prot_op,
		     prot_op_str(cmd->se_cmd.prot_op));

	/* Only terminate if the firmware has not already done so
	 * (OF_TERM_EXCH set in the returned CTIO means it has). */
	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}


/* ha->hardware_lock supposed to be held on entry */
/*
 * Map a CTIO completion handle back to the qla_tgt_cmd stored in the
 * request queue's outstanding_cmds[] and clear the slot.  Returns NULL
 * for skip-handles, unknown queue IDs, or stale/invalid handles.
 */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 *
 * Process a CTIO completion from the firmware: locate the command,
 * handle error statuses (possibly terminating the exchange), then
 * advance the command's state machine and hand it back to the upper
 * layer via handle_data()/free_cmd().
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
	    cmd->sess) {
		qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
		    (struct ctio7_from_24xx *)ctio);
	}

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* DIF path owns the command from here on */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}

		case CTIO_FAST_AUTH_ERR:
		case CTIO_FAST_INCOMP_PAD_LEN:
		case CTIO_FAST_INVALID_REQ:
		case CTIO_FAST_SPI_ERR:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* Write data arrived (or failed); hand back to upper layer */
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

/* Translate an FCP task-attribute code from the ATIO into a TCM tag */
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

/*
 * Process context for I/O path into tcm_qla2xxx code
 *
 * Decode the ATIO (CDB, direction, task attribute, data length) and hand
 * the command to the target core via tgt_ops->handle_cmd().  On any
 * failure the exchange is terminated and the command freed here.
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	/* Direction from the FCP_CMND rddata/wrdata bits; both set = bidi */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
4191 */ 4192 ha->tgt.tgt_ops->put_sess(sess); 4193 return; 4194 4195 out_term: 4196 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4197 /* 4198 * cmd has not sent to target yet, so pass NULL as the second 4199 * argument to qlt_send_term_exchange() and free the memory here. 4200 */ 4201 cmd->trc_flags |= TRC_DO_WORK_ERR; 4202 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 4203 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); 4204 4205 qlt_decr_num_pend_cmds(vha); 4206 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); 4207 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 4208 4209 ha->tgt.tgt_ops->put_sess(sess); 4210 } 4211 4212 static void qlt_do_work(struct work_struct *work) 4213 { 4214 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4215 scsi_qla_host_t *vha = cmd->vha; 4216 unsigned long flags; 4217 4218 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4219 list_del(&cmd->cmd_list); 4220 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4221 4222 __qlt_do_work(cmd); 4223 } 4224 4225 void qlt_clr_qp_table(struct scsi_qla_host *vha) 4226 { 4227 unsigned long flags; 4228 struct qla_hw_data *ha = vha->hw; 4229 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4230 void *node; 4231 u64 key = 0; 4232 4233 ql_log(ql_log_info, vha, 0x706c, 4234 "User update Number of Active Qpairs %d\n", 4235 ha->tgt.num_act_qpairs); 4236 4237 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4238 4239 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 4240 btree_remove64(&tgt->lun_qpair_map, key); 4241 4242 ha->base_qpair->lun_cnt = 0; 4243 for (key = 0; key < ha->max_qpairs; key++) 4244 if (ha->queue_pair_map[key]) 4245 ha->queue_pair_map[key]->lun_cnt = 0; 4246 4247 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4248 } 4249 4250 static void qlt_assign_qpair(struct scsi_qla_host *vha, 4251 struct qla_tgt_cmd *cmd) 4252 { 4253 struct qla_qpair *qpair, *qp; 4254 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4255 struct qla_qpair_hint *h; 4256 4257 if 
(vha->flags.qpairs_available) { 4258 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); 4259 if (unlikely(!h)) { 4260 /* spread lun to qpair ratio evently */ 4261 int lcnt = 0, rc; 4262 struct scsi_qla_host *base_vha = 4263 pci_get_drvdata(vha->hw->pdev); 4264 4265 qpair = vha->hw->base_qpair; 4266 if (qpair->lun_cnt == 0) { 4267 qpair->lun_cnt++; 4268 h = qla_qpair_to_hint(tgt, qpair); 4269 BUG_ON(!h); 4270 rc = btree_insert64(&tgt->lun_qpair_map, 4271 cmd->unpacked_lun, h, GFP_ATOMIC); 4272 if (rc) { 4273 qpair->lun_cnt--; 4274 ql_log(ql_log_info, vha, 0xd037, 4275 "Unable to insert lun %llx into lun_qpair_map\n", 4276 cmd->unpacked_lun); 4277 } 4278 goto out; 4279 } else { 4280 lcnt = qpair->lun_cnt; 4281 } 4282 4283 h = NULL; 4284 list_for_each_entry(qp, &base_vha->qp_list, 4285 qp_list_elem) { 4286 if (qp->lun_cnt == 0) { 4287 qp->lun_cnt++; 4288 h = qla_qpair_to_hint(tgt, qp); 4289 BUG_ON(!h); 4290 rc = btree_insert64(&tgt->lun_qpair_map, 4291 cmd->unpacked_lun, h, GFP_ATOMIC); 4292 if (rc) { 4293 qp->lun_cnt--; 4294 ql_log(ql_log_info, vha, 0xd038, 4295 "Unable to insert lun %llx into lun_qpair_map\n", 4296 cmd->unpacked_lun); 4297 } 4298 qpair = qp; 4299 goto out; 4300 } else { 4301 if (qp->lun_cnt < lcnt) { 4302 lcnt = qp->lun_cnt; 4303 qpair = qp; 4304 continue; 4305 } 4306 } 4307 } 4308 BUG_ON(!qpair); 4309 qpair->lun_cnt++; 4310 h = qla_qpair_to_hint(tgt, qpair); 4311 BUG_ON(!h); 4312 rc = btree_insert64(&tgt->lun_qpair_map, 4313 cmd->unpacked_lun, h, GFP_ATOMIC); 4314 if (rc) { 4315 qpair->lun_cnt--; 4316 ql_log(ql_log_info, vha, 0xd039, 4317 "Unable to insert lun %llx into lun_qpair_map\n", 4318 cmd->unpacked_lun); 4319 } 4320 } 4321 } else { 4322 h = &tgt->qphints[0]; 4323 } 4324 out: 4325 cmd->qpair = h->qpair; 4326 cmd->se_cmd.cpuid = h->cpuid; 4327 } 4328 4329 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4330 struct fc_port *sess, 4331 struct atio_from_isp *atio) 4332 { 4333 struct qla_tgt_cmd *cmd; 4334 4335 cmd = 
 vha->hw->tgt.tgt_ops->get_cmd(sess);
	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	/* Keep a private copy of the ATIO; the ring slot will be reused. */
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	/* Snapshot chip_reset so a later ISP reset invalidates this command. */
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;
	cmd->edif = sess->edif.enable;

	return cmd;
}

/*
 * Queue a new SCSI command from an ATIO onto qla_tgt_wq.
 * ha->hardware_lock supposed to be held on entry.
 * Returns 0 on success, negative errno when the command cannot be accepted
 * (caller decides whether to send busy/terminate).
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	/* Session reference is now owned by the command (dropped in work fn). */
	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	/*
	 * CPU placement: with qpairs use the qpair's CPU hint; with MSI-X
	 * keep reads on the receiving CPU for cache locality; otherwise let
	 * the workqueue pick.
	 */
	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

/*
 * Build and queue a task-management command (LUN reset, clear/abort task
 * set, etc.) onto qla_tgt_wq.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		/* LUN-scoped TMFs run on the qpair that owns the LUN. */
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}

/*
 * Dispatch a task-management request carried in an ATIO.
 * ha->hardware_lock supposed to be held on entry.
 * NOTE(review): sess is used after dropping sess_lock without taking a
 * kref (unlike qlt_handle_cmd_for_atio) — presumably safe because the
 * caller holds hardware_lock across the call; confirm against the
 * session-deletion path.
 */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/*
 * Issue a 2Gb-style (ISP2xxx) single-task abort for the given session.
 * ha->hardware_lock supposed to be held on entry.
 */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Keep the original immediate notify for the eventual notify-ack. */
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	/* Hand the abort to the target core; it completes asynchronously. */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * Handle an IMM_NTFY_ABORT_TASK: look up the session by loop id and
 * forward to __qlt_abort_task(); if the session is unknown, defer the
 * abort to the sess_work machinery.
 * ha->hardware_lock supposed to be held on entry.
 */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb,
 sess);
}

/*
 * Completion callback for an explicit LOGO mailbox command: log failures
 * and flag the fcport so waiters know logout has finished.
 */
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from"
		    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
		    " LOGO failed: %#x\n",
		    __func__,
		    fcport->se_sess,
		    fcport,
		    fcport->port_name, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns existing session with matching wwn if present.
 * Null otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
	port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			/* At most one session may match the login's WWN. */
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	/* Pack the 24-bit port id in the same layout sid_to_key() produces. */
	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

/*
 * Handle an incoming PLOGI/PRLI immediate notify: invalidate conflicting
 * sessions, allocate a PLOGI-ACK tracking node (pla) and either schedule
 * creation of a new session or update/delete the existing one.
 * Returns 1 when the caller should ACK the IOCB now, 0 when the ACK is
 * sent asynchronously later.
 */
static int qlt_handle_login(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	/* port_id bytes arrive in reverse order in the IOCB */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	/* EDIF: reject plain PLOGI while the auth application is down. */
	if (vha->hw->flags.edif_enabled &&
	    !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
	    iocb->u.isp24.status_subcode == ELS_PLOGI &&
	    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_INACTIVE(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		} else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
		    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		}
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		/* pla not linked to any session yet: free it ourselves. */
		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;
	sess->loop_id = loop_id;

	if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
		/* remote port has assigned Port ID */
		if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
			vha->d_id = sess->d_id;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC - send port online\n",
		    __func__, sess->port_name);

		qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
		    sess->d_id.b24);
	}

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		/*
		 * NOTE(review): loop_id/d_id were already assigned just above
		 * and fw_login_state is set twice in this branch; the
		 * re-assignments below are redundant but harmless.
		 */
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;


	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);


		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 Might drop it, then reacquire.
 *
 * Central dispatcher for ELS immediate notifies (PLOGI/PRLI/LOGO/PRLO/
 * TPRLO/PDISC/ADISC). Returns 1 when the caller should send the notify
 * ack at the end of this thread, 0 when the ack is issued asynchronously.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	/* port_id bytes arrive in reverse order in the IOCB */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		/* N2N: PRLI is treated like a login after extra checks. */
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize prli\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				/* rport registration still running: back off */
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		/* Flush a pending LINK REINIT ack before answering PDISC/ADISC. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	/* Default: ack here; handlers that ack asynchronously clear this. */
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Ack any previously stashed REINIT before replacing it. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		/* No session: just terminate the exchange. */
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	/* Build a status-only CTIO7 carrying the busy/queue-full status. */
	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
	    CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if
the explicit conformation is used. 5428 */ 5429 ctio24->u.status1.ox_id = 5430 cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 5431 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5432 5433 ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 5434 5435 if (ctio24->u.status1.residual != 0) 5436 ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); 5437 5438 /* Memory Barrier */ 5439 wmb(); 5440 if (qpair->reqq_start_iocbs) 5441 qpair->reqq_start_iocbs(qpair); 5442 else 5443 qla2x00_start_iocbs(vha, qpair->req); 5444 return 0; 5445 } 5446 5447 /* 5448 * This routine is used to allocate a command for either a QFull condition 5449 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5450 * out previously. 5451 */ 5452 static void 5453 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5454 struct atio_from_isp *atio, uint16_t status, int qfull) 5455 { 5456 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5457 struct qla_hw_data *ha = vha->hw; 5458 struct fc_port *sess; 5459 struct qla_tgt_cmd *cmd; 5460 unsigned long flags; 5461 5462 if (unlikely(tgt->tgt_stop)) { 5463 ql_dbg(ql_dbg_io, vha, 0x300a, 5464 "New command while device %p is shutting down\n", tgt); 5465 return; 5466 } 5467 5468 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5469 vha->hw->tgt.num_qfull_cmds_dropped++; 5470 if (vha->hw->tgt.num_qfull_cmds_dropped > 5471 vha->qla_stats.stat_max_qfull_cmds_dropped) 5472 vha->qla_stats.stat_max_qfull_cmds_dropped = 5473 vha->hw->tgt.num_qfull_cmds_dropped; 5474 5475 ql_dbg(ql_dbg_io, vha, 0x3068, 5476 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5477 vha->vp_idx, __func__, 5478 vha->hw->tgt.num_qfull_cmds_dropped); 5479 5480 qlt_chk_exch_leak_thresh_hold(vha); 5481 return; 5482 } 5483 5484 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5485 (vha, atio->u.isp24.fcp_hdr.s_id); 5486 if (!sess) 5487 return; 5488 5489 cmd = ha->tgt.tgt_ops->get_cmd(sess); 5490 if (!cmd) { 5491 ql_dbg(ql_dbg_io, vha, 
0x3009, 5492 "qla_target(%d): %s: Allocation of cmd failed\n", 5493 vha->vp_idx, __func__); 5494 5495 vha->hw->tgt.num_qfull_cmds_dropped++; 5496 if (vha->hw->tgt.num_qfull_cmds_dropped > 5497 vha->qla_stats.stat_max_qfull_cmds_dropped) 5498 vha->qla_stats.stat_max_qfull_cmds_dropped = 5499 vha->hw->tgt.num_qfull_cmds_dropped; 5500 5501 qlt_chk_exch_leak_thresh_hold(vha); 5502 return; 5503 } 5504 5505 qlt_incr_num_pend_cmds(vha); 5506 INIT_LIST_HEAD(&cmd->cmd_list); 5507 memcpy(&cmd->atio, atio, sizeof(*atio)); 5508 5509 cmd->tgt = vha->vha_tgt.qla_tgt; 5510 cmd->vha = vha; 5511 cmd->reset_count = ha->base_qpair->chip_reset; 5512 cmd->q_full = 1; 5513 cmd->qpair = ha->base_qpair; 5514 5515 if (qfull) { 5516 cmd->q_full = 1; 5517 /* NOTE: borrowing the state field to carry the status */ 5518 cmd->state = status; 5519 } else 5520 cmd->term_exchg = 1; 5521 5522 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5523 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5524 5525 vha->hw->tgt.num_qfull_cmds_alloc++; 5526 if (vha->hw->tgt.num_qfull_cmds_alloc > 5527 vha->qla_stats.stat_max_qfull_cmds_alloc) 5528 vha->qla_stats.stat_max_qfull_cmds_alloc = 5529 vha->hw->tgt.num_qfull_cmds_alloc; 5530 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5531 } 5532 5533 int 5534 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5535 { 5536 struct scsi_qla_host *vha = qpair->vha; 5537 struct qla_hw_data *ha = vha->hw; 5538 unsigned long flags; 5539 struct qla_tgt_cmd *cmd, *tcmd; 5540 struct list_head free_list, q_full_list; 5541 int rc = 0; 5542 5543 if (list_empty(&ha->tgt.q_full_list)) 5544 return 0; 5545 5546 INIT_LIST_HEAD(&free_list); 5547 INIT_LIST_HEAD(&q_full_list); 5548 5549 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5550 if (list_empty(&ha->tgt.q_full_list)) { 5551 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5552 return 0; 5553 } 5554 5555 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5556 
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5557 5558 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5559 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5560 if (cmd->q_full) 5561 /* cmd->state is a borrowed field to hold status */ 5562 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5563 else if (cmd->term_exchg) 5564 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5565 5566 if (rc == -ENOMEM) 5567 break; 5568 5569 if (cmd->q_full) 5570 ql_dbg(ql_dbg_io, vha, 0x3006, 5571 "%s: busy sent for ox_id[%04x]\n", __func__, 5572 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5573 else if (cmd->term_exchg) 5574 ql_dbg(ql_dbg_io, vha, 0x3007, 5575 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5576 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5577 else 5578 ql_dbg(ql_dbg_io, vha, 0x3008, 5579 "%s: Unexpected cmd in QFull list %p\n", __func__, 5580 cmd); 5581 5582 list_move_tail(&cmd->cmd_list, &free_list); 5583 5584 /* piggy back on hardware_lock for protection */ 5585 vha->hw->tgt.num_qfull_cmds_alloc--; 5586 } 5587 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5588 5589 cmd = NULL; 5590 5591 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5592 list_del(&cmd->cmd_list); 5593 /* This cmd was never sent to TCM. 
There is no need 5594 * to schedule free or call free_cmd 5595 */ 5596 qlt_free_cmd(cmd); 5597 } 5598 5599 if (!list_empty(&q_full_list)) { 5600 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5601 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5602 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5603 } 5604 5605 return rc; 5606 } 5607 5608 static void 5609 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5610 uint16_t status) 5611 { 5612 int rc = 0; 5613 struct scsi_qla_host *vha = qpair->vha; 5614 5615 rc = __qlt_send_busy(qpair, atio, status); 5616 if (rc == -ENOMEM) 5617 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5618 } 5619 5620 static int 5621 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5622 struct atio_from_isp *atio, uint8_t ha_locked) 5623 { 5624 struct qla_hw_data *ha = vha->hw; 5625 unsigned long flags; 5626 5627 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5628 return 0; 5629 5630 if (!ha_locked) 5631 spin_lock_irqsave(&ha->hardware_lock, flags); 5632 qlt_send_busy(qpair, atio, qla_sam_status); 5633 if (!ha_locked) 5634 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5635 5636 return 1; 5637 } 5638 5639 /* ha->hardware_lock supposed to be held on entry */ 5640 /* called via callback from qla2xxx */ 5641 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5642 struct atio_from_isp *atio, uint8_t ha_locked) 5643 { 5644 struct qla_hw_data *ha = vha->hw; 5645 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5646 int rc; 5647 unsigned long flags = 0; 5648 5649 if (unlikely(tgt == NULL)) { 5650 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5651 "ATIO pkt, but no tgt (ha %p)", ha); 5652 return; 5653 } 5654 /* 5655 * In tgt_stop mode we also should allow all requests to pass. 5656 * Otherwise, some commands can stuck. 
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		/* An unknown exchange address means the firmware could not
		 * assign an exchange; answer with QUEUE FULL. */
		if (unlikely(atio->u.isp24.exchange_addr ==
		    cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				/* BUSY already sent by the threshold check. */
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}

/*
 * qpair lock is assumed to be held
 * rc = 0 : send terminate & abts respond
 * rc != 0: do not send term & abts respond
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			/* Same ABTS seen 5+ times within one jiffy: give up,
			 * capture firmware state and schedule an ISP abort. */
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha);
			else
				qla2xxx_dump_fw(vha);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		/* New jiffy window: start tracking this exchange afresh. */
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}


/*
 * Handle completion of a previously issued ABTS response.  On a
 * firmware completion error with subcode 0x1E (and no loop detected by
 * qlt_chk_unresolv_exchg()) the termination is retried; otherwise the
 * management command is released.
 */
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
	    (struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	/* The mcmd may belong to a different (NPIV) host. */
	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
		    le32_to_cpu(entry->error_subcode2) == 0) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Dispatch a response-queue entry to the matching target-mode handler
 * (CTIO completions, 2xxx-style ATIOs, immediate notifies, notify acks
 * and ABTS traffic).
 */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* 2xxx-style ATIO delivered on the response queue. */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 *
 * Target-mode reaction to asynchronous mailbox events (loop/LIP state
 * changes, exchange starvation, port updates, ...).
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */


	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up.
					 */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		/* Ack a link-reinit IOCB that was deferred while the
		 * loop was down. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		if (mailbox[3] == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				/* Persistent starvation: reset the chip. */
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation-. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		login_code = mailbox[2];
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

}

/*
 * Fetch the port database for @loop_id and merge it into the host's
 * fcport list.  If a port with the same WWPN already exists, its fields
 * are updated and the temporary fcport is freed; otherwise the new
 * fcport is inserted.  Returns the live fcport, or NULL on failure.
 */
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		/* Existing port: refresh it and discard the temporary one. */
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
		   vha->fcport_count++;
		fcport->login_gen++;
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Must be called under tgt_mutex */
/*
 * Build a local session for the initiator at @s_id: resolve its loop
 * id, pull the port database, then create the session.  Retries from
 * scratch if a global reset happened during discovery.  Returns the
 * new session or NULL.
 */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	/* Snapshot the reset counter; compared after discovery below. */
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}

/*
 * Deferred (workqueue) handling of an ABTS: look up or create the
 * session for the aborting initiator and run the abort; on any failure
 * reject the ABTS back to the initiator.
 */
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* sess_lock must be dropped: session creation sleeps. */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		/* Take a reference; fails if the session is being torn down. */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	/* Reject the ABTS back to the initiator. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * Workqueue function draining tgt->sess_works_list; currently only
 * QLA_TGT_SESS_WORK_ABORT items are queued.
 */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		/* Drop the lock while the (sleeping) work item runs. */
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
/*
 * Allocate and register the struct qla_tgt for @base_vha: set up the
 * per-qpair hint table, the LUN->qpair btree and the session workqueue,
 * then add the target to the global list.  Returns 0 on success or when
 * target mode is unsupported/disabled, negative errno on allocation
 * failure.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	/* One hint per qpair plus one for the base qpair. */
	tgt->qphints = kcalloc(ha->max_qpairs + 1,
			       sizeof(struct qla_qpair_hint),
			       GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
			"Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
/*
 * Tear down the target registration created by qlt_add_target().
 * For NPIV ports (vha->fc_vport set) only the release is needed; the
 * physical port additionally drains leftover qfull commands first.
 */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

/* Remove every entry from, then destroy, the ha->host_map btree. */
void qla_remove_hostmap(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->host_map, key, node)
		btree_remove32(&ha->host_map, key);

	btree_destroy32(&ha->host_map);
}

/* Debug helper: dump the host's WWNs next to the configfs-supplied WWPN. */
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 *
 * Walks qla_tgt_glist for the host whose port name matches @phys_wwpn,
 * takes a Scsi_Host reference and invokes @callback on it.  Returns the
 * callback's result, or -ENODEV when no matching host is found.
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			/* Not the requested physical port; drop the ref. */
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
vha->host->active_mode = MODE_INITIATOR; 6605 break; 6606 case QLA2XXX_INI_MODE_DUAL: 6607 vha->host->active_mode = MODE_DUAL; 6608 break; 6609 default: 6610 break; 6611 } 6612 } 6613 6614 /* Must be called under HW lock */ 6615 static void qlt_clear_mode(struct scsi_qla_host *vha) 6616 { 6617 switch (vha->qlini_mode) { 6618 case QLA2XXX_INI_MODE_DISABLED: 6619 vha->host->active_mode = MODE_UNKNOWN; 6620 break; 6621 case QLA2XXX_INI_MODE_EXCLUSIVE: 6622 vha->host->active_mode = MODE_INITIATOR; 6623 break; 6624 case QLA2XXX_INI_MODE_ENABLED: 6625 case QLA2XXX_INI_MODE_DUAL: 6626 vha->host->active_mode = MODE_INITIATOR; 6627 break; 6628 default: 6629 break; 6630 } 6631 } 6632 6633 /* 6634 * qla_tgt_enable_vha - NO LOCK HELD 6635 * 6636 * host_reset, bring up w/ Target Mode Enabled 6637 */ 6638 void 6639 qlt_enable_vha(struct scsi_qla_host *vha) 6640 { 6641 struct qla_hw_data *ha = vha->hw; 6642 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6643 unsigned long flags; 6644 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6645 6646 if (!tgt) { 6647 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6648 "Unable to locate qla_tgt pointer from" 6649 " struct qla_hw_data\n"); 6650 dump_stack(); 6651 return; 6652 } 6653 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6654 return; 6655 6656 if (ha->tgt.num_act_qpairs > ha->max_qpairs) 6657 ha->tgt.num_act_qpairs = ha->max_qpairs; 6658 spin_lock_irqsave(&ha->hardware_lock, flags); 6659 tgt->tgt_stopped = 0; 6660 qlt_set_mode(vha); 6661 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6662 6663 mutex_lock(&ha->optrom_mutex); 6664 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 6665 "%s.\n", __func__); 6666 if (vha->vp_idx) { 6667 qla24xx_disable_vp(vha); 6668 qla24xx_enable_vp(vha); 6669 } else { 6670 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6671 qla2xxx_wake_dpc(base_vha); 6672 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != 6673 QLA_SUCCESS); 6674 } 6675 mutex_unlock(&ha->optrom_mutex); 6676 } 6677 EXPORT_SYMBOL(qlt_enable_vha); 6678 
6679 /* 6680 * qla_tgt_disable_vha - NO LOCK HELD 6681 * 6682 * Disable Target Mode and reset the adapter 6683 */ 6684 static void qlt_disable_vha(struct scsi_qla_host *vha) 6685 { 6686 struct qla_hw_data *ha = vha->hw; 6687 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6688 unsigned long flags; 6689 6690 if (!tgt) { 6691 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6692 "Unable to locate qla_tgt pointer from" 6693 " struct qla_hw_data\n"); 6694 dump_stack(); 6695 return; 6696 } 6697 6698 spin_lock_irqsave(&ha->hardware_lock, flags); 6699 qlt_clear_mode(vha); 6700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6701 6702 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6703 qla2xxx_wake_dpc(vha); 6704 6705 /* 6706 * We are expecting the offline state. 6707 * QLA_FUNCTION_FAILED means that adapter is offline. 6708 */ 6709 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 6710 ql_dbg(ql_dbg_tgt, vha, 0xe081, 6711 "adapter is offline\n"); 6712 } 6713 6714 /* 6715 * Called from qla_init.c:qla24xx_vport_create() contex to setup 6716 * the target mode specific struct scsi_qla_host and struct qla_hw_data 6717 * members. 6718 */ 6719 void 6720 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6721 { 6722 vha->vha_tgt.qla_tgt = NULL; 6723 6724 mutex_init(&vha->vha_tgt.tgt_mutex); 6725 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6726 6727 INIT_LIST_HEAD(&vha->unknown_atio_list); 6728 INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn); 6729 6730 qlt_clear_mode(vha); 6731 6732 /* 6733 * NOTE: Currently the value is kept the same for <24xx and 6734 * >=24xx ISPs. If it is necessary to change it, 6735 * the check should be added for specific ISPs, 6736 * assigning the value appropriately. 
6737 */ 6738 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6739 6740 qlt_add_target(ha, vha); 6741 } 6742 6743 u8 6744 qlt_rff_id(struct scsi_qla_host *vha) 6745 { 6746 u8 fc4_feature = 0; 6747 /* 6748 * FC-4 Feature bit 0 indicates target functionality to the name server. 6749 */ 6750 if (qla_tgt_mode_enabled(vha)) { 6751 fc4_feature = BIT_0; 6752 } else if (qla_ini_mode_enabled(vha)) { 6753 fc4_feature = BIT_1; 6754 } else if (qla_dual_mode_enabled(vha)) 6755 fc4_feature = BIT_0 | BIT_1; 6756 6757 return fc4_feature; 6758 } 6759 6760 /* 6761 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6762 * @ha: HA context 6763 * 6764 * Beginning of ATIO ring has initialization control block already built 6765 * by nvram config routine. 6766 * 6767 * Returns 0 on success. 6768 */ 6769 void 6770 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6771 { 6772 struct qla_hw_data *ha = vha->hw; 6773 uint16_t cnt; 6774 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6775 6776 if (qla_ini_mode_enabled(vha)) 6777 return; 6778 6779 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6780 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); 6781 pkt++; 6782 } 6783 6784 } 6785 6786 /* 6787 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6788 * @ha: SCSI driver HA context 6789 */ 6790 void 6791 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6792 { 6793 struct qla_hw_data *ha = vha->hw; 6794 struct atio_from_isp *pkt; 6795 int cnt, i; 6796 6797 if (!ha->flags.fw_started) 6798 return; 6799 6800 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6801 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6802 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6803 cnt = pkt->u.raw.entry_count; 6804 6805 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6806 /* 6807 * This packet is corrupted. The header + payload 6808 * can not be trusted. There is no point in passing 6809 * it further up. 
6810 */ 6811 ql_log(ql_log_warn, vha, 0xd03c, 6812 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6813 &pkt->u.isp24.fcp_hdr.s_id, 6814 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6815 pkt->u.isp24.exchange_addr, pkt); 6816 6817 adjust_corrupted_atio(pkt); 6818 qlt_send_term_exchange(ha->base_qpair, NULL, pkt, 6819 ha_locked, 0); 6820 } else { 6821 qlt_24xx_atio_pkt_all_vps(vha, 6822 (struct atio_from_isp *)pkt, ha_locked); 6823 } 6824 6825 for (i = 0; i < cnt; i++) { 6826 ha->tgt.atio_ring_index++; 6827 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6828 ha->tgt.atio_ring_index = 0; 6829 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6830 } else 6831 ha->tgt.atio_ring_ptr++; 6832 6833 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); 6834 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6835 } 6836 wmb(); 6837 } 6838 6839 /* Adjust ring index */ 6840 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6841 } 6842 6843 void 6844 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6845 { 6846 struct qla_hw_data *ha = vha->hw; 6847 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6848 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6849 6850 if (!QLA_TGT_MODE_ENABLED()) 6851 return; 6852 6853 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0); 6854 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0); 6855 rd_reg_dword(ISP_ATIO_Q_OUT(vha)); 6856 6857 if (ha->flags.msix_enabled) { 6858 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6859 icb->msix_atio = cpu_to_le16(msix->entry); 6860 icb->firmware_options_2 &= cpu_to_le32(~BIT_26); 6861 ql_dbg(ql_dbg_init, vha, 0xf072, 6862 "Registering ICB vector 0x%x for atio que.\n", 6863 msix->entry); 6864 } 6865 } else { 6866 /* INTx|MSI */ 6867 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 6868 icb->msix_atio = 0; 6869 icb->firmware_options_2 |= cpu_to_le32(BIT_26); 6870 ql_dbg(ql_dbg_init, vha, 0xf072, 6871 "%s: Use INTx for ATIOQ.\n", __func__); 6872 } 6873 } 6874 } 6875 6876 void 6877 
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6878 { 6879 struct qla_hw_data *ha = vha->hw; 6880 u32 tmp; 6881 6882 if (!QLA_TGT_MODE_ENABLED()) 6883 return; 6884 6885 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6886 if (!ha->tgt.saved_set) { 6887 /* We save only once */ 6888 ha->tgt.saved_exchange_count = nv->exchange_count; 6889 ha->tgt.saved_firmware_options_1 = 6890 nv->firmware_options_1; 6891 ha->tgt.saved_firmware_options_2 = 6892 nv->firmware_options_2; 6893 ha->tgt.saved_firmware_options_3 = 6894 nv->firmware_options_3; 6895 ha->tgt.saved_set = 1; 6896 } 6897 6898 if (qla_tgt_mode_enabled(vha)) 6899 nv->exchange_count = cpu_to_le16(0xFFFF); 6900 else /* dual */ 6901 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 6902 6903 /* Enable target mode */ 6904 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6905 6906 /* Disable ini mode, if requested */ 6907 if (qla_tgt_mode_enabled(vha)) 6908 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6909 6910 /* Disable Full Login after LIP */ 6911 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6912 /* Enable initial LIP */ 6913 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6914 if (ql2xtgt_tape_enable) 6915 /* Enable FC Tape support */ 6916 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6917 else 6918 /* Disable FC Tape support */ 6919 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6920 6921 /* Disable Full Login after LIP */ 6922 nv->host_p &= cpu_to_le32(~BIT_10); 6923 6924 /* 6925 * clear BIT 15 explicitly as we have seen at least 6926 * a couple of instances where this was set and this 6927 * was causing the firmware to not be initialized. 
6928 */ 6929 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6930 /* Enable target PRLI control */ 6931 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6932 6933 if (IS_QLA25XX(ha)) { 6934 /* Change Loop-prefer to Pt-Pt */ 6935 tmp = ~(BIT_4|BIT_5|BIT_6); 6936 nv->firmware_options_2 &= cpu_to_le32(tmp); 6937 tmp = P2P << 4; 6938 nv->firmware_options_2 |= cpu_to_le32(tmp); 6939 } 6940 } else { 6941 if (ha->tgt.saved_set) { 6942 nv->exchange_count = ha->tgt.saved_exchange_count; 6943 nv->firmware_options_1 = 6944 ha->tgt.saved_firmware_options_1; 6945 nv->firmware_options_2 = 6946 ha->tgt.saved_firmware_options_2; 6947 nv->firmware_options_3 = 6948 ha->tgt.saved_firmware_options_3; 6949 } 6950 return; 6951 } 6952 6953 if (ha->base_qpair->enable_class_2) { 6954 if (vha->flags.init_done) 6955 fc_host_supported_classes(vha->host) = 6956 FC_COS_CLASS2 | FC_COS_CLASS3; 6957 6958 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6959 } else { 6960 if (vha->flags.init_done) 6961 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6962 6963 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6964 } 6965 } 6966 6967 void 6968 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6969 struct init_cb_24xx *icb) 6970 { 6971 struct qla_hw_data *ha = vha->hw; 6972 6973 if (!QLA_TGT_MODE_ENABLED()) 6974 return; 6975 6976 if (ha->tgt.node_name_set) { 6977 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6978 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6979 } 6980 } 6981 6982 void 6983 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6984 { 6985 struct qla_hw_data *ha = vha->hw; 6986 u32 tmp; 6987 6988 if (!QLA_TGT_MODE_ENABLED()) 6989 return; 6990 6991 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6992 if (!ha->tgt.saved_set) { 6993 /* We save only once */ 6994 ha->tgt.saved_exchange_count = nv->exchange_count; 6995 ha->tgt.saved_firmware_options_1 = 6996 nv->firmware_options_1; 6997 ha->tgt.saved_firmware_options_2 = 6998 
nv->firmware_options_2; 6999 ha->tgt.saved_firmware_options_3 = 7000 nv->firmware_options_3; 7001 ha->tgt.saved_set = 1; 7002 } 7003 7004 if (qla_tgt_mode_enabled(vha)) 7005 nv->exchange_count = cpu_to_le16(0xFFFF); 7006 else /* dual */ 7007 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); 7008 7009 /* Enable target mode */ 7010 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 7011 7012 /* Disable ini mode, if requested */ 7013 if (qla_tgt_mode_enabled(vha)) 7014 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 7015 /* Disable Full Login after LIP */ 7016 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 7017 /* Enable initial LIP */ 7018 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 7019 /* 7020 * clear BIT 15 explicitly as we have seen at 7021 * least a couple of instances where this was set 7022 * and this was causing the firmware to not be 7023 * initialized. 7024 */ 7025 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 7026 if (ql2xtgt_tape_enable) 7027 /* Enable FC tape support */ 7028 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 7029 else 7030 /* Disable FC tape support */ 7031 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 7032 7033 /* Disable Full Login after LIP */ 7034 nv->host_p &= cpu_to_le32(~BIT_10); 7035 /* Enable target PRLI control */ 7036 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 7037 7038 /* Change Loop-prefer to Pt-Pt */ 7039 tmp = ~(BIT_4|BIT_5|BIT_6); 7040 nv->firmware_options_2 &= cpu_to_le32(tmp); 7041 tmp = P2P << 4; 7042 nv->firmware_options_2 |= cpu_to_le32(tmp); 7043 } else { 7044 if (ha->tgt.saved_set) { 7045 nv->exchange_count = ha->tgt.saved_exchange_count; 7046 nv->firmware_options_1 = 7047 ha->tgt.saved_firmware_options_1; 7048 nv->firmware_options_2 = 7049 ha->tgt.saved_firmware_options_2; 7050 nv->firmware_options_3 = 7051 ha->tgt.saved_firmware_options_3; 7052 } 7053 return; 7054 } 7055 7056 if (ha->base_qpair->enable_class_2) { 7057 if (vha->flags.init_done) 7058 fc_host_supported_classes(vha->host) = 7059 FC_COS_CLASS2 | 
FC_COS_CLASS3; 7060 7061 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 7062 } else { 7063 if (vha->flags.init_done) 7064 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 7065 7066 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 7067 } 7068 } 7069 7070 void 7071 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 7072 struct init_cb_81xx *icb) 7073 { 7074 struct qla_hw_data *ha = vha->hw; 7075 7076 if (!QLA_TGT_MODE_ENABLED()) 7077 return; 7078 7079 if (ha->tgt.node_name_set) { 7080 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 7081 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 7082 } 7083 } 7084 7085 void 7086 qlt_83xx_iospace_config(struct qla_hw_data *ha) 7087 { 7088 if (!QLA_TGT_MODE_ENABLED()) 7089 return; 7090 7091 ha->msix_count += 1; /* For ATIO Q */ 7092 } 7093 7094 7095 void 7096 qlt_modify_vp_config(struct scsi_qla_host *vha, 7097 struct vp_config_entry_24xx *vpmod) 7098 { 7099 /* enable target mode. Bit5 = 1 => disable */ 7100 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 7101 vpmod->options_idx1 &= ~BIT_5; 7102 7103 /* Disable ini mode, if requested. 
bit4 = 1 => disable */ 7104 if (qla_tgt_mode_enabled(vha)) 7105 vpmod->options_idx1 &= ~BIT_4; 7106 } 7107 7108 void 7109 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 7110 { 7111 mutex_init(&base_vha->vha_tgt.tgt_mutex); 7112 if (!QLA_TGT_MODE_ENABLED()) 7113 return; 7114 7115 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 7116 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 7117 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 7118 } else { 7119 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 7120 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 7121 } 7122 7123 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 7124 7125 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 7126 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 7127 qlt_unknown_atio_work_fn); 7128 7129 qlt_clear_mode(base_vha); 7130 7131 qla_update_vp_map(base_vha, SET_VP_IDX); 7132 } 7133 7134 irqreturn_t 7135 qla83xx_msix_atio_q(int irq, void *dev_id) 7136 { 7137 struct rsp_que *rsp; 7138 scsi_qla_host_t *vha; 7139 struct qla_hw_data *ha; 7140 unsigned long flags; 7141 7142 rsp = (struct rsp_que *) dev_id; 7143 ha = rsp->hw; 7144 vha = pci_get_drvdata(ha->pdev); 7145 7146 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7147 7148 qlt_24xx_process_atio_queue(vha, 0); 7149 7150 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7151 7152 return IRQ_HANDLED; 7153 } 7154 7155 static void 7156 qlt_handle_abts_recv_work(struct work_struct *work) 7157 { 7158 struct qla_tgt_sess_op *op = container_of(work, 7159 struct qla_tgt_sess_op, work); 7160 scsi_qla_host_t *vha = op->vha; 7161 struct qla_hw_data *ha = vha->hw; 7162 unsigned long flags; 7163 7164 if (qla2x00_reset_active(vha) || 7165 (op->chip_reset != ha->base_qpair->chip_reset)) 7166 return; 7167 7168 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7169 qlt_24xx_process_atio_queue(vha, 0); 7170 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7171 7172 
spin_lock_irqsave(&ha->hardware_lock, flags); 7173 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 7174 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7175 7176 kfree(op); 7177 } 7178 7179 void 7180 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 7181 response_t *pkt) 7182 { 7183 struct qla_tgt_sess_op *op; 7184 7185 op = kzalloc(sizeof(*op), GFP_ATOMIC); 7186 7187 if (!op) { 7188 /* do not reach for ATIO queue here. This is best effort err 7189 * recovery at this point. 7190 */ 7191 qlt_response_pkt_all_vps(vha, rsp, pkt); 7192 return; 7193 } 7194 7195 memcpy(&op->atio, pkt, sizeof(*pkt)); 7196 op->vha = vha; 7197 op->chip_reset = vha->hw->base_qpair->chip_reset; 7198 op->rsp = rsp; 7199 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7200 queue_work(qla_tgt_wq, &op->work); 7201 return; 7202 } 7203 7204 int 7205 qlt_mem_alloc(struct qla_hw_data *ha) 7206 { 7207 if (!QLA_TGT_MODE_ENABLED()) 7208 return 0; 7209 7210 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7211 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7212 &ha->tgt.atio_dma, GFP_KERNEL); 7213 if (!ha->tgt.atio_ring) { 7214 return -ENOMEM; 7215 } 7216 return 0; 7217 } 7218 7219 void 7220 qlt_mem_free(struct qla_hw_data *ha) 7221 { 7222 if (!QLA_TGT_MODE_ENABLED()) 7223 return; 7224 7225 if (ha->tgt.atio_ring) { 7226 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7227 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7228 ha->tgt.atio_dma); 7229 } 7230 ha->tgt.atio_ring = NULL; 7231 ha->tgt.atio_dma = 0; 7232 } 7233 7234 static int __init qlt_parse_ini_mode(void) 7235 { 7236 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7237 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7238 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7239 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7240 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7241 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 
7242 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 7243 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 7244 else 7245 return false; 7246 7247 return true; 7248 } 7249 7250 int __init qlt_init(void) 7251 { 7252 int ret; 7253 7254 BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); 7255 BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); 7256 7257 if (!qlt_parse_ini_mode()) { 7258 ql_log(ql_log_fatal, NULL, 0xe06b, 7259 "qlt_parse_ini_mode() failed\n"); 7260 return -EINVAL; 7261 } 7262 7263 if (!QLA_TGT_MODE_ENABLED()) 7264 return 0; 7265 7266 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 7267 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 7268 qla_tgt_mgmt_cmd), 0, NULL); 7269 if (!qla_tgt_mgmt_cmd_cachep) { 7270 ql_log(ql_log_fatal, NULL, 0xd04b, 7271 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 7272 return -ENOMEM; 7273 } 7274 7275 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 7276 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 7277 0, NULL); 7278 7279 if (!qla_tgt_plogi_cachep) { 7280 ql_log(ql_log_fatal, NULL, 0xe06d, 7281 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 7282 ret = -ENOMEM; 7283 goto out_mgmt_cmd_cachep; 7284 } 7285 7286 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 7287 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 7288 if (!qla_tgt_mgmt_cmd_mempool) { 7289 ql_log(ql_log_fatal, NULL, 0xe06e, 7290 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 7291 ret = -ENOMEM; 7292 goto out_plogi_cachep; 7293 } 7294 7295 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 7296 if (!qla_tgt_wq) { 7297 ql_log(ql_log_fatal, NULL, 0xe06f, 7298 "alloc_workqueue for qla_tgt_wq failed\n"); 7299 ret = -ENOMEM; 7300 goto out_cmd_mempool; 7301 } 7302 /* 7303 * Return 1 to signal that initiator-mode is being disabled 7304 */ 7305 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 
1 : 0; 7306 7307 out_cmd_mempool: 7308 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7309 out_plogi_cachep: 7310 kmem_cache_destroy(qla_tgt_plogi_cachep); 7311 out_mgmt_cmd_cachep: 7312 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7313 return ret; 7314 } 7315 7316 void qlt_exit(void) 7317 { 7318 if (!QLA_TGT_MODE_ENABLED()) 7319 return; 7320 7321 destroy_workqueue(qla_tgt_wq); 7322 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7323 kmem_cache_destroy(qla_tgt_plogi_cachep); 7324 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7325 } 7326