// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

/* Module parameter: enable FC Tape (Sequence Level Error Recovery). */
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

/* Module parameter: when/whether initiator mode is enabled (see desc below). */
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

/*
 * Module parameter: in dual mode, percentage of FW exchange resources
 * reserved for target mode.
 */
static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

/* Module parameter: let the user steer IRQ placement via smp_affinity. */
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/* SAM status returned when the driver (vs. target core) must refuse a cmd. */
static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL;	/* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation on the time when
 * those functions are called:
 *
 *  - Either context is IRQ and only IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions boundaries, except tgt_stop, which
 *    additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);
136 /* 137 * Global Variables 138 */ 139 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; 140 struct kmem_cache *qla_tgt_plogi_cachep; 141 static mempool_t *qla_tgt_mgmt_cmd_mempool; 142 static struct workqueue_struct *qla_tgt_wq; 143 static DEFINE_MUTEX(qla_tgt_mutex); 144 static LIST_HEAD(qla_tgt_glist); 145 146 static const char *prot_op_str(u32 prot_op) 147 { 148 switch (prot_op) { 149 case TARGET_PROT_NORMAL: return "NORMAL"; 150 case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; 151 case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; 152 case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; 153 case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; 154 case TARGET_PROT_DIN_PASS: return "DIN_PASS"; 155 case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; 156 default: return "UNKNOWN"; 157 } 158 } 159 160 /* This API intentionally takes dest as a parameter, rather than returning 161 * int value to avoid caller forgetting to issue wmb() after the store */ 162 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) 163 { 164 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); 165 *dest = atomic_inc_return(&base_vha->generation_tick); 166 /* memory barrier */ 167 wmb(); 168 } 169 170 /* Might release hw lock, then reaquire!! 
*/ 171 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) 172 { 173 /* Send marker if required */ 174 if (unlikely(vha->marker_needed != 0)) { 175 int rc = qla2x00_issue_marker(vha, vha_locked); 176 177 if (rc != QLA_SUCCESS) { 178 ql_dbg(ql_dbg_tgt, vha, 0xe03d, 179 "qla_target(%d): issue_marker() failed\n", 180 vha->vp_idx); 181 } 182 return rc; 183 } 184 return QLA_SUCCESS; 185 } 186 187 static inline 188 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 189 be_id_t d_id) 190 { 191 struct scsi_qla_host *host; 192 uint32_t key; 193 194 if (vha->d_id.b.area == d_id.area && 195 vha->d_id.b.domain == d_id.domain && 196 vha->d_id.b.al_pa == d_id.al_pa) 197 return vha; 198 199 key = be_to_port_id(d_id).b24; 200 201 host = btree_lookup32(&vha->hw->tgt.host_map, key); 202 if (!host) 203 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, 204 "Unable to find host %06x\n", key); 205 206 return host; 207 } 208 209 static inline 210 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, 211 uint16_t vp_idx) 212 { 213 struct qla_hw_data *ha = vha->hw; 214 215 if (vha->vp_idx == vp_idx) 216 return vha; 217 218 BUG_ON(ha->tgt.tgt_vp_map == NULL); 219 if (likely(test_bit(vp_idx, ha->vp_idx_map))) 220 return ha->tgt.tgt_vp_map[vp_idx].vha; 221 222 return NULL; 223 } 224 225 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) 226 { 227 unsigned long flags; 228 229 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 230 231 vha->hw->tgt.num_pend_cmds++; 232 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) 233 vha->qla_stats.stat_max_pend_cmds = 234 vha->hw->tgt.num_pend_cmds; 235 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 236 } 237 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) 238 { 239 unsigned long flags; 240 241 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 242 vha->hw->tgt.num_pend_cmds--; 243 
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 244 } 245 246 247 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, 248 struct atio_from_isp *atio, uint8_t ha_locked) 249 { 250 struct qla_tgt_sess_op *u; 251 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 252 unsigned long flags; 253 254 if (tgt->tgt_stop) { 255 ql_dbg(ql_dbg_async, vha, 0x502c, 256 "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", 257 vha->vp_idx); 258 goto out_term; 259 } 260 261 u = kzalloc(sizeof(*u), GFP_ATOMIC); 262 if (u == NULL) 263 goto out_term; 264 265 u->vha = vha; 266 memcpy(&u->atio, atio, sizeof(*atio)); 267 INIT_LIST_HEAD(&u->cmd_list); 268 269 spin_lock_irqsave(&vha->cmd_list_lock, flags); 270 list_add_tail(&u->cmd_list, &vha->unknown_atio_list); 271 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 272 273 schedule_delayed_work(&vha->unknown_atio_work, 1); 274 275 out: 276 return; 277 278 out_term: 279 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); 280 goto out; 281 } 282 283 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, 284 uint8_t ha_locked) 285 { 286 struct qla_tgt_sess_op *u, *t; 287 scsi_qla_host_t *host; 288 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 289 unsigned long flags; 290 uint8_t queued = 0; 291 292 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { 293 if (u->aborted) { 294 ql_dbg(ql_dbg_async, vha, 0x502e, 295 "Freeing unknown %s %p, because of Abort\n", 296 "ATIO_TYPE7", u); 297 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 298 &u->atio, ha_locked, 0); 299 goto abort; 300 } 301 302 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); 303 if (host != NULL) { 304 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, 305 "Requeuing unknown ATIO_TYPE7 %p\n", u); 306 qlt_24xx_atio_pkt(host, &u->atio, ha_locked); 307 } else if (tgt->tgt_stop) { 308 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, 309 "Freeing unknown %s %p, because tgt is being 
stopped\n", 310 "ATIO_TYPE7", u); 311 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 312 &u->atio, ha_locked, 0); 313 } else { 314 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, 315 "Reschedule u %p, vha %p, host %p\n", u, vha, host); 316 if (!queued) { 317 queued = 1; 318 schedule_delayed_work(&vha->unknown_atio_work, 319 1); 320 } 321 continue; 322 } 323 324 abort: 325 spin_lock_irqsave(&vha->cmd_list_lock, flags); 326 list_del(&u->cmd_list); 327 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 328 kfree(u); 329 } 330 } 331 332 void qlt_unknown_atio_work_fn(struct work_struct *work) 333 { 334 struct scsi_qla_host *vha = container_of(to_delayed_work(work), 335 struct scsi_qla_host, unknown_atio_work); 336 337 qlt_try_to_dequeue_unknown_atios(vha, 0); 338 } 339 340 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 341 struct atio_from_isp *atio, uint8_t ha_locked) 342 { 343 ql_dbg(ql_dbg_tgt, vha, 0xe072, 344 "%s: qla_target(%d): type %x ox_id %04x\n", 345 __func__, vha->vp_idx, atio->u.raw.entry_type, 346 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 347 348 switch (atio->u.raw.entry_type) { 349 case ATIO_TYPE7: 350 { 351 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, 352 atio->u.isp24.fcp_hdr.d_id); 353 if (unlikely(NULL == host)) { 354 ql_dbg(ql_dbg_tgt, vha, 0xe03e, 355 "qla_target(%d): Received ATIO_TYPE7 " 356 "with unknown d_id %x:%x:%x\n", vha->vp_idx, 357 atio->u.isp24.fcp_hdr.d_id.domain, 358 atio->u.isp24.fcp_hdr.d_id.area, 359 atio->u.isp24.fcp_hdr.d_id.al_pa); 360 361 362 qlt_queue_unknown_atio(vha, atio, ha_locked); 363 break; 364 } 365 if (unlikely(!list_empty(&vha->unknown_atio_list))) 366 qlt_try_to_dequeue_unknown_atios(vha, ha_locked); 367 368 qlt_24xx_atio_pkt(host, atio, ha_locked); 369 break; 370 } 371 372 case IMMED_NOTIFY_TYPE: 373 { 374 struct scsi_qla_host *host = vha; 375 struct imm_ntfy_from_isp *entry = 376 (struct imm_ntfy_from_isp *)atio; 377 378 qlt_issue_marker(vha, ha_locked); 379 380 if 
((entry->u.isp24.vp_index != 0xFF) && 381 (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) { 382 host = qlt_find_host_by_vp_idx(vha, 383 entry->u.isp24.vp_index); 384 if (unlikely(!host)) { 385 ql_dbg(ql_dbg_tgt, vha, 0xe03f, 386 "qla_target(%d): Received " 387 "ATIO (IMMED_NOTIFY_TYPE) " 388 "with unknown vp_index %d\n", 389 vha->vp_idx, entry->u.isp24.vp_index); 390 break; 391 } 392 } 393 qlt_24xx_atio_pkt(host, atio, ha_locked); 394 break; 395 } 396 397 case VP_RPT_ID_IOCB_TYPE: 398 qla24xx_report_id_acquisition(vha, 399 (struct vp_rpt_id_entry_24xx *)atio); 400 break; 401 402 case ABTS_RECV_24XX: 403 { 404 struct abts_recv_from_24xx *entry = 405 (struct abts_recv_from_24xx *)atio; 406 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 407 entry->vp_index); 408 unsigned long flags; 409 410 if (unlikely(!host)) { 411 ql_dbg(ql_dbg_tgt, vha, 0xe00a, 412 "qla_target(%d): Response pkt (ABTS_RECV_24XX) " 413 "received, with unknown vp_index %d\n", 414 vha->vp_idx, entry->vp_index); 415 break; 416 } 417 if (!ha_locked) 418 spin_lock_irqsave(&host->hw->hardware_lock, flags); 419 qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); 420 if (!ha_locked) 421 spin_unlock_irqrestore(&host->hw->hardware_lock, flags); 422 break; 423 } 424 425 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ 426 427 default: 428 ql_dbg(ql_dbg_tgt, vha, 0xe040, 429 "qla_target(%d): Received unknown ATIO atio " 430 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 431 break; 432 } 433 434 return false; 435 } 436 437 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, 438 struct rsp_que *rsp, response_t *pkt) 439 { 440 switch (pkt->entry_type) { 441 case CTIO_CRC2: 442 ql_dbg(ql_dbg_tgt, vha, 0xe073, 443 "qla_target(%d):%s: CRC2 Response pkt\n", 444 vha->vp_idx, __func__); 445 fallthrough; 446 case CTIO_TYPE7: 447 { 448 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 449 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 450 entry->vp_index); 451 if 
(unlikely(!host)) { 452 ql_dbg(ql_dbg_tgt, vha, 0xe041, 453 "qla_target(%d): Response pkt (CTIO_TYPE7) " 454 "received, with unknown vp_index %d\n", 455 vha->vp_idx, entry->vp_index); 456 break; 457 } 458 qlt_response_pkt(host, rsp, pkt); 459 break; 460 } 461 462 case IMMED_NOTIFY_TYPE: 463 { 464 struct scsi_qla_host *host; 465 struct imm_ntfy_from_isp *entry = 466 (struct imm_ntfy_from_isp *)pkt; 467 468 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); 469 if (unlikely(!host)) { 470 ql_dbg(ql_dbg_tgt, vha, 0xe042, 471 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " 472 "received, with unknown vp_index %d\n", 473 vha->vp_idx, entry->u.isp24.vp_index); 474 break; 475 } 476 qlt_response_pkt(host, rsp, pkt); 477 break; 478 } 479 480 case NOTIFY_ACK_TYPE: 481 { 482 struct scsi_qla_host *host = vha; 483 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 484 485 if (0xFF != entry->u.isp24.vp_index) { 486 host = qlt_find_host_by_vp_idx(vha, 487 entry->u.isp24.vp_index); 488 if (unlikely(!host)) { 489 ql_dbg(ql_dbg_tgt, vha, 0xe043, 490 "qla_target(%d): Response " 491 "pkt (NOTIFY_ACK_TYPE) " 492 "received, with unknown " 493 "vp_index %d\n", vha->vp_idx, 494 entry->u.isp24.vp_index); 495 break; 496 } 497 } 498 qlt_response_pkt(host, rsp, pkt); 499 break; 500 } 501 502 case ABTS_RECV_24XX: 503 { 504 struct abts_recv_from_24xx *entry = 505 (struct abts_recv_from_24xx *)pkt; 506 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 507 entry->vp_index); 508 if (unlikely(!host)) { 509 ql_dbg(ql_dbg_tgt, vha, 0xe044, 510 "qla_target(%d): Response pkt " 511 "(ABTS_RECV_24XX) received, with unknown " 512 "vp_index %d\n", vha->vp_idx, entry->vp_index); 513 break; 514 } 515 qlt_response_pkt(host, rsp, pkt); 516 break; 517 } 518 519 case ABTS_RESP_24XX: 520 { 521 struct abts_resp_to_24xx *entry = 522 (struct abts_resp_to_24xx *)pkt; 523 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 524 entry->vp_index); 525 if (unlikely(!host)) { 526 
ql_dbg(ql_dbg_tgt, vha, 0xe045, 527 "qla_target(%d): Response pkt " 528 "(ABTS_RECV_24XX) received, with unknown " 529 "vp_index %d\n", vha->vp_idx, entry->vp_index); 530 break; 531 } 532 qlt_response_pkt(host, rsp, pkt); 533 break; 534 } 535 default: 536 qlt_response_pkt(vha, rsp, pkt); 537 break; 538 } 539 540 } 541 542 /* 543 * All qlt_plogi_ack_t operations are protected by hardware_lock 544 */ 545 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, 546 struct imm_ntfy_from_isp *ntfy, int type) 547 { 548 struct qla_work_evt *e; 549 550 e = qla2x00_alloc_work(vha, QLA_EVT_NACK); 551 if (!e) 552 return QLA_FUNCTION_FAILED; 553 554 e->u.nack.fcport = fcport; 555 e->u.nack.type = type; 556 memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); 557 return qla2x00_post_work(vha, e); 558 } 559 560 static void qla2x00_async_nack_sp_done(srb_t *sp, int res) 561 { 562 struct scsi_qla_host *vha = sp->vha; 563 unsigned long flags; 564 565 ql_dbg(ql_dbg_disc, vha, 0x20f2, 566 "Async done-%s res %x %8phC type %d\n", 567 sp->name, res, sp->fcport->port_name, sp->type); 568 569 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 570 sp->fcport->flags &= ~FCF_ASYNC_SENT; 571 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 572 573 switch (sp->type) { 574 case SRB_NACK_PLOGI: 575 sp->fcport->login_gen++; 576 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 577 sp->fcport->logout_on_delete = 1; 578 sp->fcport->plogi_nack_done_deadline = jiffies + HZ; 579 sp->fcport->send_els_logo = 0; 580 break; 581 582 case SRB_NACK_PRLI: 583 sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; 584 sp->fcport->deleted = 0; 585 sp->fcport->send_els_logo = 0; 586 587 if (!sp->fcport->login_succ && 588 !IS_SW_RESV_ADDR(sp->fcport->d_id)) { 589 sp->fcport->login_succ = 1; 590 591 vha->fcport_count++; 592 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 593 qla24xx_sched_upd_fcport(sp->fcport); 594 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 595 } 
else { 596 sp->fcport->login_retry = 0; 597 qla2x00_set_fcport_disc_state(sp->fcport, 598 DSC_LOGIN_COMPLETE); 599 sp->fcport->deleted = 0; 600 sp->fcport->logout_on_delete = 1; 601 } 602 break; 603 604 case SRB_NACK_LOGO: 605 sp->fcport->login_gen++; 606 sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 607 qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); 608 break; 609 } 610 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 611 612 sp->free(sp); 613 } 614 615 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, 616 struct imm_ntfy_from_isp *ntfy, int type) 617 { 618 int rval = QLA_FUNCTION_FAILED; 619 srb_t *sp; 620 char *c = NULL; 621 622 fcport->flags |= FCF_ASYNC_SENT; 623 switch (type) { 624 case SRB_NACK_PLOGI: 625 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 626 c = "PLOGI"; 627 break; 628 case SRB_NACK_PRLI: 629 fcport->fw_login_state = DSC_LS_PRLI_PEND; 630 fcport->deleted = 0; 631 c = "PRLI"; 632 break; 633 case SRB_NACK_LOGO: 634 fcport->fw_login_state = DSC_LS_LOGO_PEND; 635 c = "LOGO"; 636 break; 637 } 638 639 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 640 if (!sp) 641 goto done; 642 643 sp->type = type; 644 sp->name = "nack"; 645 646 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 647 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); 648 649 sp->u.iocb_cmd.u.nack.ntfy = ntfy; 650 sp->done = qla2x00_async_nack_sp_done; 651 652 ql_dbg(ql_dbg_disc, vha, 0x20f4, 653 "Async-%s %8phC hndl %x %s\n", 654 sp->name, fcport->port_name, sp->handle, c); 655 656 rval = qla2x00_start_sp(sp); 657 if (rval != QLA_SUCCESS) 658 goto done_free_sp; 659 660 return rval; 661 662 done_free_sp: 663 sp->free(sp); 664 done: 665 fcport->flags &= ~FCF_ASYNC_SENT; 666 return rval; 667 } 668 669 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 670 { 671 fc_port_t *t; 672 673 switch (e->u.nack.type) { 674 case SRB_NACK_PRLI: 675 t = e->u.nack.fcport; 676 flush_work(&t->del_work); 677 
flush_work(&t->free_work); 678 mutex_lock(&vha->vha_tgt.tgt_mutex); 679 t = qlt_create_sess(vha, e->u.nack.fcport, 0); 680 mutex_unlock(&vha->vha_tgt.tgt_mutex); 681 if (t) { 682 ql_log(ql_log_info, vha, 0xd034, 683 "%s create sess success %p", __func__, t); 684 /* create sess has an extra kref */ 685 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); 686 } 687 break; 688 } 689 qla24xx_async_notify_ack(vha, e->u.nack.fcport, 690 (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type); 691 } 692 693 void qla24xx_delete_sess_fn(struct work_struct *work) 694 { 695 fc_port_t *fcport = container_of(work, struct fc_port, del_work); 696 struct qla_hw_data *ha = fcport->vha->hw; 697 698 if (fcport->se_sess) { 699 ha->tgt.tgt_ops->shutdown_sess(fcport); 700 ha->tgt.tgt_ops->put_sess(fcport); 701 } else { 702 qlt_unreg_sess(fcport); 703 } 704 } 705 706 /* 707 * Called from qla2x00_reg_remote_port() 708 */ 709 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 710 { 711 struct qla_hw_data *ha = vha->hw; 712 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 713 struct fc_port *sess = fcport; 714 unsigned long flags; 715 716 if (!vha->hw->tgt.tgt_ops) 717 return; 718 719 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 720 if (tgt->tgt_stop) { 721 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 722 return; 723 } 724 725 if (fcport->disc_state == DSC_DELETE_PEND) { 726 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 727 return; 728 } 729 730 if (!sess->se_sess) { 731 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 732 733 mutex_lock(&vha->vha_tgt.tgt_mutex); 734 sess = qlt_create_sess(vha, fcport, false); 735 mutex_unlock(&vha->vha_tgt.tgt_mutex); 736 737 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 738 } else { 739 if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { 740 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 741 return; 742 } 743 744 if (!kref_get_unless_zero(&sess->sess_kref)) { 745 ql_dbg(ql_dbg_disc, vha, 0x2107, 746 "%s: kref_get fail sess 
%8phC \n", 747 __func__, sess->port_name); 748 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 749 return; 750 } 751 752 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 753 "qla_target(%u): %ssession for port %8phC " 754 "(loop ID %d) reappeared\n", vha->vp_idx, 755 sess->local ? "local " : "", sess->port_name, sess->loop_id); 756 757 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 758 "Reappeared sess %p\n", sess); 759 760 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, 761 fcport->loop_id, 762 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 763 } 764 765 if (sess && sess->local) { 766 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 767 "qla_target(%u): local session for " 768 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 769 fcport->port_name, sess->loop_id); 770 sess->local = 0; 771 } 772 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 773 774 ha->tgt.tgt_ops->put_sess(sess); 775 } 776 777 /* 778 * This is a zero-base ref-counting solution, since hardware_lock 779 * guarantees that ref_count is not modified concurrently. 
780 * Upon successful return content of iocb is undefined 781 */ 782 static struct qlt_plogi_ack_t * 783 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, 784 struct imm_ntfy_from_isp *iocb) 785 { 786 struct qlt_plogi_ack_t *pla; 787 788 lockdep_assert_held(&vha->hw->hardware_lock); 789 790 list_for_each_entry(pla, &vha->plogi_ack_list, list) { 791 if (pla->id.b24 == id->b24) { 792 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, 793 "%s %d %8phC Term INOT due to new INOT", 794 __func__, __LINE__, 795 pla->iocb.u.isp24.port_name); 796 qlt_send_term_imm_notif(vha, &pla->iocb, 1); 797 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 798 return pla; 799 } 800 } 801 802 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); 803 if (!pla) { 804 ql_dbg(ql_dbg_async, vha, 0x5088, 805 "qla_target(%d): Allocation of plogi_ack failed\n", 806 vha->vp_idx); 807 return NULL; 808 } 809 810 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 811 pla->id = *id; 812 list_add_tail(&pla->list, &vha->plogi_ack_list); 813 814 return pla; 815 } 816 817 void qlt_plogi_ack_unref(struct scsi_qla_host *vha, 818 struct qlt_plogi_ack_t *pla) 819 { 820 struct imm_ntfy_from_isp *iocb = &pla->iocb; 821 port_id_t port_id; 822 uint16_t loop_id; 823 fc_port_t *fcport = pla->fcport; 824 825 BUG_ON(!pla->ref_count); 826 pla->ref_count--; 827 828 if (pla->ref_count) 829 return; 830 831 ql_dbg(ql_dbg_disc, vha, 0x5089, 832 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" 833 " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, 834 iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], 835 iocb->u.isp24.port_id[0], 836 le16_to_cpu(iocb->u.isp24.nport_handle), 837 iocb->u.isp24.exchange_address, iocb->ox_id); 838 839 port_id.b.domain = iocb->u.isp24.port_id[2]; 840 port_id.b.area = iocb->u.isp24.port_id[1]; 841 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 842 port_id.b.rsvd_1 = 0; 843 844 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 845 846 fcport->loop_id = loop_id; 847 
fcport->d_id = port_id; 848 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 849 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); 850 else 851 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI); 852 853 list_for_each_entry(fcport, &vha->vp_fcports, list) { 854 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) 855 fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 856 if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) 857 fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 858 } 859 860 list_del(&pla->list); 861 kmem_cache_free(qla_tgt_plogi_cachep, pla); 862 } 863 864 void 865 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, 866 struct fc_port *sess, enum qlt_plogi_link_t link) 867 { 868 struct imm_ntfy_from_isp *iocb = &pla->iocb; 869 /* Inc ref_count first because link might already be pointing at pla */ 870 pla->ref_count++; 871 872 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, 873 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" 874 " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", 875 sess, link, sess->port_name, 876 iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], 877 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 878 pla->ref_count, pla, link); 879 880 if (link == QLT_PLOGI_LINK_CONFLICT) { 881 switch (sess->disc_state) { 882 case DSC_DELETED: 883 case DSC_DELETE_PEND: 884 pla->ref_count--; 885 return; 886 default: 887 break; 888 } 889 } 890 891 if (sess->plogi_link[link]) 892 qlt_plogi_ack_unref(vha, sess->plogi_link[link]); 893 894 if (link == QLT_PLOGI_LINK_SAME_WWN) 895 pla->fcport = sess; 896 897 sess->plogi_link[link] = pla; 898 } 899 900 typedef struct { 901 /* These fields must be initialized by the caller */ 902 port_id_t id; 903 /* 904 * number of cmds dropped while we were waiting for 905 * initiator to ack LOGO initialize to 1 if LOGO is 906 * triggered by a command, otherwise, to 0 907 */ 908 int cmd_count; 909 910 /* These fields are used by callee */ 911 struct list_head list; 912 } 
qlt_port_logo_t; 913 914 static void 915 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) 916 { 917 qlt_port_logo_t *tmp; 918 int res; 919 920 mutex_lock(&vha->vha_tgt.tgt_mutex); 921 922 list_for_each_entry(tmp, &vha->logo_list, list) { 923 if (tmp->id.b24 == logo->id.b24) { 924 tmp->cmd_count += logo->cmd_count; 925 mutex_unlock(&vha->vha_tgt.tgt_mutex); 926 return; 927 } 928 } 929 930 list_add_tail(&logo->list, &vha->logo_list); 931 932 mutex_unlock(&vha->vha_tgt.tgt_mutex); 933 934 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); 935 936 mutex_lock(&vha->vha_tgt.tgt_mutex); 937 list_del(&logo->list); 938 mutex_unlock(&vha->vha_tgt.tgt_mutex); 939 940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, 941 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", 942 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, 943 logo->cmd_count, res); 944 } 945 946 void qlt_free_session_done(struct work_struct *work) 947 { 948 struct fc_port *sess = container_of(work, struct fc_port, 949 free_work); 950 struct qla_tgt *tgt = sess->tgt; 951 struct scsi_qla_host *vha = sess->vha; 952 struct qla_hw_data *ha = vha->hw; 953 unsigned long flags; 954 bool logout_started = false; 955 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 956 struct qlt_plogi_ack_t *own = 957 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 958 959 ql_dbg(ql_dbg_disc, vha, 0xf084, 960 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" 961 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", 962 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, 963 sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, 964 sess->logout_on_delete, sess->keep_nport_handle, 965 sess->send_els_logo); 966 967 if (!IS_SW_RESV_ADDR(sess->d_id)) { 968 qla2x00_mark_device_lost(vha, sess, 0); 969 970 if (sess->send_els_logo) { 971 qlt_port_logo_t logo; 972 973 logo.id = sess->d_id; 974 logo.cmd_count = 0; 975 if (!own) 976 qlt_send_first_logo(vha, &logo); 977 
		/* NOTE(review): continuation of a function whose head is above
		 * this chunk (session teardown/free work path). */
		sess->send_els_logo = 0;
	}

	if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
		int rc;

		if (!own ||
		    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
			rc = qla2x00_post_async_logout_work(vha, sess,
			    NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		} else if (own && (own->iocb.u.isp24.status_subcode ==
			ELS_PRLI) && ha->flags.rida_fmt2) {
			rc = qla2x00_post_async_prlo_work(vha, sess,
			    NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule PRLO failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
	} /* if sess->logout_on_delete */

	if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
	    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
		/* Mark first so the unregister is only attempted once. */
		sess->nvme_flag |= NVME_FLAG_DELETING;
		qla_nvme_unregister_remote_port(sess);
	}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		/*
		 * Bounded poll (230 * 100 ms ~= 23 s) for the LOGO/PRLO
		 * scheduled above to complete.
		 */
		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * Driver timeout is set to 22 Sec, update count value to loop
			 * long enough for log-out to complete before advancing. Otherwise,
			 * straddling logout can interfere with re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	/* Unblock a port that was waiting on this one, and ask DPC to
	 * retry its login unless the driver is unloading. */
	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	/* Drop any PLOGI ACKs still linked to this session. */
	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	/* Kick DPC for a re-login when initiator functionality is active
	 * and neither driver removal, vport delete, target stop nor a loop
	 * transition is in progress. */
	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	/* ->free_pending guards against double-scheduling the free work. */
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	/* Snapshot generations so later RSCN/login events can be told apart. */
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	/* Actual teardown runs from the driver workqueue. */
	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/*
 * Handle a reset request carried in an immediate-notify IOCB.
 * loop_id 0xFFFF is a global event: the whole session DB is cleared;
 * otherwise the task management function @mcmd is issued against the
 * session matching the nport handle. Returns -ESRCH if no session.
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

/*
 * If the session predates the latest chip reset, firmware-side login
 * state is gone: skip logout on delete and drop the stale FW login state.
 */
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

/*
 * Mark @sess QLA_SESS_DELETION_IN_PROGRESS and queue its del_work,
 * unless a deletion is already pending/in progress or the port is still
 * being reported to the upper layer (DSC_UPD_FCPORT).
 */
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		/* Already gone: just wake any waiters if counts hit zero. */
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	/* work_lock serializes the deleted-state transition. */
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_log_warn, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

/* Schedule every registered fcport of this target for deletion. */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

/*
 * Resolve a port ID (@s_id) to its firmware loop ID by scanning the
 * FW-provided gid list. Returns 0 on success (*loop_id set), -ENOENT if
 * the port is not logged in, -EBUSY/-ENOMEM on mailbox/DMA failure.
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		/* Entry stride is HW-dependent, advance by bytes. */
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	/* Session already exists: just take the extra reference. */
	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reaquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion requestion is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	/* Generation check filters out deletion requests that raced with
	 * a more recent (re)login of the same port. */
	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

/* Returns nonzero when the target has no remaining sessions. */
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha
	    = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		/* Sess works are queued via schedule_work() (see
		 * qlt_sched_sess_work), i.e. on the system workqueue. */
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	/* Transition tgt_stop -> tgt_stopped under the same mutexes
	 * phase1 uses. */
	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		/* Re-enable initiator mode that was parked while target
		 * mode was active. */
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	/* Ensure both stop phases ran before tearing the target down. */
	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	/* Unhook every qpair hint from its qpair list. */
	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	/* Drain and destroy the LUN -> qpair mapping btree. */
	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	/* GFP_ATOMIC: caller holds hardware_lock. */
	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire
 *
 * Build and post a NOTIFY_ACK IOCB answering the immediate-notify @ntfy,
 * optionally carrying SRR flags/reject codes. Best-effort: silently bails
 * out if FW is not started or no request-ring entry is available.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	/* Echo the relevant fields of the original notify back to FW. */
	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * Build an ABTS response (BA_ACC or BA_RJT, per mcmd->fc_tm_rsp) for the
 * ABTS saved in mcmd->orig_iocb.abts and post it on mcmd's qpair.
 * Returns 0 on success, -EAGAIN when no ring entry/handle is available.
 */
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	/* We respond to the originator: swap source/destination IDs. */
	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reaquire
 *
 * Like qlt_build_abts_resp_iocb() but driven directly from a received
 * ABTS: send BA_ACC (status == FCP_TMF_CMPL) or BA_RJT. @ids_reversed
 * is set when responding to FW's echo of our own ABTS response, whose
 * ID fields are already swapped.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver.  */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		/* Carry the aborted command's task attribute (bits 9..);
		 * otherwise alternate the attr on retries. */
		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ?
1 : 0); 1950 1951 /* Memory Barrier */ 1952 wmb(); 1953 if (qpair->reqq_start_iocbs) 1954 qpair->reqq_start_iocbs(qpair); 1955 else 1956 qla2x00_start_iocbs(vha, qpair->req); 1957 1958 if (mcmd) 1959 qlt_build_abts_resp_iocb(mcmd); 1960 else 1961 qlt_24xx_send_abts_resp(qpair, 1962 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); 1963 1964 } 1965 1966 /* drop cmds for the given lun 1967 * XXX only looks for cmds on the port through which lun reset was recieved 1968 * XXX does not go through the list of other port (which may have cmds 1969 * for the same lun) 1970 */ 1971 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id) 1972 { 1973 struct qla_tgt_sess_op *op; 1974 struct qla_tgt_cmd *cmd; 1975 uint32_t key; 1976 unsigned long flags; 1977 1978 key = sid_to_key(s_id); 1979 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1980 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1981 uint32_t op_key; 1982 u64 op_lun; 1983 1984 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1985 op_lun = scsilun_to_int( 1986 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1987 if (op_key == key && op_lun == lun) 1988 op->aborted = true; 1989 } 1990 1991 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1992 uint32_t op_key; 1993 u64 op_lun; 1994 1995 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1996 op_lun = scsilun_to_int( 1997 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1998 if (op_key == key && op_lun == lun) 1999 op->aborted = true; 2000 } 2001 2002 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 2003 uint32_t cmd_key; 2004 u64 cmd_lun; 2005 2006 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 2007 cmd_lun = scsilun_to_int( 2008 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 2009 if (cmd_key == key && cmd_lun == lun) 2010 cmd->aborted = 1; 2011 } 2012 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 2013 } 2014 2015 static struct qla_qpair_hint *qlt_find_qphint(struct 
scsi_qla_host *vha, 2016 uint64_t unpacked_lun) 2017 { 2018 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2019 struct qla_qpair_hint *h = NULL; 2020 2021 if (vha->flags.qpairs_available) { 2022 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); 2023 if (!h) 2024 h = &tgt->qphints[0]; 2025 } else { 2026 h = &tgt->qphints[0]; 2027 } 2028 2029 return h; 2030 } 2031 2032 static void qlt_do_tmr_work(struct work_struct *work) 2033 { 2034 struct qla_tgt_mgmt_cmd *mcmd = 2035 container_of(work, struct qla_tgt_mgmt_cmd, work); 2036 struct qla_hw_data *ha = mcmd->vha->hw; 2037 int rc; 2038 uint32_t tag; 2039 unsigned long flags; 2040 2041 switch (mcmd->tmr_func) { 2042 case QLA_TGT_ABTS: 2043 tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort); 2044 break; 2045 default: 2046 tag = 0; 2047 break; 2048 } 2049 2050 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, 2051 mcmd->tmr_func, tag); 2052 2053 if (rc != 0) { 2054 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); 2055 switch (mcmd->tmr_func) { 2056 case QLA_TGT_ABTS: 2057 mcmd->fc_tm_rsp = FCP_TMF_REJECTED; 2058 qlt_build_abts_resp_iocb(mcmd); 2059 break; 2060 case QLA_TGT_LUN_RESET: 2061 case QLA_TGT_CLEAR_TS: 2062 case QLA_TGT_ABORT_TS: 2063 case QLA_TGT_CLEAR_ACA: 2064 case QLA_TGT_TARGET_RESET: 2065 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, 2066 qla_sam_status); 2067 break; 2068 2069 case QLA_TGT_ABORT_ALL: 2070 case QLA_TGT_NEXUS_LOSS_SESS: 2071 case QLA_TGT_NEXUS_LOSS: 2072 qlt_send_notify_ack(mcmd->qpair, 2073 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2074 break; 2075 } 2076 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); 2077 2078 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, 2079 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2080 mcmd->vha->vp_idx, rc); 2081 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2082 } 2083 } 2084 2085 /* ha->hardware_lock supposed to be held on entry */ 2086 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2087 struct 
abts_recv_from_24xx *abts, struct fc_port *sess) 2088 { 2089 struct qla_hw_data *ha = vha->hw; 2090 struct qla_tgt_mgmt_cmd *mcmd; 2091 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 2092 struct qla_tgt_cmd *abort_cmd; 2093 2094 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 2095 "qla_target(%d): task abort (tag=%d)\n", 2096 vha->vp_idx, abts->exchange_addr_to_abort); 2097 2098 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2099 if (mcmd == NULL) { 2100 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, 2101 "qla_target(%d): %s: Allocation of ABORT cmd failed", 2102 vha->vp_idx, __func__); 2103 return -ENOMEM; 2104 } 2105 memset(mcmd, 0, sizeof(*mcmd)); 2106 mcmd->cmd_type = TYPE_TGT_TMCMD; 2107 mcmd->sess = sess; 2108 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 2109 mcmd->reset_count = ha->base_qpair->chip_reset; 2110 mcmd->tmr_func = QLA_TGT_ABTS; 2111 mcmd->qpair = h->qpair; 2112 mcmd->vha = vha; 2113 2114 /* 2115 * LUN is looked up by target-core internally based on the passed 2116 * abts->exchange_addr_to_abort tag. 2117 */ 2118 mcmd->se_cmd.cpuid = h->cpuid; 2119 2120 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, 2121 le32_to_cpu(abts->exchange_addr_to_abort)); 2122 if (!abort_cmd) 2123 return -EIO; 2124 mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun; 2125 2126 if (abort_cmd->qpair) { 2127 mcmd->qpair = abort_cmd->qpair; 2128 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; 2129 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr; 2130 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID; 2131 } 2132 2133 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 2134 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); 2135 2136 return 0; 2137 } 2138 2139 /* 2140 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2141 */ 2142 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2143 struct abts_recv_from_24xx *abts) 2144 { 2145 struct qla_hw_data *ha = vha->hw; 2146 struct fc_port *sess; 2147 uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); 2148 be_id_t s_id; 2149 int rc; 2150 unsigned long flags; 2151 2152 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 2153 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 2154 "qla_target(%d): ABTS: Abort Sequence not " 2155 "supported\n", vha->vp_idx); 2156 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2157 false); 2158 return; 2159 } 2160 2161 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { 2162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, 2163 "qla_target(%d): ABTS: Unknown Exchange " 2164 "Address received\n", vha->vp_idx); 2165 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2166 false); 2167 return; 2168 } 2169 2170 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2171 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2172 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain, 2173 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag, 2174 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2175 2176 s_id = le_id_to_be(abts->fcp_hdr_le.s_id); 2177 2178 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2179 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2180 if (!sess) { 2181 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2182 "qla_target(%d): task abort for non-existent session\n", 2183 vha->vp_idx); 2184 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2185 2186 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2187 false); 2188 return; 2189 } 2190 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2191 2192 2193 if (sess->deleted) { 2194 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2195 false); 2196 return; 2197 } 2198 2199 rc = __qlt_24xx_handle_abts(vha, abts, sess); 2200 if (rc != 0) { 2201 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 2202 
"qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", 2203 vha->vp_idx, rc); 2204 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2205 false); 2206 return; 2207 } 2208 } 2209 2210 /* 2211 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2212 */ 2213 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, 2214 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) 2215 { 2216 struct scsi_qla_host *ha = mcmd->vha; 2217 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 2218 struct ctio7_to_24xx *ctio; 2219 uint16_t temp; 2220 2221 ql_dbg(ql_dbg_tgt, ha, 0xe008, 2222 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 2223 ha, atio, resp_code); 2224 2225 2226 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); 2227 if (ctio == NULL) { 2228 ql_dbg(ql_dbg_tgt, ha, 0xe04c, 2229 "qla_target(%d): %s failed: unable to allocate " 2230 "request packet\n", ha->vp_idx, __func__); 2231 return; 2232 } 2233 2234 ctio->entry_type = CTIO_TYPE7; 2235 ctio->entry_count = 1; 2236 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2237 ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); 2238 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2239 ctio->vp_index = ha->vp_idx; 2240 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 2241 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2242 temp = (atio->u.isp24.attr << 9)| 2243 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2244 ctio->u.status1.flags = cpu_to_le16(temp); 2245 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2246 ctio->u.status1.ox_id = cpu_to_le16(temp); 2247 ctio->u.status1.scsi_status = 2248 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 2249 ctio->u.status1.response_len = cpu_to_le16(8); 2250 ctio->u.status1.sense_data[0] = resp_code; 2251 2252 /* Memory Barrier */ 2253 wmb(); 2254 if (qpair->reqq_start_iocbs) 2255 qpair->reqq_start_iocbs(qpair); 2256 else 2257 qla2x00_start_iocbs(ha, qpair->req); 2258 } 2259 2260 
/* Release a management command back to its mempool. */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 *
 * Queues a status-mode-1 CTIO7 carrying fixed-format (0x70) sense data
 * built from the supplied sense_key/asc/ascq for the given command.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	/* Whole expected data length is reported as residual underrun. */
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		/* Immediate-notify path: respond with notify-ack or tear
		 * down the session for LOGO/PRLO/TPRLO ELS codes. */
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			/* ABTS response completion will free the mcmd. */
			qlt_build_abts_resp_iocb(mcmd);
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
/*
 * DMA-map the command's data (and, for DIF ops, protection) scatterlists
 * and compute the request-entry and DSD counts needed to queue it.
 * Returns 0 on success, -1 if any dma_map_sg() fails.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0,
	    prm->cmd->sg_cnt);
	return -1;
}

/* Undo qlt_pci_map_calc_cnt(): unmap data/protection SGLs and release
 * the CRC (DIF) context, if one was allocated. */
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

/*
 * Reserve req_cnt request-ring entries (plus 2 slack entries), refreshing
 * the cached free count from the out-pointer first if it looks short.
 * Returns 0 on success, -EAGAIN if the ring is full.
 */
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
    uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 *
 * Advance the request ring (wrapping at the end) and return a pointer to
 * the next free packet slot.
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Find a free outstanding-command handle on the qpair's request queue,
 * skipping slot QLA_TGT_SKIP_HANDLE.  Returns QLA_TGT_NULL_HANDLE when
 * every slot is in use.
 */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
/*
 * Initialize the common (status-mode-0) fields of a CTIO7 packet at the
 * current ring slot for prm->cmd and register it in outstanding_cmds[].
 * Returns 0, or -EAGAIN when no command handle is available.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
    struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 *
 * Emit CONTINUE_A64 packets carrying the remaining data segments of
 * prm->sg after the first QLA_TGT_DATASEGS_PER_CMD_24XX entries.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			    prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		cur_dsd = cont_pkt64->dsd;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			append_dsd64(&cur_dsd, prm->sg);
			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
/* Fill the CTIO7's DSD list from prm->sg, spilling overflow segments into
 * continuation packets via qlt_load_cont_data_segments(). */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	cur_dsd = &pkt24->u.status0.dsd;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		cur_dsd->address = 0;
		cur_dsd->length = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		append_dsd64(&cur_dsd, prm->sg);
		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm);
}

/* True when the command carries a data payload. */
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

/* Log the specific T10-DIF error (guard/app/ref tag) encoded in the
 * command's sense buffer (ASC 0x10), plus a dump of the CDB. */
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}

/*
 * Called without ha->hardware_lock held
 */
/*
 * Prepare the qla_tgt_prm state for xmit_response: map SGLs when data is
 * being sent, compute the total request-entry count, and fold residual
 * under/overflow into the SCSI status.  Returns 0 or -EAGAIN.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
    struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
    uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}

/* Whether an explicit confirmation round-trip must be requested from the
 * initiator (never for class-2 service; depends on conf_compl support). */
static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
    int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

/* Populate the status/sense portion of a CTIO7 from prm: switches to
 * status mode 1 when sense data is valid and copies the (byte-swapped)
 * sense buffer into the packet. */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
    struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Copy sense as 32-bit words, converting BE to LE. */
		for (i = 0; i < prm->sense_buffer_len/4; i++) {
			uint32_t v;

			v = get_unaligned_be32(
			    &((uint32_t *)prm->sense_buffer)[i]);
			put_unaligned_le32(v,
			    &((uint32_t *)ctio->u.status1.sense_data)[i]);
		}
		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ???
 */
}

/* Whether firmware-level T10-PI error checking applies for this prot_op,
 * honoring the ql2xenablehba_err_chk module-parameter thresholds. */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

/* True for every DIF insert/strip/pass prot_op; 0 otherwise. */
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		return 1;
	default:
		return 0;
	}
	/* NOTE(review): unreachable — every case above returns. */
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	/* Ref tag is the low 32 bits of the LBA. */
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/*
		 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
		 * tag has to match LBA in CDB + N
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE3_PROT:
		/* For TYPE 3 protection: 16 bit GUARD only */
		*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		    ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}

/*
 * Build a Command Type CRC_2 (T10-PI capable) CTIO for prm->cmd:
 * computes DIF/transfer lengths, selects firmware protection options,
 * allocates a CRC context from the DMA pool, and walks the data and
 * protection scatterlists into DSD lists.
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	struct dsd64 *cur_dsd;
	uint32_t transfer_length = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	struct ctio_crc2_to_fw *pkt;
	dma_addr_t crc_ctx_dma;
	uint16_t fw_prot_opts = 0;
	struct qla_tgt_cmd *cmd = prm->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 3034 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 3035 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 3036 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 3037 3038 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 3039 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 3040 bundling = 0; 3041 3042 /* Compute dif len and adjust data len to incude protection */ 3043 data_bytes = cmd->bufflen; 3044 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 3045 3046 switch (se_cmd->prot_op) { 3047 case TARGET_PROT_DIN_INSERT: 3048 case TARGET_PROT_DOUT_STRIP: 3049 transfer_length = data_bytes; 3050 if (cmd->prot_sg_cnt) 3051 data_bytes += dif_bytes; 3052 break; 3053 case TARGET_PROT_DIN_STRIP: 3054 case TARGET_PROT_DOUT_INSERT: 3055 case TARGET_PROT_DIN_PASS: 3056 case TARGET_PROT_DOUT_PASS: 3057 transfer_length = data_bytes + dif_bytes; 3058 break; 3059 default: 3060 BUG(); 3061 break; 3062 } 3063 3064 if (!qlt_hba_err_chk_enabled(se_cmd)) 3065 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 3066 /* HBA error checking enabled */ 3067 else if (IS_PI_UNINIT_CAPABLE(ha)) { 3068 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 3069 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 3070 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 3071 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 3072 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 3073 } 3074 3075 switch (se_cmd->prot_op) { 3076 case TARGET_PROT_DIN_INSERT: 3077 case TARGET_PROT_DOUT_INSERT: 3078 fw_prot_opts |= PO_MODE_DIF_INSERT; 3079 break; 3080 case TARGET_PROT_DIN_STRIP: 3081 case TARGET_PROT_DOUT_STRIP: 3082 fw_prot_opts |= PO_MODE_DIF_REMOVE; 3083 break; 3084 case TARGET_PROT_DIN_PASS: 3085 case TARGET_PROT_DOUT_PASS: 3086 fw_prot_opts |= PO_MODE_DIF_PASS; 3087 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? 
*/ 3088 break; 3089 default:/* Normal Request */ 3090 fw_prot_opts |= PO_MODE_DIF_PASS; 3091 break; 3092 } 3093 3094 /* ---- PKT ---- */ 3095 /* Update entry type to indicate Command Type CRC_2 IOCB */ 3096 pkt->entry_type = CTIO_CRC2; 3097 pkt->entry_count = 1; 3098 pkt->vp_index = cmd->vp_idx; 3099 3100 h = qlt_make_handle(qpair); 3101 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 3102 /* 3103 * CTIO type 7 from the firmware doesn't provide a way to 3104 * know the initiator's LOOP ID, hence we can't find 3105 * the session and, so, the command. 3106 */ 3107 return -EAGAIN; 3108 } else 3109 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 3110 3111 pkt->handle = make_handle(qpair->req->id, h); 3112 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3113 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3114 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3115 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); 3116 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3117 3118 /* silence compile warning */ 3119 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3120 pkt->ox_id = cpu_to_le16(t16); 3121 3122 t16 = (atio->u.isp24.attr << 9); 3123 pkt->flags |= cpu_to_le16(t16); 3124 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 3125 3126 /* Set transfer direction */ 3127 if (cmd->dma_data_direction == DMA_TO_DEVICE) 3128 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 3129 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 3130 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 3131 3132 pkt->dseg_count = cpu_to_le16(prm->tot_dsds); 3133 /* Fibre channel byte count */ 3134 pkt->transfer_length = cpu_to_le32(transfer_length); 3135 3136 /* ----- CRC context -------- */ 3137 3138 /* Allocate CRC context from global pool */ 3139 crc_ctx_pkt = cmd->ctx = 3140 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 3141 3142 if (!crc_ctx_pkt) 3143 goto crc_queuing_error; 3144 3145 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3146 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 
3147 3148 /* Set handle */ 3149 crc_ctx_pkt->handle = pkt->handle; 3150 3151 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3152 3153 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); 3154 pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); 3155 3156 if (!bundling) { 3157 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; 3158 } else { 3159 /* 3160 * Configure Bundling if we need to fetch interlaving 3161 * protection PCI accesses 3162 */ 3163 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3164 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3165 crc_ctx_pkt->u.bundling.dseg_count = 3166 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3167 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; 3168 } 3169 3170 /* Finish the common fields of CRC pkt */ 3171 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3172 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3173 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3174 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3175 3176 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3177 tc.vha = vha; 3178 tc.blk_sz = cmd->blk_sz; 3179 tc.bufflen = cmd->bufflen; 3180 tc.sg = cmd->sg; 3181 tc.prot_sg = cmd->prot_sg; 3182 tc.ctx = crc_ctx_pkt; 3183 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 3184 3185 /* Walks data segments */ 3186 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3187 3188 if (!bundling && prm->prot_seg_cnt) { 3189 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3190 prm->tot_dsds, &tc)) 3191 goto crc_queuing_error; 3192 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3193 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3194 goto crc_queuing_error; 3195 3196 if (bundling && prm->prot_seg_cnt) { 3197 /* Walks dif segments */ 3198 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3199 3200 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; 3201 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3202 prm->prot_seg_cnt, cmd)) 3203 goto crc_queuing_error; 3204 } 3205 return QLA_SUCCESS; 

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	/*
	 * If firmware is gone, the chip was reset after the command was
	 * issued, or the session is being torn down, silently complete.
	 */
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	/* DMA-map the SG list and compute the IOCB entry count needed. */
	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	/* Re-check under the qpair lock: a reset may have raced with us. */
	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
		    "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	/* DIF-protected data goes out via a CRC_2 IOCB, plain data via CTIO7. */
	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		/* Give the reserved IOCB slots back before bailing out. */
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			/* Status (if requested) piggybacks on the data CTIO. */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all neccessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

/* Post an XFER_RDY-style data-out CTIO so the initiator sends write data. */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg =
NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		/* Hand the (failed) data phase back to the target core. */
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
		    "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved IOCB slots on failure. */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);


/*
 * Decode a DIF (T10-PI) error reported by firmware in a CTIO_CRC2 completion
 * and either hand the command back to the data path or send CHECK CONDITION.
 * it is assumed either hardware_lock or qpair lock is held.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t *ap = &sts->actual_dif[0];
	uint8_t *ep = &sts->expected_dif[0];
	uint64_t lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	/* Firmware reports guard/app/ref tags big-endian, unaligned. */
	cmd->a_guard = get_unaligned_be16(ap + 0);
	cmd->a_app_tag = get_unaligned_be16(ap + 2);
	cmd->a_ref_tag = get_unaligned_be32(ap + 4);

	cmd->e_guard = get_unaligned_be16(ep + 0);
	cmd->e_app_tag = get_unaligned_be16(ep + 2);
	cmd->e_ref_tag = get_unaligned_be32(ep + 4);

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		/* No goto out here: a ref-tag mismatch below takes priority. */
		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag,
cmd->a_app_tag, 3505 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3506 cmd->atio.u.isp24.fcp_hdr.ox_id); 3507 3508 cmd->dif_err_code = DIF_ERR_REF; 3509 scsi_status = SAM_STAT_CHECK_CONDITION; 3510 sense_key = ABORTED_COMMAND; 3511 asc = 0x10; 3512 ascq = 0x3; 3513 goto out; 3514 } 3515 3516 /* check guard */ 3517 if (cmd->e_guard != cmd->a_guard) { 3518 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3519 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3520 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3521 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3522 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3523 cmd->atio.u.isp24.fcp_hdr.ox_id); 3524 3525 cmd->dif_err_code = DIF_ERR_GRD; 3526 scsi_status = SAM_STAT_CHECK_CONDITION; 3527 sense_key = ABORTED_COMMAND; 3528 asc = 0x10; 3529 ascq = 0x1; 3530 } 3531 out: 3532 switch (cmd->state) { 3533 case QLA_TGT_STATE_NEED_DATA: 3534 /* handle_data will load DIF error code */ 3535 cmd->state = QLA_TGT_STATE_DATA_IN; 3536 vha->hw->tgt.tgt_ops->handle_data(cmd); 3537 break; 3538 default: 3539 spin_lock_irqsave(&cmd->cmd_lock, flags); 3540 if (cmd->aborted) { 3541 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3542 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3543 break; 3544 } 3545 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3546 3547 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3548 ascq); 3549 /* assume scsi status gets out on the wire. 3550 * Will not wait for completion. 
3551 */ 3552 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3553 break; 3554 } 3555 } 3556 3557 /* If hardware_lock held on entry, might drop it, then reaquire */ 3558 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3559 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3560 struct imm_ntfy_from_isp *ntfy) 3561 { 3562 struct nack_to_isp *nack; 3563 struct qla_hw_data *ha = vha->hw; 3564 request_t *pkt; 3565 int ret = 0; 3566 3567 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3568 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3569 3570 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3571 if (pkt == NULL) { 3572 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3573 "qla_target(%d): %s failed: unable to allocate " 3574 "request packet\n", vha->vp_idx, __func__); 3575 return -ENOMEM; 3576 } 3577 3578 pkt->entry_type = NOTIFY_ACK_TYPE; 3579 pkt->entry_count = 1; 3580 pkt->handle = QLA_TGT_SKIP_HANDLE; 3581 3582 nack = (struct nack_to_isp *)pkt; 3583 nack->ox_id = ntfy->ox_id; 3584 3585 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3586 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3587 nack->u.isp24.flags = ntfy->u.isp24.flags & 3588 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3589 } 3590 3591 /* terminate */ 3592 nack->u.isp24.flags |= 3593 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3594 3595 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3596 nack->u.isp24.status = ntfy->u.isp24.status; 3597 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3598 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3599 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3600 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3601 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3602 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3603 3604 qla2x00_start_iocbs(vha, vha->req); 3605 return ret; 3606 } 3607 3608 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3609 struct imm_ntfy_from_isp *imm, int ha_locked) 3610 { 3611 
	int rc;

	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}

/*
 * If hardware_lock held on entry, might drop it, then reaquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 *
 * Returns 1 when the terminated command had already progressed past
 * QLA_TGT_STATE_PROCESSED, 0 otherwise, -ENOMEM if no IOCB was available.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	/* NPIV: terminate on the vha the command actually belongs to. */
	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

/*
 * Terminate an exchange, taking the qpair lock if the caller does not
 * already hold it.  On -ENOMEM the ATIO is parked on the qfull list.
 * Unless the upper layer initiated the abort (ul_abort), the command is
 * unmapped and released here.
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

/* Drain the qfull list and (re)compute the exchange-leak threshold. */
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM.
There is no need 3736 * to schedule free or call free_cmd 3737 */ 3738 qlt_free_cmd(cmd); 3739 vha->hw->tgt.num_qfull_cmds_alloc--; 3740 } 3741 } 3742 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3743 } 3744 3745 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3746 { 3747 uint32_t total_leaked; 3748 3749 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3750 3751 if (vha->hw->tgt.leak_exchg_thresh_hold && 3752 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3753 3754 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3755 "Chip reset due to exchange starvation: %d/%d.\n", 3756 total_leaked, vha->hw->cur_fw_xcb_count); 3757 3758 if (IS_P3P_TYPE(vha->hw)) 3759 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3760 else 3761 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3762 qla2xxx_wake_dpc(vha); 3763 } 3764 3765 } 3766 3767 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3768 { 3769 struct qla_tgt *tgt = cmd->tgt; 3770 struct scsi_qla_host *vha = tgt->vha; 3771 struct se_cmd *se_cmd = &cmd->se_cmd; 3772 unsigned long flags; 3773 3774 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3775 "qla_target(%d): terminating exchange for aborted cmd=%p " 3776 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3777 se_cmd->tag); 3778 3779 spin_lock_irqsave(&cmd->cmd_lock, flags); 3780 if (cmd->aborted) { 3781 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3782 /* 3783 * It's normal to see 2 calls in this path: 3784 * 1) XFER Rdy completion + CMD_T_ABORT 3785 * 2) TCM TMR - drain_state_list 3786 */ 3787 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3788 "multiple abort. 
%p transport_state %x, t_state %x, " 3789 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3790 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3791 return -EIO; 3792 } 3793 cmd->aborted = 1; 3794 cmd->trc_flags |= TRC_ABORT; 3795 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3796 3797 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3798 return 0; 3799 } 3800 EXPORT_SYMBOL(qlt_abort_cmd); 3801 3802 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3803 { 3804 struct fc_port *sess = cmd->sess; 3805 3806 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3807 "%s: se_cmd[%p] ox_id %04x\n", 3808 __func__, &cmd->se_cmd, 3809 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3810 3811 BUG_ON(cmd->cmd_in_wq); 3812 3813 if (cmd->sg_mapped) 3814 qlt_unmap_sg(cmd->vha, cmd); 3815 3816 if (!cmd->q_full) 3817 qlt_decr_num_pend_cmds(cmd->vha); 3818 3819 BUG_ON(cmd->sg_mapped); 3820 cmd->jiffies_at_free = get_jiffies_64(); 3821 if (unlikely(cmd->free_sg)) 3822 kfree(cmd->sg); 3823 3824 if (!sess || !sess->se_sess) { 3825 WARN_ON(1); 3826 return; 3827 } 3828 cmd->jiffies_at_free = get_jiffies_64(); 3829 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); 3830 } 3831 EXPORT_SYMBOL(qlt_free_cmd); 3832 3833 /* 3834 * ha->hardware_lock supposed to be held on entry. 
 * Might drop it, then reacquire
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		    cmd->lba, cmd->lba,
		    cmd->num_blks, &cmd->se_cmd,
		    cmd->atio.u.isp24.exchange_addr,
		    cmd->se_cmd.prot_op,
		    prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		/* Only terminate if FW didn't already terminate this one. */
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}


/*
 * Resolve a CTIO completion handle back to the srb/cmd it was issued for
 * and clear its outstanding-commands slot.
 * ha->hardware_lock supposed to be held on entry
 */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * Handle a CTIO completion from firmware: map error statuses, terminate
 * the exchange when needed, and advance the command's state machine.
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* DIF path owns the command from here on. */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* Write data phase finished; hand data to the target core. */
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    !cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

/* Translate an FCP task-attribute code from the ATIO into a TCM tag. */
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	/* Derive the DMA direction from the FCP read/write data bits. */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/* Workqueue entry point: unlink from the pending list, then process. */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

/* Reset the LUN-to-qpair mapping after the user changed num_act_qpairs. */
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}

/*
 * Pick (and cache) a queue pair for this command's LUN, spreading LUNs
 * evenly over the available qpairs via tgt->lun_qpair_map.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evently */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair =
vha->hw->base_qpair; 4223 if (qpair->lun_cnt == 0) { 4224 qpair->lun_cnt++; 4225 h = qla_qpair_to_hint(tgt, qpair); 4226 BUG_ON(!h); 4227 rc = btree_insert64(&tgt->lun_qpair_map, 4228 cmd->unpacked_lun, h, GFP_ATOMIC); 4229 if (rc) { 4230 qpair->lun_cnt--; 4231 ql_log(ql_log_info, vha, 0xd037, 4232 "Unable to insert lun %llx into lun_qpair_map\n", 4233 cmd->unpacked_lun); 4234 } 4235 goto out; 4236 } else { 4237 lcnt = qpair->lun_cnt; 4238 } 4239 4240 h = NULL; 4241 list_for_each_entry(qp, &base_vha->qp_list, 4242 qp_list_elem) { 4243 if (qp->lun_cnt == 0) { 4244 qp->lun_cnt++; 4245 h = qla_qpair_to_hint(tgt, qp); 4246 BUG_ON(!h); 4247 rc = btree_insert64(&tgt->lun_qpair_map, 4248 cmd->unpacked_lun, h, GFP_ATOMIC); 4249 if (rc) { 4250 qp->lun_cnt--; 4251 ql_log(ql_log_info, vha, 0xd038, 4252 "Unable to insert lun %llx into lun_qpair_map\n", 4253 cmd->unpacked_lun); 4254 } 4255 qpair = qp; 4256 goto out; 4257 } else { 4258 if (qp->lun_cnt < lcnt) { 4259 lcnt = qp->lun_cnt; 4260 qpair = qp; 4261 continue; 4262 } 4263 } 4264 } 4265 BUG_ON(!qpair); 4266 qpair->lun_cnt++; 4267 h = qla_qpair_to_hint(tgt, qpair); 4268 BUG_ON(!h); 4269 rc = btree_insert64(&tgt->lun_qpair_map, 4270 cmd->unpacked_lun, h, GFP_ATOMIC); 4271 if (rc) { 4272 qpair->lun_cnt--; 4273 ql_log(ql_log_info, vha, 0xd039, 4274 "Unable to insert lun %llx into lun_qpair_map\n", 4275 cmd->unpacked_lun); 4276 } 4277 } 4278 } else { 4279 h = &tgt->qphints[0]; 4280 } 4281 out: 4282 cmd->qpair = h->qpair; 4283 cmd->se_cmd.cpuid = h->cpuid; 4284 } 4285 4286 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4287 struct fc_port *sess, 4288 struct atio_from_isp *atio) 4289 { 4290 struct qla_tgt_cmd *cmd; 4291 4292 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess); 4293 if (!cmd) 4294 return NULL; 4295 4296 cmd->cmd_type = TYPE_TGT_CMD; 4297 memcpy(&cmd->atio, atio, sizeof(*atio)); 4298 INIT_LIST_HEAD(&cmd->sess_cmd_list); 4299 cmd->state = QLA_TGT_STATE_NEW; 4300 cmd->tgt = vha->vha_tgt.qla_tgt; 4301 
qlt_incr_num_pend_cmds(vha); 4302 cmd->vha = vha; 4303 cmd->sess = sess; 4304 cmd->loop_id = sess->loop_id; 4305 cmd->conf_compl_supported = sess->conf_compl_supported; 4306 4307 cmd->trc_flags = 0; 4308 cmd->jiffies_at_alloc = get_jiffies_64(); 4309 4310 cmd->unpacked_lun = scsilun_to_int( 4311 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4312 qlt_assign_qpair(vha, cmd); 4313 cmd->reset_count = vha->hw->base_qpair->chip_reset; 4314 cmd->vp_idx = vha->vp_idx; 4315 4316 return cmd; 4317 } 4318 4319 /* ha->hardware_lock supposed to be held on entry */ 4320 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4321 struct atio_from_isp *atio) 4322 { 4323 struct qla_hw_data *ha = vha->hw; 4324 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4325 struct fc_port *sess; 4326 struct qla_tgt_cmd *cmd; 4327 unsigned long flags; 4328 port_id_t id; 4329 4330 if (unlikely(tgt->tgt_stop)) { 4331 ql_dbg(ql_dbg_io, vha, 0x3061, 4332 "New command while device %p is shutting down\n", tgt); 4333 return -ENODEV; 4334 } 4335 4336 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); 4337 if (IS_SW_RESV_ADDR(id)) 4338 return -EBUSY; 4339 4340 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4341 if (unlikely(!sess)) 4342 return -EFAULT; 4343 4344 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4345 * session deletion, but it's still in sess_del_work wq */ 4346 if (sess->deleted) { 4347 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, 4348 "New command while old session %p is being deleted\n", 4349 sess); 4350 return -EFAULT; 4351 } 4352 4353 /* 4354 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		/* Undo the kref taken above before reporting busy. */
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		/* Run on the CPU chosen by qlt_assign_qpair(). */
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			/* READs: stay on the current (IRQ) CPU. */
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	/* Default routing hint; refined below for LUN-scoped TMFs. */
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		/* Mark still-queued commands for this LUN as aborted first. */
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		/* Route the TMF to the qpair that owns this LUN. */
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	/* No usable session -> caller will NACK/terminate the exchange. */
	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Preserve the original INOT so the ack path can reference it. */
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	/* Hand the abort to the target core, keyed by the 2G seq_id. */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		/* Defer: retry the abort once session work has run. */
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

/*
 * Mailbox completion callback for an explicit LOGO; just records the
 * result and marks the logout as finished on the fcport.
 */
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from"
		    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
		    " LOGO failed: %#x\n",
		    __func__,
		    fcport->se_sess,
		    fcport,
		    fcport->port_name, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns existing session with matching wwn if present.
 * Null otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			/* At most one session may match the wwn. */
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	/* Pack domain/area/al_pa into the same 24-bit key sid_to_key() uses. */
	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

/*
 * Handle an incoming PLOGI/PRLI-as-login immediate notify.  Invalidates
 * conflicting sessions, allocates a PLOGI-ack tracking entry (pla), and
 * either posts new-session work or schedules deletion of the stale
 * session with the same WWN.  Returns 1 to ack the INOT at the end of
 * the calling thread, 0 when the ack happens asynchronously.
 */
static int qlt_handle_login(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	/* port_id bytes arrive little-endian-ordered in the IOCB. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		/* A login without a WWN is not usable - terminate it. */
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		/* No session with this WWN yet - create one asynchronously. */
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		/* Rate-limit the slow-registration warning to every 5s. */
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			/* pla is not linked to any session - free it. */
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		/* PRLI service parameter word 3: confirmed completion bit. */
		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		/* BIT_4 distinguishes initiator vs target function. */
		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;


	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);


		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			/* In N2N, PRLI may double as login establishment. */
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				/* Conflict is already going away - proceed. */
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			/* Only these FW login states allow PRLI to proceed. */
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				/* Registration still in progress - force retry. */
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				/* No session: kick the DPC to resync the loop. */
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			/* Global TPRLO: nexus loss for all sessions. */
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			/* Stash the IOCB so the deferred LOGO ack can use it. */
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			/* Ack the LINK REINIT that was deferred for this ELS. */
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI: /* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	/* Default: ack the INOT here unless a handler defers the ack. */
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			/* Ack the previous, still-pending REINIT first. */
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.  This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		/* Unknown initiator: terminate instead of sending busy. */
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	/* Build a status-only CTIO carrying the SCSI busy/queue-full status. */
	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info
	 * to retry it, if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id =
	    cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	/* Nothing was transferred: report the whole length as residual. */
	ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}

/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "New command while device %p is shutting down\n", tgt);
		return;
	}

	/* Cap the number of parked QFull commands; track drop statistics. */
	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
		    "qla_target(%d): %s: QFull CMD dropped[%d]\n",
		    vha->vp_idx, __func__,
		    vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	cmd = ha->tgt.tgt_ops->get_cmd(sess);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
		    "qla_target(%d): %s: Allocation of cmd failed\n",
		    vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->q_full = 1;
	cmd->qpair = ha->base_qpair;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	/* Park the command; qlt_free_qfull_cmds() will emit it later. */
	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	/* Unlocked fast path; re-checked under q_full_lock below. */
	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5459 5460 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5461 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5462 if (cmd->q_full) 5463 /* cmd->state is a borrowed field to hold status */ 5464 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5465 else if (cmd->term_exchg) 5466 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5467 5468 if (rc == -ENOMEM) 5469 break; 5470 5471 if (cmd->q_full) 5472 ql_dbg(ql_dbg_io, vha, 0x3006, 5473 "%s: busy sent for ox_id[%04x]\n", __func__, 5474 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5475 else if (cmd->term_exchg) 5476 ql_dbg(ql_dbg_io, vha, 0x3007, 5477 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5478 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5479 else 5480 ql_dbg(ql_dbg_io, vha, 0x3008, 5481 "%s: Unexpected cmd in QFull list %p\n", __func__, 5482 cmd); 5483 5484 list_move_tail(&cmd->cmd_list, &free_list); 5485 5486 /* piggy back on hardware_lock for protection */ 5487 vha->hw->tgt.num_qfull_cmds_alloc--; 5488 } 5489 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5490 5491 cmd = NULL; 5492 5493 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5494 list_del(&cmd->cmd_list); 5495 /* This cmd was never sent to TCM. 
There is no need 5496 * to schedule free or call free_cmd 5497 */ 5498 qlt_free_cmd(cmd); 5499 } 5500 5501 if (!list_empty(&q_full_list)) { 5502 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5503 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5504 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5505 } 5506 5507 return rc; 5508 } 5509 5510 static void 5511 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5512 uint16_t status) 5513 { 5514 int rc = 0; 5515 struct scsi_qla_host *vha = qpair->vha; 5516 5517 rc = __qlt_send_busy(qpair, atio, status); 5518 if (rc == -ENOMEM) 5519 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5520 } 5521 5522 static int 5523 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5524 struct atio_from_isp *atio, uint8_t ha_locked) 5525 { 5526 struct qla_hw_data *ha = vha->hw; 5527 unsigned long flags; 5528 5529 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5530 return 0; 5531 5532 if (!ha_locked) 5533 spin_lock_irqsave(&ha->hardware_lock, flags); 5534 qlt_send_busy(qpair, atio, qla_sam_status); 5535 if (!ha_locked) 5536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5537 5538 return 1; 5539 } 5540 5541 /* ha->hardware_lock supposed to be held on entry */ 5542 /* called via callback from qla2xxx */ 5543 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5544 struct atio_from_isp *atio, uint8_t ha_locked) 5545 { 5546 struct qla_hw_data *ha = vha->hw; 5547 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5548 int rc; 5549 unsigned long flags = 0; 5550 5551 if (unlikely(tgt == NULL)) { 5552 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5553 "ATIO pkt, but no tgt (ha %p)", ha); 5554 return; 5555 } 5556 /* 5557 * In tgt_stop mode we also should allow all requests to pass. 5558 * Otherwise, some commands can stuck. 
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		/* FW handed us an ATIO without a valid exchange: reply BUSY. */
		if (unlikely(atio->u.isp24.exchange_addr ==
		    cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				/* BUSY already sent by the threshold check */
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}

/*
 * qpair lock is assume to be held
 * rc = 0 : send terminate & abts respond
 * rc != 0: do not send term & abts respond
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			/* 5 retries within one jiffy: give up, dump FW */
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha);
			else
				qla2xxx_dump_fw(vha);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		/* new jiffy window: start tracking this exchange afresh */
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}


/*
 * Handle firmware completion of a previously issued ABTS response.
 * Decrements abts_resp_expected and retries/frees the management command
 * depending on the completion status.
 */
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
		    le32_to_cpu(entry->error_subcode2) == 0) {
			/*
			 * NOTE(review): mcmd can still be NULL here when
			 * h == QLA_TGT_SKIP_HANDLE; free_mcmd() is assumed
			 * to tolerate NULL — confirm in the tgt_ops impl.
			 */
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
ha->tgt.tgt_ops->free_mcmd(mcmd); 5747 } 5748 } 5749 5750 /* ha->hardware_lock supposed to be held on entry */ 5751 /* called via callback from qla2xxx */ 5752 static void qlt_response_pkt(struct scsi_qla_host *vha, 5753 struct rsp_que *rsp, response_t *pkt) 5754 { 5755 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5756 5757 if (unlikely(tgt == NULL)) { 5758 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5759 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5760 vha->vp_idx, pkt->entry_type, vha->hw); 5761 return; 5762 } 5763 5764 /* 5765 * In tgt_stop mode we also should allow all requests to pass. 5766 * Otherwise, some commands can stuck. 5767 */ 5768 5769 switch (pkt->entry_type) { 5770 case CTIO_CRC2: 5771 case CTIO_TYPE7: 5772 { 5773 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5774 5775 qlt_do_ctio_completion(vha, rsp, entry->handle, 5776 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5777 entry); 5778 break; 5779 } 5780 5781 case ACCEPT_TGT_IO_TYPE: 5782 { 5783 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5784 int rc; 5785 5786 if (atio->u.isp2x.status != 5787 cpu_to_le16(ATIO_CDB_VALID)) { 5788 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5789 "qla_target(%d): ATIO with error " 5790 "status %x received\n", vha->vp_idx, 5791 le16_to_cpu(atio->u.isp2x.status)); 5792 break; 5793 } 5794 5795 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5796 if (rc != 0) 5797 return; 5798 5799 rc = qlt_handle_cmd_for_atio(vha, atio); 5800 if (unlikely(rc != 0)) { 5801 switch (rc) { 5802 case -ENODEV: 5803 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5804 "qla_target: Unable to send command to target\n"); 5805 break; 5806 case -EBADF: 5807 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5808 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5809 qlt_send_term_exchange(rsp->qpair, NULL, 5810 atio, 1, 0); 5811 break; 5812 case -EBUSY: 5813 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5814 "qla_target(%d): Unable to send command to target, sending BUSY 
status\n", 5815 vha->vp_idx); 5816 qlt_send_busy(rsp->qpair, atio, 5817 tc_sam_status); 5818 break; 5819 default: 5820 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5821 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5822 vha->vp_idx); 5823 qlt_send_busy(rsp->qpair, atio, 5824 qla_sam_status); 5825 break; 5826 } 5827 } 5828 } 5829 break; 5830 5831 case CONTINUE_TGT_IO_TYPE: 5832 { 5833 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5834 5835 qlt_do_ctio_completion(vha, rsp, entry->handle, 5836 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5837 entry); 5838 break; 5839 } 5840 5841 case CTIO_A64_TYPE: 5842 { 5843 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5844 5845 qlt_do_ctio_completion(vha, rsp, entry->handle, 5846 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5847 entry); 5848 break; 5849 } 5850 5851 case IMMED_NOTIFY_TYPE: 5852 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5853 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5854 break; 5855 5856 case NOTIFY_ACK_TYPE: 5857 if (tgt->notify_ack_expected > 0) { 5858 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5859 5860 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5861 "NOTIFY_ACK seq %08x status %x\n", 5862 le16_to_cpu(entry->u.isp2x.seq_id), 5863 le16_to_cpu(entry->u.isp2x.status)); 5864 tgt->notify_ack_expected--; 5865 if (entry->u.isp2x.status != 5866 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5867 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5868 "qla_target(%d): NOTIFY_ACK " 5869 "failed %x\n", vha->vp_idx, 5870 le16_to_cpu(entry->u.isp2x.status)); 5871 } 5872 } else { 5873 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5874 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5875 vha->vp_idx); 5876 } 5877 break; 5878 5879 case ABTS_RECV_24XX: 5880 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5881 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5882 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5883 break; 5884 5885 case ABTS_RESP_24XX: 5886 if (tgt->abts_resp_expected 
> 0) { 5887 qlt_handle_abts_completion(vha, rsp, pkt); 5888 } else { 5889 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5890 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5891 "received\n", vha->vp_idx); 5892 } 5893 break; 5894 5895 default: 5896 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5897 "qla_target(%d): Received unknown response pkt " 5898 "type %x\n", vha->vp_idx, pkt->entry_type); 5899 break; 5900 } 5901 5902 } 5903 5904 /* 5905 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5906 */ 5907 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5908 uint16_t *mailbox) 5909 { 5910 struct qla_hw_data *ha = vha->hw; 5911 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5912 int login_code; 5913 5914 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 5915 return; 5916 5917 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5918 IS_QLA2100(ha)) 5919 return; 5920 /* 5921 * In tgt_stop mode we also should allow all requests to pass. 5922 * Otherwise, some commands can stuck. 5923 */ 5924 5925 5926 switch (code) { 5927 case MBA_RESET: /* Reset */ 5928 case MBA_SYSTEM_ERR: /* System Error */ 5929 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5930 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5931 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5932 "qla_target(%d): System error async event %#x " 5933 "occurred", vha->vp_idx, code); 5934 break; 5935 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. 
*/ 5936 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5937 break; 5938 5939 case MBA_LOOP_UP: 5940 { 5941 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5942 "qla_target(%d): Async LOOP_UP occurred " 5943 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5944 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 5945 if (tgt->link_reinit_iocb_pending) { 5946 qlt_send_notify_ack(ha->base_qpair, 5947 &tgt->link_reinit_iocb, 5948 0, 0, 0, 0, 0, 0); 5949 tgt->link_reinit_iocb_pending = 0; 5950 } 5951 break; 5952 } 5953 5954 case MBA_LIP_OCCURRED: 5955 case MBA_LOOP_DOWN: 5956 case MBA_LIP_RESET: 5957 case MBA_RSCN_UPDATE: 5958 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5959 "qla_target(%d): Async event %#x occurred " 5960 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5961 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 5962 break; 5963 5964 case MBA_REJECTED_FCP_CMD: 5965 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 5966 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 5967 vha->vp_idx, 5968 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 5969 5970 if (mailbox[3] == 1) { 5971 /* exchange starvation. */ 5972 vha->hw->exch_starvation++; 5973 if (vha->hw->exch_starvation > 5) { 5974 ql_log(ql_log_warn, vha, 0xd03a, 5975 "Exchange starvation-. 
Resetting RISC\n"); 5976 5977 vha->hw->exch_starvation = 0; 5978 if (IS_P3P_TYPE(vha->hw)) 5979 set_bit(FCOE_CTX_RESET_NEEDED, 5980 &vha->dpc_flags); 5981 else 5982 set_bit(ISP_ABORT_NEEDED, 5983 &vha->dpc_flags); 5984 qla2xxx_wake_dpc(vha); 5985 } 5986 } 5987 break; 5988 5989 case MBA_PORT_UPDATE: 5990 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 5991 "qla_target(%d): Port update async event %#x " 5992 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 5993 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5994 mailbox[0], mailbox[1], mailbox[2], mailbox[3]); 5995 5996 login_code = mailbox[2]; 5997 if (login_code == 0x4) { 5998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 5999 "Async MB 2: Got PLOGI Complete\n"); 6000 vha->hw->exch_starvation = 0; 6001 } else if (login_code == 0x7) 6002 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 6003 "Async MB 2: Port Logged Out\n"); 6004 break; 6005 default: 6006 break; 6007 } 6008 6009 } 6010 6011 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 6012 uint16_t loop_id) 6013 { 6014 fc_port_t *fcport, *tfcp, *del; 6015 int rc; 6016 unsigned long flags; 6017 u8 newfcport = 0; 6018 6019 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 6020 if (!fcport) { 6021 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 6022 "qla_target(%d): Allocation of tmp FC port failed", 6023 vha->vp_idx); 6024 return NULL; 6025 } 6026 6027 fcport->loop_id = loop_id; 6028 6029 rc = qla24xx_gpdb_wait(vha, fcport, 0); 6030 if (rc != QLA_SUCCESS) { 6031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 6032 "qla_target(%d): Failed to retrieve fcport " 6033 "information -- get_port_database() returned %x " 6034 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 6035 kfree(fcport); 6036 return NULL; 6037 } 6038 6039 del = NULL; 6040 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 6041 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 6042 6043 if (tfcp) { 6044 tfcp->d_id = fcport->d_id; 6045 tfcp->port_type = fcport->port_type; 6046 tfcp->supported_classes = fcport->supported_classes; 
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	/* Duplicate (temporary) port, superseded by the existing one above */
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Must be called under tgt_mutex */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	/* Snapshot the reset counter to detect a reset racing discovery. */
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}

/*
 * Deferred (process-context) handler for an ABTS that had no session at
 * IRQ time: look up or create the session, then run the ABTS through
 * __qlt_24xx_handle_abts().  On any failure, reject the ABTS.
 */
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* qlt_make_local_sess() sleeps; must drop sess_lock around it */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * Deferred (process-context) handler for a task-management IOCB that had
 * no session at IRQ time: look up or create the session, then issue the
 * TMF.  On any failure, terminate the exchange.
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	unsigned long flags;
	be_id_t s_id;
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* qlt_make_local_sess() sleeps; must drop sess_lock around it */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}

/* Workqueue entry point: drain tgt->sess_works_list and dispatch each item. */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
sess_works_list_entry); 6300 6301 /* 6302 * This work can be scheduled on several CPUs at time, so we 6303 * must delete the entry to eliminate double processing 6304 */ 6305 list_del(&prm->sess_works_list_entry); 6306 6307 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6308 6309 switch (prm->type) { 6310 case QLA_TGT_SESS_WORK_ABORT: 6311 qlt_abort_work(tgt, prm); 6312 break; 6313 case QLA_TGT_SESS_WORK_TM: 6314 qlt_tmr_work(tgt, prm); 6315 break; 6316 default: 6317 BUG_ON(1); 6318 break; 6319 } 6320 6321 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6322 6323 kfree(prm); 6324 } 6325 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6326 } 6327 6328 /* Must be called under tgt_host_action_mutex */ 6329 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6330 { 6331 struct qla_tgt *tgt; 6332 int rc, i; 6333 struct qla_qpair_hint *h; 6334 6335 if (!QLA_TGT_MODE_ENABLED()) 6336 return 0; 6337 6338 if (!IS_TGT_MODE_CAPABLE(ha)) { 6339 ql_log(ql_log_warn, base_vha, 0xe070, 6340 "This adapter does not support target mode.\n"); 6341 return 0; 6342 } 6343 6344 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6345 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6346 6347 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6348 6349 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6350 if (!tgt) { 6351 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6352 "Unable to allocate struct qla_tgt\n"); 6353 return -ENOMEM; 6354 } 6355 6356 tgt->qphints = kcalloc(ha->max_qpairs + 1, 6357 sizeof(struct qla_qpair_hint), 6358 GFP_KERNEL); 6359 if (!tgt->qphints) { 6360 kfree(tgt); 6361 ql_log(ql_log_warn, base_vha, 0x0197, 6362 "Unable to allocate qpair hints.\n"); 6363 return -ENOMEM; 6364 } 6365 6366 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6367 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6368 6369 rc = btree_init64(&tgt->lun_qpair_map); 6370 if (rc) { 6371 kfree(tgt->qphints); 6372 kfree(tgt); 6373 ql_log(ql_log_info, base_vha, 
0x0198,
	    "Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	/* Hint 0 always maps to the base qpair. */
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/*
 * Tear down the qla_tgt instance for @vha, if one exists.
 *
 * Must be called under tgt_host_action_mutex.
 */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		/* NPIV port: no qfull cleanup path here, release directly. */
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

/*
 * Empty and destroy the S_ID -> vha host_map btree. Called during
 * adapter teardown.
 */
void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

/* Dump HW WWNs and the configfs-supplied WWPN; also serializes @wwpn into @b. */
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback:  lport initialization callback for tcm_qla2xxx code
 *
 * Walks qla_tgt_glist for a target-capable host whose port_name matches
 * @phys_wwpn, takes a Scsi_Host reference and invokes @callback on it.
 * On callback failure the host reference is dropped again. Returns the
 * callback's result, or -ENODEV if no matching host was found.
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* Physical-port registration on an already-active target. */
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			/* Not the requested WWPN; drop the ref and keep looking. */
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha:  Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Set active_mode from qlini_mode. Must be called under HW lock. */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be
called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	/* Clamp the requested number of active qpairs to what exists. */
	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		/* NPIV port: bounce just this vport. */
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		/* Physical port: full ISP abort via the DPC thread. */
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		       "adapter is offline\n");
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

/* Return the FC-4 Features bits to report to the fabric name server. */
u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server;
	 * bit 1 is reported when initiator mode is enabled.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;

	return fc4_feature;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI host context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Marks every ring entry as already processed; no-op in initiator mode.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: non-zero if the caller already holds the HW lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		/* Consume all entries of this packet, wrapping the ring. */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

/* Reset the ATIO queue registers and route its interrupt (MSI-X or INTx). */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (IS_QLA2071(ha)) {
				/* 4 ports Baker: Enable Interrupt Handshake */
				icb->msix_atio = 0;
				icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			} else {
				icb->msix_atio = cpu_to_le16(msix->entry);
				icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			}
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio =
0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}

/*
 * Apply (or restore) target-mode NVRAM firmware options for 24xx/25xx.
 * Original option words are saved once so initiator-only mode can be
 * restored later.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		/* Target mode off: restore the originally-saved options. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

/* Copy the configured target node name into the 24xx init control block. */
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

/* 81xx/83xx counterpart of qlt_24xx_config_nvram_stage1(). */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		/* Target mode off: restore the originally-saved options. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

/* Copy the configured target node name into the 81xx init control block. */
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

/* Reserve one extra MSI-X vector for the ATIO queue. */
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}


/* Apply target/initiator mode bits to a VP config IOCB. */
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.
bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

/* One-time target-mode setup during PCI probe: ATIO queue pointers,
 * vha_tgt mutexes, unknown-ATIO work and the S_ID host_map btree.
 */
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}

/* MSI-X handler for the dedicated ATIO queue vector. */
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

/* Deferred (workqueue) half of ABTS receive handling; frees @op. */
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Drop stale work queued before a chip reset. */
	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

/*
 * Queue an ABTS packet for deferred processing. On allocation failure the
 * packet is handed straight to qlt_response_pkt_all_vps() as best-effort
 * error recovery.
 */
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/* do not reach for ATIO queue here.  This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
	return;
}

/*
 * Allocate the per-adapter target-mode resources: the vp_idx -> vha map
 * and the DMA-coherent ATIO ring. Returns 0 or -ENOMEM; paired with
 * qlt_mem_free().
 */
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
				     sizeof(struct qla_tgt_vp_map),
				     GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	/* One extra entry beyond atio_q_length, matching qlt_mem_free(). */
	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

/* Release everything allocated by qlt_mem_alloc(). */
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
}

/*
 * Maintain the vp_idx -> vha array and the S_ID (d_id) -> vha btree.
 * vport_slock to be held by the caller.
 */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

/* (Re-)map @vha under its new port id @id, dropping any stale mapping. */
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{

	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}

/* Translate the qlini_mode module parameter string; false if unrecognized. */
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

/*
 * Module-load initialization for the target mode infrastructure: caches,
 * mempool and workqueue. Returns 1 when initiator mode is being disabled,
 * 0 on plain success, negative errno on failure.
 */
int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

/* Module-unload teardown; reverse order of qlt_init(). */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}