/*
 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 * based on qla2x00t.c code:
 *
 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 * Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 * Forward port and refactoring to modern qla2xxx and target/configfs
 *
 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allow user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1 (default): enable");
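/*
 * Illustrative usage only (not part of the driver): the parameters above are
 * regular qla2xxx module parameters, so a typical target-only setup might be
 * loaded as
 *
 *	modprobe qla2xxx qlini_mode=disabled
 *
 * while a dual-mode setup reserving roughly half of the exchange resources
 * for target mode might use
 *
 *	modprobe qla2xxx qlini_mode=dual ql_dm_tgt_ex_pct=50
 */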
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] << 8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

/*
 * Park an ATIO whose d_id does not match any known vha yet; it is retried
 * from the unknown_atio delayed work, or terminated if the target is being
 * stopped.
 */
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}
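/*
 * Retry delivery of ATIOs parked by qlt_queue_unknown_atio(): requeue an
 * entry to its host once the d_id resolves, terminate it if it was aborted
 * or the target is being stopped, otherwise leave it on the list and
 * reschedule the delayed work.
 */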
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

/*
 * Route an ATIO-queue entry (ATIO_TYPE7, immediate notify, ABTS, VP report)
 * received on this vha to the scsi_qla_host it is actually addressed to,
 * based on d_id or vp_index, and hand it to qlt_24xx_atio_pkt().
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;
	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);
				sp->fcport->disc_state = DSC_UPD_FCPORT;
				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
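/*
 * Process a deferred QLA_EVT_NACK event: for a PRLI NACK the target session
 * is created first (dropping the extra kref taken by qlt_create_sess()), then
 * the notify ACK is sent for the saved immediate notify IOCB.
 */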
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
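/*
 * PLOGI ACK handling: an incoming PLOGI whose ACK has to be deferred (for
 * example until the previous session for the same WWN or port ID has been
 * torn down) is parked on vha->plogi_ack_list as a qlt_plogi_ack_t; the
 * ACK/NACK is only posted from qlt_plogi_ack_unref() once the last linked
 * reference is dropped.
 */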
/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
	    sess, link, sess->port_name,
	    iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the initiator
	 * to ack LOGO. Initialize to 1 if the LOGO is triggered by a
	 * command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

static void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha;
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
	    " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
	    sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
	    sess->logout_on_delete, sess->keep_nport_handle,
	    sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			sess->send_els_logo = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
			    ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	sess->disc_state = DSC_DELETED;
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;
	sess->login_retry = vha->hw->login_retry_count;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);

	sess->free_pending = 0;

	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	if (sess->nvme_flag & NVME_FLAG_REGISTERED)
		schedule_work(&sess->nvme_del_work);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
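/*
 * If the chip has been reset since this session was established, the
 * firmware-side login state is already gone; clear the flags that would
 * otherwise trigger an explicit logout / LOGO ACK during deletion.
 */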
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = sess->vha->hw;
	unsigned long flags;

	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
		    !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	sess->disc_state = DSC_DELETE_PEND;

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to log out from the firmware
	 * when the session eventually ends and release the corresponding
	 * nport handle. In the exception cases (e.g. when a new PLOGI is
	 * waiting) corresponding code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
			    "NPIV is in use. Can not stop target\n");
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
	    !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;
	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
	    vha->hw->base_qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response we
	 * generated earlier, so its ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
	    (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
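/*
 * Find the command or queued ATIO matching this exchange tag under
 * cmd_list_lock and mark it aborted. Returns 1 if a match was found (the
 * ABTS can then be completed right away), 0 otherwise.
 */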
1908 u64 op_lun; 1909 1910 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1911 op_lun = scsilun_to_int( 1912 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1913 if (op_key == key && op_lun == lun) 1914 op->aborted = true; 1915 } 1916 1917 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1918 uint32_t cmd_key; 1919 u64 cmd_lun; 1920 1921 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 1922 cmd_lun = scsilun_to_int( 1923 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 1924 if (cmd_key == key && cmd_lun == lun) 1925 cmd->aborted = 1; 1926 } 1927 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1928 } 1929 1930 /* ha->hardware_lock supposed to be held on entry */ 1931 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1932 struct abts_recv_from_24xx *abts, struct fc_port *sess) 1933 { 1934 struct qla_hw_data *ha = vha->hw; 1935 struct qla_tgt_mgmt_cmd *mcmd; 1936 int rc; 1937 1938 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { 1939 /* send TASK_ABORT response immediately */ 1940 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false); 1941 return 0; 1942 } 1943 1944 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1945 "qla_target(%d): task abort (tag=%d)\n", 1946 vha->vp_idx, abts->exchange_addr_to_abort); 1947 1948 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 1949 if (mcmd == NULL) { 1950 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, 1951 "qla_target(%d): %s: Allocation of ABORT cmd failed", 1952 vha->vp_idx, __func__); 1953 return -ENOMEM; 1954 } 1955 memset(mcmd, 0, sizeof(*mcmd)); 1956 1957 mcmd->sess = sess; 1958 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 1959 mcmd->reset_count = ha->base_qpair->chip_reset; 1960 mcmd->tmr_func = QLA_TGT_ABTS; 1961 mcmd->qpair = ha->base_qpair; 1962 mcmd->vha = vha; 1963 1964 /* 1965 * LUN is looked up by target-core internally based on the passed 1966 * abts->exchange_addr_to_abort tag. 1967 */ 1968 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func, 1969 abts->exchange_addr_to_abort); 1970 if (rc != 0) { 1971 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, 1972 "qla_target(%d): tgt_ops->handle_tmr()" 1973 " failed: %d", vha->vp_idx, rc); 1974 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 1975 return -EFAULT; 1976 } 1977 1978 return 0; 1979 } 1980 1981 /* 1982 * ha->hardware_lock supposed to be held on entry. 
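 * qlt_24xx_handle_abts() screens an incoming ABTS before it reaches the
 * target core: "abort sequence" requests and unknown exchange addresses are
 * rejected with a BA_RJT, the originator is looked up by its byte-reversed
 * S_ID, and only a live (non-deleted) session is handed to
 * __qlt_24xx_handle_abts().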
Might drop it, then reacquire 1983 */ 1984 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1985 struct abts_recv_from_24xx *abts) 1986 { 1987 struct qla_hw_data *ha = vha->hw; 1988 struct fc_port *sess; 1989 uint32_t tag = abts->exchange_addr_to_abort; 1990 uint8_t s_id[3]; 1991 int rc; 1992 unsigned long flags; 1993 1994 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 1995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 1996 "qla_target(%d): ABTS: Abort Sequence not " 1997 "supported\n", vha->vp_idx); 1998 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 1999 false); 2000 return; 2001 } 2002 2003 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { 2004 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, 2005 "qla_target(%d): ABTS: Unknown Exchange " 2006 "Address received\n", vha->vp_idx); 2007 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2008 false); 2009 return; 2010 } 2011 2012 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2013 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2014 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], 2015 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, 2016 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2017 2018 s_id[0] = abts->fcp_hdr_le.s_id[2]; 2019 s_id[1] = abts->fcp_hdr_le.s_id[1]; 2020 s_id[2] = abts->fcp_hdr_le.s_id[0]; 2021 2022 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2023 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2024 if (!sess) { 2025 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2026 "qla_target(%d): task abort for non-existent session\n", 2027 vha->vp_idx); 2028 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2029 2030 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2031 false); 2032 return; 2033 } 2034 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2035 2036 2037 if (sess->deleted) { 2038 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2039 false); 2040 return; 2041 } 2042 2043 rc = __qlt_24xx_handle_abts(vha, abts, sess); 2044 if (rc != 0) { 2045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 2046 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", 2047 vha->vp_idx, rc); 2048 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2049 false); 2050 return; 2051 } 2052 } 2053 2054 /* 2055 * ha->hardware_lock supposed to be held on entry.
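 * qlt_24xx_send_task_mgmt_ctio() completes a non-ABTS task management
 * function: it queues a status-mode-1 CTIO7 whose 8-byte FCP_RSP info
 * carries the response code in sense_data[0] and is flagged with
 * SS_RESPONSE_INFO_LEN_VALID.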
Might drop it, then reaquire 2056 */ 2057 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, 2058 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) 2059 { 2060 struct scsi_qla_host *ha = mcmd->vha; 2061 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 2062 struct ctio7_to_24xx *ctio; 2063 uint16_t temp; 2064 2065 ql_dbg(ql_dbg_tgt, ha, 0xe008, 2066 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 2067 ha, atio, resp_code); 2068 2069 2070 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); 2071 if (ctio == NULL) { 2072 ql_dbg(ql_dbg_tgt, ha, 0xe04c, 2073 "qla_target(%d): %s failed: unable to allocate " 2074 "request packet\n", ha->vp_idx, __func__); 2075 return; 2076 } 2077 2078 ctio->entry_type = CTIO_TYPE7; 2079 ctio->entry_count = 1; 2080 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2081 ctio->nport_handle = mcmd->sess->loop_id; 2082 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2083 ctio->vp_index = ha->vp_idx; 2084 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2085 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2086 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2087 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2088 temp = (atio->u.isp24.attr << 9)| 2089 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2090 ctio->u.status1.flags = cpu_to_le16(temp); 2091 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2092 ctio->u.status1.ox_id = cpu_to_le16(temp); 2093 ctio->u.status1.scsi_status = 2094 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 2095 ctio->u.status1.response_len = cpu_to_le16(8); 2096 ctio->u.status1.sense_data[0] = resp_code; 2097 2098 /* Memory Barrier */ 2099 wmb(); 2100 if (qpair->reqq_start_iocbs) 2101 qpair->reqq_start_iocbs(qpair); 2102 else 2103 qla2x00_start_iocbs(ha, qpair->req); 2104 } 2105 2106 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) 2107 { 2108 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2109 } 2110 EXPORT_SYMBOL(qlt_free_mcmd); 2111 2112 /* 2113 * ha->hardware_lock supposed to be held on entry. 
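 * qlt_send_resp_ctio() is used by the DIF error path to finish an exchange
 * with the given SCSI status and inline fixed-format sense (response code
 * 0x70, sense_key/asc/ascq); since no data is moved, the whole ATIO data
 * length is reported back as an underrun residual.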
Might drop it, then 2114 * reacquire 2115 */ 2116 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 2117 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2118 { 2119 struct atio_from_isp *atio = &cmd->atio; 2120 struct ctio7_to_24xx *ctio; 2121 uint16_t temp; 2122 struct scsi_qla_host *vha = cmd->vha; 2123 2124 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2125 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2126 "sense_key=%02x, asc=%02x, ascq=%02x", 2127 vha, atio, scsi_status, sense_key, asc, ascq); 2128 2129 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2130 if (!ctio) { 2131 ql_dbg(ql_dbg_async, vha, 0x3067, 2132 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2133 vha->host_no, __func__); 2134 goto out; 2135 } 2136 2137 ctio->entry_type = CTIO_TYPE7; 2138 ctio->entry_count = 1; 2139 ctio->handle = QLA_TGT_SKIP_HANDLE; 2140 ctio->nport_handle = cmd->sess->loop_id; 2141 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2142 ctio->vp_index = vha->vp_idx; 2143 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2144 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2145 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2146 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2147 temp = (atio->u.isp24.attr << 9) | 2148 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2149 ctio->u.status1.flags = cpu_to_le16(temp); 2150 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2151 ctio->u.status1.ox_id = cpu_to_le16(temp); 2152 ctio->u.status1.scsi_status = 2153 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2154 ctio->u.status1.response_len = cpu_to_le16(18); 2155 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2156 2157 if (ctio->u.status1.residual != 0) 2158 ctio->u.status1.scsi_status |= 2159 cpu_to_le16(SS_RESIDUAL_UNDER); 2160 2161 /* Response code and sense key */ 2162 put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), 2163 (&ctio->u.status1.sense_data)[0]); 2164 /* Additional sense length */ 2165 put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); 2166 /* ASC and ASCQ */ 2167 put_unaligned_le32(((asc << 24) | (ascq << 16)), 2168 (&ctio->u.status1.sense_data)[3]); 2169 2170 /* Memory Barrier */ 2171 wmb(); 2172 2173 if (qpair->reqq_start_iocbs) 2174 qpair->reqq_start_iocbs(qpair); 2175 else 2176 qla2x00_start_iocbs(vha, qpair->req); 2177 2178 out: 2179 return; 2180 } 2181 2182 /* callback from target fabric module code */ 2183 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2184 { 2185 struct scsi_qla_host *vha = mcmd->sess->vha; 2186 struct qla_hw_data *ha = vha->hw; 2187 unsigned long flags; 2188 struct qla_qpair *qpair = mcmd->qpair; 2189 2190 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2191 "TM response mcmd (%p) status %#x state %#x", 2192 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2193 2194 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2195 2196 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { 2197 /* 2198 * Either the port is not online or this request was from 2199 * previous life, just abort the processing. 
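 * (mcmd->reset_count was captured when the TMR was queued; a mismatch with
 * the current qpair->chip_reset means the adapter was reset in the meantime
 * and the original exchange is assumed to be gone.)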
2200 */ 2201 ql_dbg(ql_dbg_async, vha, 0xe100, 2202 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2203 vha->flags.online, qla2x00_reset_active(vha), 2204 mcmd->reset_count, qpair->chip_reset); 2205 ha->tgt.tgt_ops->free_mcmd(mcmd); 2206 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2207 return; 2208 } 2209 2210 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2211 if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2212 ELS_LOGO || 2213 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2214 ELS_PRLO || 2215 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2216 ELS_TPRLO) { 2217 ql_dbg(ql_dbg_disc, vha, 0x2106, 2218 "TM response logo %phC status %#x state %#x", 2219 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2220 mcmd->flags); 2221 qlt_schedule_sess_for_deletion(mcmd->sess); 2222 } else { 2223 qlt_send_notify_ack(vha->hw->base_qpair, 2224 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2225 } 2226 } else { 2227 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) 2228 qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts, 2229 mcmd->fc_tm_rsp, false); 2230 else 2231 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2232 mcmd->fc_tm_rsp); 2233 } 2234 /* 2235 * Make the callback for ->free_mcmd() to queue_work() and invoke 2236 * target_put_sess_cmd() to drop cmd_kref to 1. The final 2237 * target_put_sess_cmd() call will be made from TFO->check_stop_free() 2238 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd 2239 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2240 * qlt_xmit_tm_rsp() returns here.. 2241 */ 2242 ha->tgt.tgt_ops->free_mcmd(mcmd); 2243 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2244 } 2245 EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2246 2247 /* No locks */ 2248 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) 2249 { 2250 struct qla_tgt_cmd *cmd = prm->cmd; 2251 2252 BUG_ON(cmd->sg_cnt == 0); 2253 2254 prm->sg = (struct scatterlist *)cmd->sg; 2255 prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg, 2256 cmd->sg_cnt, cmd->dma_data_direction); 2257 if (unlikely(prm->seg_cnt == 0)) 2258 goto out_err; 2259 2260 prm->cmd->sg_mapped = 1; 2261 2262 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { 2263 /* 2264 * If greater than four sg entries then we need to allocate 2265 * the continuation entries 2266 */ 2267 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) 2268 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 2269 QLA_TGT_DATASEGS_PER_CMD_24XX, 2270 QLA_TGT_DATASEGS_PER_CONT_24XX); 2271 } else { 2272 /* DIF */ 2273 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2274 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2275 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); 2276 prm->tot_dsds = prm->seg_cnt; 2277 } else 2278 prm->tot_dsds = prm->seg_cnt; 2279 2280 if (cmd->prot_sg_cnt) { 2281 prm->prot_sg = cmd->prot_sg; 2282 prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev, 2283 cmd->prot_sg, cmd->prot_sg_cnt, 2284 cmd->dma_data_direction); 2285 if (unlikely(prm->prot_seg_cnt == 0)) 2286 goto out_err; 2287 2288 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2289 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2290 /* Dif Bundling not support here */ 2291 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, 2292 cmd->blk_sz); 2293 prm->tot_dsds += prm->prot_seg_cnt; 2294 } else 2295 prm->tot_dsds += prm->prot_seg_cnt; 2296 } 2297 } 2298 2299 return 0; 2300 2301 out_err: 2302 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, 2303 "qla_target(%d): PCI mapping failed: sg_cnt=%d", 2304 0, prm->cmd->sg_cnt); 
2305 return -1; 2306 } 2307 2308 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2309 { 2310 struct qla_hw_data *ha; 2311 struct qla_qpair *qpair; 2312 if (!cmd->sg_mapped) 2313 return; 2314 2315 qpair = cmd->qpair; 2316 2317 pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt, 2318 cmd->dma_data_direction); 2319 cmd->sg_mapped = 0; 2320 2321 if (cmd->prot_sg_cnt) 2322 pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2323 cmd->dma_data_direction); 2324 2325 if (!cmd->ctx) 2326 return; 2327 ha = vha->hw; 2328 if (cmd->ctx_dsd_alloced) 2329 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2330 2331 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2332 } 2333 2334 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, 2335 uint32_t req_cnt) 2336 { 2337 uint32_t cnt; 2338 struct req_que *req = qpair->req; 2339 2340 if (req->cnt < (req_cnt + 2)) { 2341 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : 2342 RD_REG_DWORD_RELAXED(req->req_q_out)); 2343 2344 if (req->ring_index < cnt) 2345 req->cnt = cnt - req->ring_index; 2346 else 2347 req->cnt = req->length - (req->ring_index - cnt); 2348 2349 if (unlikely(req->cnt < (req_cnt + 2))) 2350 return -EAGAIN; 2351 } 2352 2353 req->cnt -= req_cnt; 2354 2355 return 0; 2356 } 2357 2358 /* 2359 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2360 */ 2361 static inline void *qlt_get_req_pkt(struct req_que *req) 2362 { 2363 /* Adjust ring index. */ 2364 req->ring_index++; 2365 if (req->ring_index == req->length) { 2366 req->ring_index = 0; 2367 req->ring_ptr = req->ring; 2368 } else { 2369 req->ring_ptr++; 2370 } 2371 return (cont_entry_t *)req->ring_ptr; 2372 } 2373 2374 /* ha->hardware_lock supposed to be held on entry */ 2375 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) 2376 { 2377 uint32_t h; 2378 int index; 2379 uint8_t found = 0; 2380 struct req_que *req = qpair->req; 2381 2382 h = req->current_outstanding_cmd; 2383 2384 for (index = 1; index < req->num_outstanding_cmds; index++) { 2385 h++; 2386 if (h == req->num_outstanding_cmds) 2387 h = 1; 2388 2389 if (h == QLA_TGT_SKIP_HANDLE) 2390 continue; 2391 2392 if (!req->outstanding_cmds[h]) { 2393 found = 1; 2394 break; 2395 } 2396 } 2397 2398 if (found) { 2399 req->current_outstanding_cmd = h; 2400 } else { 2401 ql_dbg(ql_dbg_io, qpair->vha, 0x305b, 2402 "qla_target(%d): Ran out of empty cmd slots\n", 2403 qpair->vha->vp_idx); 2404 h = QLA_TGT_NULL_HANDLE; 2405 } 2406 2407 return h; 2408 } 2409 2410 /* ha->hardware_lock supposed to be held on entry */ 2411 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, 2412 struct qla_tgt_prm *prm) 2413 { 2414 uint32_t h; 2415 struct ctio7_to_24xx *pkt; 2416 struct atio_from_isp *atio = &prm->cmd->atio; 2417 uint16_t temp; 2418 2419 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; 2420 prm->pkt = pkt; 2421 memset(pkt, 0, sizeof(*pkt)); 2422 2423 pkt->entry_type = CTIO_TYPE7; 2424 pkt->entry_count = (uint8_t)prm->req_cnt; 2425 pkt->vp_index = prm->cmd->vp_idx; 2426 2427 h = qlt_make_handle(qpair); 2428 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2429 /* 2430 * CTIO type 7 from the firmware doesn't provide a way to 2431 * know the initiator's LOOP ID, hence we can't find 2432 * the session and, so, the command. 
2433 */ 2434 return -EAGAIN; 2435 } else 2436 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 2437 2438 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 2439 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2440 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2441 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2442 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2443 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2444 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2445 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2446 temp = atio->u.isp24.attr << 9; 2447 pkt->u.status0.flags |= cpu_to_le16(temp); 2448 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2449 pkt->u.status0.ox_id = cpu_to_le16(temp); 2450 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 2451 2452 return 0; 2453 } 2454 2455 /* 2456 * ha->hardware_lock supposed to be held on entry. We have already made sure 2457 * that there is sufficient amount of request entries to not drop it. 2458 */ 2459 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) 2460 { 2461 int cnt; 2462 uint32_t *dword_ptr; 2463 2464 /* Build continuation packets */ 2465 while (prm->seg_cnt > 0) { 2466 cont_a64_entry_t *cont_pkt64 = 2467 (cont_a64_entry_t *)qlt_get_req_pkt( 2468 prm->cmd->qpair->req); 2469 2470 /* 2471 * Make sure that from cont_pkt64 none of 2472 * 64-bit specific fields used for 32-bit 2473 * addressing. Cast to (cont_entry_t *) for 2474 * that. 2475 */ 2476 2477 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2478 2479 cont_pkt64->entry_count = 1; 2480 cont_pkt64->sys_define = 0; 2481 2482 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2483 dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address; 2484 2485 /* Load continuation entry data segments */ 2486 for (cnt = 0; 2487 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2488 cnt++, prm->seg_cnt--) { 2489 *dword_ptr++ = 2490 cpu_to_le32(pci_dma_lo32 2491 (sg_dma_address(prm->sg))); 2492 *dword_ptr++ = cpu_to_le32(pci_dma_hi32 2493 (sg_dma_address(prm->sg))); 2494 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2495 2496 prm->sg = sg_next(prm->sg); 2497 } 2498 } 2499 } 2500 2501 /* 2502 * ha->hardware_lock supposed to be held on entry. We have already made sure 2503 * that there is sufficient amount of request entries to not drop it. 
2504 */ 2505 static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2506 { 2507 int cnt; 2508 uint32_t *dword_ptr; 2509 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2510 2511 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2512 2513 /* Setup packet address segment pointer */ 2514 dword_ptr = pkt24->u.status0.dseg_0_address; 2515 2516 /* Set total data segment count */ 2517 if (prm->seg_cnt) 2518 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2519 2520 if (prm->seg_cnt == 0) { 2521 /* No data transfer */ 2522 *dword_ptr++ = 0; 2523 *dword_ptr = 0; 2524 return; 2525 } 2526 2527 /* If scatter gather */ 2528 2529 /* Load command entry data segments */ 2530 for (cnt = 0; 2531 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2532 cnt++, prm->seg_cnt--) { 2533 *dword_ptr++ = 2534 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); 2535 2536 *dword_ptr++ = cpu_to_le32(pci_dma_hi32( 2537 sg_dma_address(prm->sg))); 2538 2539 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2540 2541 prm->sg = sg_next(prm->sg); 2542 } 2543 2544 qlt_load_cont_data_segments(prm); 2545 } 2546 2547 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2548 { 2549 return cmd->bufflen > 0; 2550 } 2551 2552 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2553 { 2554 struct qla_tgt_cmd *cmd; 2555 struct scsi_qla_host *vha; 2556 2557 /* asc 0x10=dif error */ 2558 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2559 cmd = prm->cmd; 2560 vha = cmd->vha; 2561 /* ASCQ */ 2562 switch (prm->sense_buffer[13]) { 2563 case 1: 2564 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, 2565 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2566 "se_cmd=%p tag[%x]", 2567 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2568 cmd->atio.u.isp24.exchange_addr); 2569 break; 2570 case 2: 2571 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, 2572 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2573 "se_cmd=%p tag[%x]", 2574 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2575 cmd->atio.u.isp24.exchange_addr); 2576 break; 2577 case 3: 2578 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, 2579 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2580 "se_cmd=%p tag[%x]", 2581 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2582 cmd->atio.u.isp24.exchange_addr); 2583 break; 2584 default: 2585 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, 2586 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2587 "se_cmd=%p tag[%x]", 2588 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2589 cmd->atio.u.isp24.exchange_addr); 2590 break; 2591 } 2592 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); 2593 } 2594 } 2595 2596 /* 2597 * Called without ha->hardware_lock held 2598 */ 2599 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2600 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2601 uint32_t *full_req_cnt) 2602 { 2603 struct se_cmd *se_cmd = &cmd->se_cmd; 2604 struct qla_qpair *qpair = cmd->qpair; 2605 2606 prm->cmd = cmd; 2607 prm->tgt = cmd->tgt; 2608 prm->pkt = NULL; 2609 prm->rq_result = scsi_status; 2610 prm->sense_buffer = &cmd->sense_buffer[0]; 2611 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2612 prm->sg = NULL; 2613 prm->seg_cnt = -1; 2614 prm->req_cnt = 1; 2615 prm->residual = 0; 2616 prm->add_status_pkt = 0; 2617 prm->prot_sg = NULL; 2618 prm->prot_seg_cnt = 0; 2619 prm->tot_dsds = 0; 2620 2621 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2622 if (qlt_pci_map_calc_cnt(prm) != 0) 2623 return -EAGAIN; 2624 } 2625 2626 *full_req_cnt = prm->req_cnt; 2627 2628 if 
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2629 prm->residual = se_cmd->residual_count; 2630 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, 2631 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2632 prm->residual, se_cmd->tag, 2633 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2634 cmd->bufflen, prm->rq_result); 2635 prm->rq_result |= SS_RESIDUAL_UNDER; 2636 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2637 prm->residual = se_cmd->residual_count; 2638 ql_dbg_qp(ql_dbg_io, qpair, 0x305d, 2639 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2640 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2641 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2642 prm->rq_result |= SS_RESIDUAL_OVER; 2643 } 2644 2645 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2646 /* 2647 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2648 * ignored in *xmit_response() below 2649 */ 2650 if (qlt_has_data(cmd)) { 2651 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2652 (IS_FWI2_CAPABLE(cmd->vha->hw) && 2653 (prm->rq_result != 0))) { 2654 prm->add_status_pkt = 1; 2655 (*full_req_cnt)++; 2656 } 2657 } 2658 } 2659 2660 return 0; 2661 } 2662 2663 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, 2664 int sending_sense) 2665 { 2666 if (cmd->qpair->enable_class_2) 2667 return 0; 2668 2669 if (sending_sense) 2670 return cmd->conf_compl_supported; 2671 else 2672 return cmd->qpair->enable_explicit_conf && 2673 cmd->conf_compl_supported; 2674 } 2675 2676 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2677 struct qla_tgt_prm *prm) 2678 { 2679 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2680 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2681 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2682 if (qlt_need_explicit_conf(prm->cmd, 0)) { 2683 ctio->u.status0.flags |= cpu_to_le16( 2684 CTIO7_FLAGS_EXPLICIT_CONFORM | 2685 CTIO7_FLAGS_CONFORM_REQ); 2686 } 2687 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2688 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2689 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2690 int i; 2691 2692 if (qlt_need_explicit_conf(prm->cmd, 1)) { 2693 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2694 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, 2695 "Skipping EXPLICIT_CONFORM and " 2696 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2697 "non GOOD status\n"); 2698 goto skip_explict_conf; 2699 } 2700 ctio->u.status1.flags |= cpu_to_le16( 2701 CTIO7_FLAGS_EXPLICIT_CONFORM | 2702 CTIO7_FLAGS_CONFORM_REQ); 2703 } 2704 skip_explict_conf: 2705 ctio->u.status1.flags &= 2706 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2707 ctio->u.status1.flags |= 2708 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2709 ctio->u.status1.scsi_status |= 2710 cpu_to_le16(SS_SENSE_LEN_VALID); 2711 ctio->u.status1.sense_length = 2712 cpu_to_le16(prm->sense_buffer_len); 2713 for (i = 0; i < prm->sense_buffer_len/4; i++) 2714 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2715 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2716 2717 qlt_print_dif_err(prm); 2718 2719 } else { 2720 ctio->u.status1.flags &= 2721 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2722 ctio->u.status1.flags |= 2723 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2724 ctio->u.status1.sense_length = 0; 2725 memset(ctio->u.status1.sense_data, 0, 2726 sizeof(ctio->u.status1.sense_data)); 2727 } 2728 2729 /* Sense with len > 24, is it possible ??? 
*/ 2730 } 2731 2732 static inline int 2733 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2734 { 2735 switch (se_cmd->prot_op) { 2736 case TARGET_PROT_DOUT_INSERT: 2737 case TARGET_PROT_DIN_STRIP: 2738 if (ql2xenablehba_err_chk >= 1) 2739 return 1; 2740 break; 2741 case TARGET_PROT_DOUT_PASS: 2742 case TARGET_PROT_DIN_PASS: 2743 if (ql2xenablehba_err_chk >= 2) 2744 return 1; 2745 break; 2746 case TARGET_PROT_DIN_INSERT: 2747 case TARGET_PROT_DOUT_STRIP: 2748 return 1; 2749 default: 2750 break; 2751 } 2752 return 0; 2753 } 2754 2755 static inline int 2756 qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2757 { 2758 switch (se_cmd->prot_op) { 2759 case TARGET_PROT_DIN_INSERT: 2760 case TARGET_PROT_DOUT_INSERT: 2761 case TARGET_PROT_DIN_STRIP: 2762 case TARGET_PROT_DOUT_STRIP: 2763 case TARGET_PROT_DIN_PASS: 2764 case TARGET_PROT_DOUT_PASS: 2765 return 1; 2766 default: 2767 return 0; 2768 } 2769 return 0; 2770 } 2771 2772 /* 2773 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2774 */ 2775 static void 2776 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2777 uint16_t *pfw_prot_opts) 2778 { 2779 struct se_cmd *se_cmd = &cmd->se_cmd; 2780 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2781 scsi_qla_host_t *vha = cmd->tgt->vha; 2782 struct qla_hw_data *ha = vha->hw; 2783 uint32_t t32 = 0; 2784 2785 /* 2786 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2787 * have been immplemented by TCM, before AppTag is avail. 2788 * Look for modesense_handlers[] 2789 */ 2790 ctx->app_tag = 0; 2791 ctx->app_tag_mask[0] = 0x0; 2792 ctx->app_tag_mask[1] = 0x0; 2793 2794 if (IS_PI_UNINIT_CAPABLE(ha)) { 2795 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2796 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2797 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2798 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2799 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2800 } 2801 2802 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2803 2804 switch (se_cmd->prot_type) { 2805 case TARGET_DIF_TYPE0_PROT: 2806 /* 2807 * No check for ql2xenablehba_err_chk, as it 2808 * would be an I/O error if hba tag generation 2809 * is not done. 2810 */ 2811 ctx->ref_tag = cpu_to_le32(lba); 2812 /* enable ALL bytes of the ref tag */ 2813 ctx->ref_tag_mask[0] = 0xff; 2814 ctx->ref_tag_mask[1] = 0xff; 2815 ctx->ref_tag_mask[2] = 0xff; 2816 ctx->ref_tag_mask[3] = 0xff; 2817 break; 2818 case TARGET_DIF_TYPE1_PROT: 2819 /* 2820 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2821 * REF tag, and 16 bit app tag. 
2822 */ 2823 ctx->ref_tag = cpu_to_le32(lba); 2824 if (!qla_tgt_ref_mask_check(se_cmd) || 2825 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2826 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2827 break; 2828 } 2829 /* enable ALL bytes of the ref tag */ 2830 ctx->ref_tag_mask[0] = 0xff; 2831 ctx->ref_tag_mask[1] = 0xff; 2832 ctx->ref_tag_mask[2] = 0xff; 2833 ctx->ref_tag_mask[3] = 0xff; 2834 break; 2835 case TARGET_DIF_TYPE2_PROT: 2836 /* 2837 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2838 * tag has to match LBA in CDB + N 2839 */ 2840 ctx->ref_tag = cpu_to_le32(lba); 2841 if (!qla_tgt_ref_mask_check(se_cmd) || 2842 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2843 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2844 break; 2845 } 2846 /* enable ALL bytes of the ref tag */ 2847 ctx->ref_tag_mask[0] = 0xff; 2848 ctx->ref_tag_mask[1] = 0xff; 2849 ctx->ref_tag_mask[2] = 0xff; 2850 ctx->ref_tag_mask[3] = 0xff; 2851 break; 2852 case TARGET_DIF_TYPE3_PROT: 2853 /* For TYPE 3 protection: 16 bit GUARD only */ 2854 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2855 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2856 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2857 break; 2858 } 2859 } 2860 2861 static inline int 2862 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 2863 { 2864 uint32_t *cur_dsd; 2865 uint32_t transfer_length = 0; 2866 uint32_t data_bytes; 2867 uint32_t dif_bytes; 2868 uint8_t bundling = 1; 2869 uint8_t *clr_ptr; 2870 struct crc_context *crc_ctx_pkt = NULL; 2871 struct qla_hw_data *ha; 2872 struct ctio_crc2_to_fw *pkt; 2873 dma_addr_t crc_ctx_dma; 2874 uint16_t fw_prot_opts = 0; 2875 struct qla_tgt_cmd *cmd = prm->cmd; 2876 struct se_cmd *se_cmd = &cmd->se_cmd; 2877 uint32_t h; 2878 struct atio_from_isp *atio = &prm->cmd->atio; 2879 struct qla_tc_param tc; 2880 uint16_t t16; 2881 scsi_qla_host_t *vha = cmd->vha; 2882 2883 ha = vha->hw; 2884 2885 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; 2886 prm->pkt = pkt; 2887 memset(pkt, 0, sizeof(*pkt)); 2888 2889 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 2890 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 2891 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 2892 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 2893 2894 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 2895 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 2896 bundling = 0; 2897 2898 /* Compute dif len and adjust data len to incude protection */ 2899 data_bytes = cmd->bufflen; 2900 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 2901 2902 switch (se_cmd->prot_op) { 2903 case TARGET_PROT_DIN_INSERT: 2904 case TARGET_PROT_DOUT_STRIP: 2905 transfer_length = data_bytes; 2906 if (cmd->prot_sg_cnt) 2907 data_bytes += dif_bytes; 2908 break; 2909 case TARGET_PROT_DIN_STRIP: 2910 case TARGET_PROT_DOUT_INSERT: 2911 case TARGET_PROT_DIN_PASS: 2912 case TARGET_PROT_DOUT_PASS: 2913 transfer_length = data_bytes + dif_bytes; 2914 break; 2915 default: 2916 BUG(); 2917 break; 2918 } 2919 2920 if (!qlt_hba_err_chk_enabled(se_cmd)) 2921 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 2922 /* HBA error checking enabled */ 2923 else if (IS_PI_UNINIT_CAPABLE(ha)) { 2924 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2925 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2926 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 2927 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2928 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2929 } 2930 2931 switch (se_cmd->prot_op) { 2932 case TARGET_PROT_DIN_INSERT: 2933 case TARGET_PROT_DOUT_INSERT: 2934 
fw_prot_opts |= PO_MODE_DIF_INSERT; 2935 break; 2936 case TARGET_PROT_DIN_STRIP: 2937 case TARGET_PROT_DOUT_STRIP: 2938 fw_prot_opts |= PO_MODE_DIF_REMOVE; 2939 break; 2940 case TARGET_PROT_DIN_PASS: 2941 case TARGET_PROT_DOUT_PASS: 2942 fw_prot_opts |= PO_MODE_DIF_PASS; 2943 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 2944 break; 2945 default:/* Normal Request */ 2946 fw_prot_opts |= PO_MODE_DIF_PASS; 2947 break; 2948 } 2949 2950 /* ---- PKT ---- */ 2951 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2952 pkt->entry_type = CTIO_CRC2; 2953 pkt->entry_count = 1; 2954 pkt->vp_index = cmd->vp_idx; 2955 2956 h = qlt_make_handle(qpair); 2957 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2958 /* 2959 * CTIO type 7 from the firmware doesn't provide a way to 2960 * know the initiator's LOOP ID, hence we can't find 2961 * the session and, so, the command. 2962 */ 2963 return -EAGAIN; 2964 } else 2965 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 2966 2967 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 2968 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2969 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2970 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2971 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2972 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2973 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2974 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2975 2976 /* silence compile warning */ 2977 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2978 pkt->ox_id = cpu_to_le16(t16); 2979 2980 t16 = (atio->u.isp24.attr << 9); 2981 pkt->flags |= cpu_to_le16(t16); 2982 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2983 2984 /* Set transfer direction */ 2985 if (cmd->dma_data_direction == DMA_TO_DEVICE) 2986 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 2987 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2988 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2989 2990 pkt->dseg_count = prm->tot_dsds; 2991 /* Fibre channel byte count */ 2992 pkt->transfer_length = cpu_to_le32(transfer_length); 2993 2994 /* ----- CRC context -------- */ 2995 2996 /* Allocate CRC context from global pool */ 2997 crc_ctx_pkt = cmd->ctx = 2998 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 2999 3000 if (!crc_ctx_pkt) 3001 goto crc_queuing_error; 3002 3003 /* Zero out CTX area. 
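 * The CRC context is a separate DMA buffer handed to the firmware:
 * qla_tgt_set_dif_tags() seeds the ref/app tag values and masks in it, and
 * the qla24xx_walk_and_build_* helpers below build the data-segment
 * descriptors for the data (and, when bundling, protection) scatterlists.
 * dif_byte_count is 8 bytes of PI per block, e.g. a 64-block transfer
 * carries 512 bytes of protection data.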
*/ 3004 clr_ptr = (uint8_t *)crc_ctx_pkt; 3005 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); 3006 3007 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3008 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 3009 3010 /* Set handle */ 3011 crc_ctx_pkt->handle = pkt->handle; 3012 3013 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3014 3015 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 3016 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 3017 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3018 3019 if (!bundling) { 3020 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 3021 } else { 3022 /* 3023 * Configure Bundling if we need to fetch interlaving 3024 * protection PCI accesses 3025 */ 3026 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3027 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3028 crc_ctx_pkt->u.bundling.dseg_count = 3029 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3030 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 3031 } 3032 3033 /* Finish the common fields of CRC pkt */ 3034 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3035 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3036 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3037 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3038 3039 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3040 tc.vha = vha; 3041 tc.blk_sz = cmd->blk_sz; 3042 tc.bufflen = cmd->bufflen; 3043 tc.sg = cmd->sg; 3044 tc.prot_sg = cmd->prot_sg; 3045 tc.ctx = crc_ctx_pkt; 3046 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 3047 3048 /* Walks data segments */ 3049 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3050 3051 if (!bundling && prm->prot_seg_cnt) { 3052 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3053 prm->tot_dsds, &tc)) 3054 goto crc_queuing_error; 3055 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3056 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3057 goto crc_queuing_error; 3058 3059 if (bundling && prm->prot_seg_cnt) { 3060 /* Walks dif segments */ 3061 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3062 3063 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 3064 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3065 prm->prot_seg_cnt, &tc)) 3066 goto crc_queuing_error; 3067 } 3068 return QLA_SUCCESS; 3069 3070 crc_queuing_error: 3071 /* Cleanup will be performed by the caller */ 3072 qpair->req->outstanding_cmds[h] = NULL; 3073 3074 return QLA_FUNCTION_FAILED; 3075 } 3076 3077 /* 3078 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 3079 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 3080 */ 3081 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 3082 uint8_t scsi_status) 3083 { 3084 struct scsi_qla_host *vha = cmd->vha; 3085 struct qla_qpair *qpair = cmd->qpair; 3086 struct ctio7_to_24xx *pkt; 3087 struct qla_tgt_prm prm; 3088 uint32_t full_req_cnt = 0; 3089 unsigned long flags = 0; 3090 int res; 3091 3092 if (cmd->sess && cmd->sess->deleted) { 3093 cmd->state = QLA_TGT_STATE_PROCESSED; 3094 if (cmd->sess->logout_completed) 3095 /* no need to terminate. FW already freed exchange. */ 3096 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3097 else 3098 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0); 3099 return 0; 3100 } 3101 3102 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, 3103 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", 3104 (xmit_type & QLA_TGT_XMIT_STATUS) ? 
3105 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 3106 &cmd->se_cmd, qpair->id); 3107 3108 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3109 &full_req_cnt); 3110 if (unlikely(res != 0)) { 3111 return res; 3112 } 3113 3114 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3115 3116 if (xmit_type == QLA_TGT_XMIT_STATUS) 3117 qpair->tgt_counters.core_qla_snd_status++; 3118 else 3119 qpair->tgt_counters.core_qla_que_buf++; 3120 3121 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { 3122 /* 3123 * Either the port is not online or this request was from 3124 * previous life, just abort the processing. 3125 */ 3126 cmd->state = QLA_TGT_STATE_PROCESSED; 3127 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3128 ql_dbg_qp(ql_dbg_async, qpair, 0xe101, 3129 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3130 vha->flags.online, qla2x00_reset_active(vha), 3131 cmd->reset_count, qpair->chip_reset); 3132 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3133 return 0; 3134 } 3135 3136 /* Does F/W have an IOCBs for this request */ 3137 res = qlt_check_reserve_free_req(qpair, full_req_cnt); 3138 if (unlikely(res)) 3139 goto out_unmap_unlock; 3140 3141 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 3142 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3143 else 3144 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3145 if (unlikely(res != 0)) { 3146 qpair->req->cnt += full_req_cnt; 3147 goto out_unmap_unlock; 3148 } 3149 3150 pkt = (struct ctio7_to_24xx *)prm.pkt; 3151 3152 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 3153 pkt->u.status0.flags |= 3154 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 3155 CTIO7_FLAGS_STATUS_MODE_0); 3156 3157 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3158 qlt_load_data_segments(&prm); 3159 3160 if (prm.add_status_pkt == 0) { 3161 if (xmit_type & QLA_TGT_XMIT_STATUS) { 3162 pkt->u.status0.scsi_status = 3163 cpu_to_le16(prm.rq_result); 3164 pkt->u.status0.residual = 3165 cpu_to_le32(prm.residual); 3166 pkt->u.status0.flags |= cpu_to_le16( 3167 CTIO7_FLAGS_SEND_STATUS); 3168 if (qlt_need_explicit_conf(cmd, 0)) { 3169 pkt->u.status0.flags |= 3170 cpu_to_le16( 3171 CTIO7_FLAGS_EXPLICIT_CONFORM | 3172 CTIO7_FLAGS_CONFORM_REQ); 3173 } 3174 } 3175 3176 } else { 3177 /* 3178 * We have already made sure that there is sufficient 3179 * amount of request entries to not drop HW lock in 3180 * req_pkt(). 3181 */ 3182 struct ctio7_to_24xx *ctio = 3183 (struct ctio7_to_24xx *)qlt_get_req_pkt( 3184 qpair->req); 3185 3186 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, 3187 "Building additional status packet 0x%p.\n", 3188 ctio); 3189 3190 /* 3191 * T10Dif: ctio_crc2_to_fw overlay ontop of 3192 * ctio7_to_24xx 3193 */ 3194 memcpy(ctio, pkt, sizeof(*ctio)); 3195 /* reset back to CTIO7 */ 3196 ctio->entry_count = 1; 3197 ctio->entry_type = CTIO_TYPE7; 3198 ctio->dseg_count = 0; 3199 ctio->u.status1.flags &= ~cpu_to_le16( 3200 CTIO7_FLAGS_DATA_IN); 3201 3202 /* Real finish is ctio_m1's finish */ 3203 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 3204 pkt->u.status0.flags |= cpu_to_le16( 3205 CTIO7_FLAGS_DONT_RET_CTIO); 3206 3207 /* qlt_24xx_init_ctio_to_isp will correct 3208 * all neccessary fields that's part of CTIO7. 3209 * There should be no residual of CTIO-CRC2 data. 
3210 */ 3211 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 3212 &prm); 3213 } 3214 } else 3215 qlt_24xx_init_ctio_to_isp(pkt, &prm); 3216 3217 3218 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 3219 cmd->cmd_sent_to_fw = 1; 3220 3221 /* Memory Barrier */ 3222 wmb(); 3223 if (qpair->reqq_start_iocbs) 3224 qpair->reqq_start_iocbs(qpair); 3225 else 3226 qla2x00_start_iocbs(vha, qpair->req); 3227 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3228 3229 return 0; 3230 3231 out_unmap_unlock: 3232 qlt_unmap_sg(vha, cmd); 3233 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3234 3235 return res; 3236 } 3237 EXPORT_SYMBOL(qlt_xmit_response); 3238 3239 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 3240 { 3241 struct ctio7_to_24xx *pkt; 3242 struct scsi_qla_host *vha = cmd->vha; 3243 struct qla_tgt *tgt = cmd->tgt; 3244 struct qla_tgt_prm prm; 3245 unsigned long flags = 0; 3246 int res = 0; 3247 struct qla_qpair *qpair = cmd->qpair; 3248 3249 memset(&prm, 0, sizeof(prm)); 3250 prm.cmd = cmd; 3251 prm.tgt = tgt; 3252 prm.sg = NULL; 3253 prm.req_cnt = 1; 3254 3255 /* Calculate number of entries and segments required */ 3256 if (qlt_pci_map_calc_cnt(&prm) != 0) 3257 return -EAGAIN; 3258 3259 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3260 (cmd->sess && cmd->sess->deleted)) { 3261 /* 3262 * Either the port is not online or this request was from 3263 * previous life, just abort the processing. 3264 */ 3265 cmd->state = QLA_TGT_STATE_NEED_DATA; 3266 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3267 ql_dbg_qp(ql_dbg_async, qpair, 0xe102, 3268 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 3269 vha->flags.online, qla2x00_reset_active(vha), 3270 cmd->reset_count, qpair->chip_reset); 3271 return 0; 3272 } 3273 3274 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3275 /* Does F/W have an IOCBs for this request */ 3276 res = qlt_check_reserve_free_req(qpair, prm.req_cnt); 3277 if (res != 0) 3278 goto out_unlock_free_unmap; 3279 if (cmd->se_cmd.prot_op) 3280 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3281 else 3282 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3283 3284 if (unlikely(res != 0)) { 3285 qpair->req->cnt += prm.req_cnt; 3286 goto out_unlock_free_unmap; 3287 } 3288 3289 pkt = (struct ctio7_to_24xx *)prm.pkt; 3290 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 3291 CTIO7_FLAGS_STATUS_MODE_0); 3292 3293 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3294 qlt_load_data_segments(&prm); 3295 3296 cmd->state = QLA_TGT_STATE_NEED_DATA; 3297 cmd->cmd_sent_to_fw = 1; 3298 3299 /* Memory Barrier */ 3300 wmb(); 3301 if (qpair->reqq_start_iocbs) 3302 qpair->reqq_start_iocbs(qpair); 3303 else 3304 qla2x00_start_iocbs(vha, qpair->req); 3305 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3306 3307 return res; 3308 3309 out_unlock_free_unmap: 3310 qlt_unmap_sg(vha, cmd); 3311 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3312 3313 return res; 3314 } 3315 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3316 3317 3318 /* 3319 * it is assumed either hardware_lock or qpair lock is held. 
3320 */ 3321 static void 3322 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 3323 struct ctio_crc_from_fw *sts) 3324 { 3325 uint8_t *ap = &sts->actual_dif[0]; 3326 uint8_t *ep = &sts->expected_dif[0]; 3327 uint64_t lba = cmd->se_cmd.t_task_lba; 3328 uint8_t scsi_status, sense_key, asc, ascq; 3329 unsigned long flags; 3330 struct scsi_qla_host *vha = cmd->vha; 3331 3332 cmd->trc_flags |= TRC_DIF_ERR; 3333 3334 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3335 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3336 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3337 3338 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3339 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3340 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3341 3342 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3343 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3344 3345 scsi_status = sense_key = asc = ascq = 0; 3346 3347 /* check appl tag */ 3348 if (cmd->e_app_tag != cmd->a_app_tag) { 3349 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, 3350 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3351 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3352 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3353 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3354 cmd->atio.u.isp24.fcp_hdr.ox_id); 3355 3356 cmd->dif_err_code = DIF_ERR_APP; 3357 scsi_status = SAM_STAT_CHECK_CONDITION; 3358 sense_key = ABORTED_COMMAND; 3359 asc = 0x10; 3360 ascq = 0x2; 3361 } 3362 3363 /* check ref tag */ 3364 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3365 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, 3366 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", 3367 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3368 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3369 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3370 cmd->atio.u.isp24.fcp_hdr.ox_id); 3371 3372 cmd->dif_err_code = DIF_ERR_REF; 3373 scsi_status = SAM_STAT_CHECK_CONDITION; 3374 sense_key = ABORTED_COMMAND; 3375 asc = 0x10; 3376 ascq = 0x3; 3377 goto out; 3378 } 3379 3380 /* check guard */ 3381 if (cmd->e_guard != cmd->a_guard) { 3382 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3383 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3384 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3385 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3386 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3387 cmd->atio.u.isp24.fcp_hdr.ox_id); 3388 3389 cmd->dif_err_code = DIF_ERR_GRD; 3390 scsi_status = SAM_STAT_CHECK_CONDITION; 3391 sense_key = ABORTED_COMMAND; 3392 asc = 0x10; 3393 ascq = 0x1; 3394 } 3395 out: 3396 switch (cmd->state) { 3397 case QLA_TGT_STATE_NEED_DATA: 3398 /* handle_data will load DIF error code */ 3399 cmd->state = QLA_TGT_STATE_DATA_IN; 3400 vha->hw->tgt.tgt_ops->handle_data(cmd); 3401 break; 3402 default: 3403 spin_lock_irqsave(&cmd->cmd_lock, flags); 3404 if (cmd->aborted) { 3405 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3406 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3407 break; 3408 } 3409 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3410 3411 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3412 ascq); 3413 /* assume scsi status gets out on the wire. 3414 * Will not wait for completion. 
3415 */ 3416 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3417 break; 3418 } 3419 } 3420 3421 /* If hardware_lock held on entry, might drop it, then reaquire */ 3422 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3423 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3424 struct imm_ntfy_from_isp *ntfy) 3425 { 3426 struct nack_to_isp *nack; 3427 struct qla_hw_data *ha = vha->hw; 3428 request_t *pkt; 3429 int ret = 0; 3430 3431 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3432 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3433 3434 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3435 if (pkt == NULL) { 3436 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3437 "qla_target(%d): %s failed: unable to allocate " 3438 "request packet\n", vha->vp_idx, __func__); 3439 return -ENOMEM; 3440 } 3441 3442 pkt->entry_type = NOTIFY_ACK_TYPE; 3443 pkt->entry_count = 1; 3444 pkt->handle = QLA_TGT_SKIP_HANDLE; 3445 3446 nack = (struct nack_to_isp *)pkt; 3447 nack->ox_id = ntfy->ox_id; 3448 3449 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3450 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3451 nack->u.isp24.flags = ntfy->u.isp24.flags & 3452 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3453 } 3454 3455 /* terminate */ 3456 nack->u.isp24.flags |= 3457 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3458 3459 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3460 nack->u.isp24.status = ntfy->u.isp24.status; 3461 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3462 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3463 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3464 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3465 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3466 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3467 3468 qla2x00_start_iocbs(vha, vha->req); 3469 return ret; 3470 } 3471 3472 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3473 struct imm_ntfy_from_isp *imm, int ha_locked) 3474 { 3475 unsigned long flags = 0; 3476 int rc; 3477 3478 if (ha_locked) { 3479 rc = __qlt_send_term_imm_notif(vha, imm); 3480 3481 #if 0 /* Todo */ 3482 if (rc == -ENOMEM) 3483 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3484 #else 3485 if (rc) { 3486 } 3487 #endif 3488 goto done; 3489 } 3490 3491 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 3492 rc = __qlt_send_term_imm_notif(vha, imm); 3493 3494 #if 0 /* Todo */ 3495 if (rc == -ENOMEM) 3496 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3497 #endif 3498 3499 done: 3500 if (!ha_locked) 3501 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3502 } 3503 3504 /* 3505 * If hardware_lock held on entry, might drop it, then reaquire 3506 * This function sends the appropriate CTIO to ISP 2xxx or 24xx 3507 */ 3508 static int __qlt_send_term_exchange(struct qla_qpair *qpair, 3509 struct qla_tgt_cmd *cmd, 3510 struct atio_from_isp *atio) 3511 { 3512 struct scsi_qla_host *vha = qpair->vha; 3513 struct ctio7_to_24xx *ctio24; 3514 struct qla_hw_data *ha = vha->hw; 3515 request_t *pkt; 3516 int ret = 0; 3517 uint16_t temp; 3518 3519 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3520 3521 if (cmd) 3522 vha = cmd->vha; 3523 3524 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); 3525 if (pkt == NULL) { 3526 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3527 "qla_target(%d): %s failed: unable to allocate " 3528 "request packet\n", vha->vp_idx, __func__); 3529 return -ENOMEM; 3530 } 3531 3532 if (cmd != NULL) { 3533 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3534 
ql_dbg(ql_dbg_tgt, vha, 0xe051, 3535 "qla_target(%d): Terminating cmd %p with " 3536 "incorrect state %d\n", vha->vp_idx, cmd, 3537 cmd->state); 3538 } else 3539 ret = 1; 3540 } 3541 3542 qpair->tgt_counters.num_term_xchg_sent++; 3543 pkt->entry_count = 1; 3544 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3545 3546 ctio24 = (struct ctio7_to_24xx *)pkt; 3547 ctio24->entry_type = CTIO_TYPE7; 3548 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3549 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3550 ctio24->vp_index = vha->vp_idx; 3551 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3552 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3553 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3554 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3555 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | 3556 CTIO7_FLAGS_TERMINATE; 3557 ctio24->u.status1.flags = cpu_to_le16(temp); 3558 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3559 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3560 3561 /* Most likely, it isn't needed */ 3562 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3563 &atio->u.isp24.fcp_cmnd.add_cdb[ 3564 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3565 if (ctio24->u.status1.residual != 0) 3566 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3567 3568 /* Memory Barrier */ 3569 wmb(); 3570 if (qpair->reqq_start_iocbs) 3571 qpair->reqq_start_iocbs(qpair); 3572 else 3573 qla2x00_start_iocbs(vha, qpair->req); 3574 return ret; 3575 } 3576 3577 static void qlt_send_term_exchange(struct qla_qpair *qpair, 3578 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, 3579 int ul_abort) 3580 { 3581 struct scsi_qla_host *vha; 3582 unsigned long flags = 0; 3583 int rc; 3584 3585 /* why use different vha? NPIV */ 3586 if (cmd) 3587 vha = cmd->vha; 3588 else 3589 vha = qpair->vha; 3590 3591 if (ha_locked) { 3592 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3593 if (rc == -ENOMEM) 3594 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3595 goto done; 3596 } 3597 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3598 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3599 if (rc == -ENOMEM) 3600 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3601 3602 done: 3603 if (cmd && !ul_abort && !cmd->aborted) { 3604 if (cmd->sg_mapped) 3605 qlt_unmap_sg(vha, cmd); 3606 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3607 } 3608 3609 if (!ha_locked) 3610 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3611 3612 return; 3613 } 3614 3615 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3616 { 3617 struct list_head free_list; 3618 struct qla_tgt_cmd *cmd, *tcmd; 3619 3620 vha->hw->tgt.leak_exchg_thresh_hold = 3621 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3622 3623 cmd = tcmd = NULL; 3624 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3625 INIT_LIST_HEAD(&free_list); 3626 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3627 3628 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3629 list_del(&cmd->cmd_list); 3630 /* This cmd was never sent to TCM. 
There is no need 3631 * to schedule free or call free_cmd 3632 */ 3633 qlt_free_cmd(cmd); 3634 vha->hw->tgt.num_qfull_cmds_alloc--; 3635 } 3636 } 3637 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3638 } 3639 3640 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3641 { 3642 uint32_t total_leaked; 3643 3644 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3645 3646 if (vha->hw->tgt.leak_exchg_thresh_hold && 3647 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3648 3649 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3650 "Chip reset due to exchange starvation: %d/%d.\n", 3651 total_leaked, vha->hw->cur_fw_xcb_count); 3652 3653 if (IS_P3P_TYPE(vha->hw)) 3654 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3655 else 3656 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3657 qla2xxx_wake_dpc(vha); 3658 } 3659 3660 } 3661 3662 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3663 { 3664 struct qla_tgt *tgt = cmd->tgt; 3665 struct scsi_qla_host *vha = tgt->vha; 3666 struct se_cmd *se_cmd = &cmd->se_cmd; 3667 unsigned long flags; 3668 3669 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3670 "qla_target(%d): terminating exchange for aborted cmd=%p " 3671 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3672 se_cmd->tag); 3673 3674 spin_lock_irqsave(&cmd->cmd_lock, flags); 3675 if (cmd->aborted) { 3676 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3677 /* 3678 * It's normal to see 2 calls in this path: 3679 * 1) XFER Rdy completion + CMD_T_ABORT 3680 * 2) TCM TMR - drain_state_list 3681 */ 3682 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3683 "multiple abort. %p transport_state %x, t_state %x, " 3684 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3685 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3686 return EIO; 3687 } 3688 cmd->aborted = 1; 3689 cmd->trc_flags |= TRC_ABORT; 3690 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3691 3692 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3693 return 0; 3694 } 3695 EXPORT_SYMBOL(qlt_abort_cmd); 3696 3697 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3698 { 3699 struct fc_port *sess = cmd->sess; 3700 3701 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3702 "%s: se_cmd[%p] ox_id %04x\n", 3703 __func__, &cmd->se_cmd, 3704 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3705 3706 BUG_ON(cmd->cmd_in_wq); 3707 3708 if (cmd->sg_mapped) 3709 qlt_unmap_sg(cmd->vha, cmd); 3710 3711 if (!cmd->q_full) 3712 qlt_decr_num_pend_cmds(cmd->vha); 3713 3714 BUG_ON(cmd->sg_mapped); 3715 cmd->jiffies_at_free = get_jiffies_64(); 3716 if (unlikely(cmd->free_sg)) 3717 kfree(cmd->sg); 3718 3719 if (!sess || !sess->se_sess) { 3720 WARN_ON(1); 3721 return; 3722 } 3723 cmd->jiffies_at_free = get_jiffies_64(); 3724 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3725 } 3726 EXPORT_SYMBOL(qlt_free_cmd); 3727 3728 /* 3729 * ha->hardware_lock supposed to be held on entry. 
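 * qlt_term_ctio_exchange() returns nonzero when a TERM EXCHANGE had to be
 * queued; if the completed CTIO already carries OF_TERM_EXCH the firmware
 * has terminated the exchange itself and nothing more is sent.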
Might drop it, then reaquire 3730 */ 3731 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, 3732 struct qla_tgt_cmd *cmd, uint32_t status) 3733 { 3734 int term = 0; 3735 struct scsi_qla_host *vha = qpair->vha; 3736 3737 if (cmd->se_cmd.prot_op) 3738 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, 3739 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3740 "se_cmd=%p tag[%x] op %#x/%s", 3741 cmd->lba, cmd->lba, 3742 cmd->num_blks, &cmd->se_cmd, 3743 cmd->atio.u.isp24.exchange_addr, 3744 cmd->se_cmd.prot_op, 3745 prot_op_str(cmd->se_cmd.prot_op)); 3746 3747 if (ctio != NULL) { 3748 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3749 term = !(c->flags & 3750 cpu_to_le16(OF_TERM_EXCH)); 3751 } else 3752 term = 1; 3753 3754 if (term) 3755 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); 3756 3757 return term; 3758 } 3759 3760 3761 /* ha->hardware_lock supposed to be held on entry */ 3762 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3763 struct rsp_que *rsp, uint32_t handle, void *ctio) 3764 { 3765 struct qla_tgt_cmd *cmd = NULL; 3766 struct req_que *req; 3767 int qid = GET_QID(handle); 3768 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; 3769 3770 if (unlikely(h == QLA_TGT_SKIP_HANDLE)) 3771 return NULL; 3772 3773 if (qid == rsp->req->id) { 3774 req = rsp->req; 3775 } else if (vha->hw->req_q_map[qid]) { 3776 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, 3777 "qla_target(%d): CTIO completion with different QID %d handle %x\n", 3778 vha->vp_idx, rsp->id, handle); 3779 req = vha->hw->req_q_map[qid]; 3780 } else { 3781 return NULL; 3782 } 3783 3784 h &= QLA_CMD_HANDLE_MASK; 3785 3786 if (h != QLA_TGT_NULL_HANDLE) { 3787 if (unlikely(h >= req->num_outstanding_cmds)) { 3788 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3789 "qla_target(%d): Wrong handle %x received\n", 3790 vha->vp_idx, handle); 3791 return NULL; 3792 } 3793 3794 cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h]; 3795 if (unlikely(cmd == NULL)) { 3796 ql_dbg(ql_dbg_async, vha, 0xe053, 3797 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", 3798 vha->vp_idx, handle, req->id, rsp->id); 3799 return NULL; 3800 } 3801 req->outstanding_cmds[h] = NULL; 3802 } else if (ctio != NULL) { 3803 /* We can't get loop ID from CTIO7 */ 3804 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3805 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3806 "support NULL handles\n", vha->vp_idx); 3807 return NULL; 3808 } 3809 3810 return cmd; 3811 } 3812 3813 /* hardware_lock should be held by caller. */ 3814 void 3815 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 3816 { 3817 struct qla_hw_data *ha = vha->hw; 3818 3819 if (cmd->sg_mapped) 3820 qlt_unmap_sg(vha, cmd); 3821 3822 /* TODO: fix debug message type and ids. */ 3823 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3824 ql_dbg(ql_dbg_io, vha, 0xff00, 3825 "HOST-ABORT: state=PROCESSED.\n"); 3826 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3827 cmd->write_data_transferred = 0; 3828 cmd->state = QLA_TGT_STATE_DATA_IN; 3829 3830 ql_dbg(ql_dbg_io, vha, 0xff01, 3831 "HOST-ABORT: state=DATA_IN.\n"); 3832 3833 ha->tgt.tgt_ops->handle_data(cmd); 3834 return; 3835 } else { 3836 ql_dbg(ql_dbg_io, vha, 0xff03, 3837 "HOST-ABORT: state=BAD(%d).\n", 3838 cmd->state); 3839 dump_stack(); 3840 } 3841 3842 cmd->trc_flags |= TRC_FLUSH; 3843 ha->tgt.tgt_ops->free_cmd(cmd); 3844 } 3845 3846 /* 3847 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 3848 */ 3849 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, 3850 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) 3851 { 3852 struct qla_hw_data *ha = vha->hw; 3853 struct se_cmd *se_cmd; 3854 struct qla_tgt_cmd *cmd; 3855 struct qla_qpair *qpair = rsp->qpair; 3856 3857 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3858 /* That could happen only in case of an error/reset/abort */ 3859 if (status != CTIO_SUCCESS) { 3860 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3861 "Intermediate CTIO received" 3862 " (status %x)\n", status); 3863 } 3864 return; 3865 } 3866 3867 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); 3868 if (cmd == NULL) 3869 return; 3870 3871 se_cmd = &cmd->se_cmd; 3872 cmd->cmd_sent_to_fw = 0; 3873 3874 qlt_unmap_sg(vha, cmd); 3875 3876 if (unlikely(status != CTIO_SUCCESS)) { 3877 switch (status & 0xFFFF) { 3878 case CTIO_LIP_RESET: 3879 case CTIO_TARGET_RESET: 3880 case CTIO_ABORTED: 3881 /* driver request abort via Terminate exchange */ 3882 case CTIO_TIMEOUT: 3883 case CTIO_INVALID_RX_ID: 3884 /* They are OK */ 3885 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3886 "qla_target(%d): CTIO with " 3887 "status %#x received, state %x, se_cmd %p, " 3888 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3889 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3890 status, cmd->state, se_cmd); 3891 break; 3892 3893 case CTIO_PORT_LOGGED_OUT: 3894 case CTIO_PORT_UNAVAILABLE: 3895 { 3896 int logged_out = 3897 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; 3898 3899 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3900 "qla_target(%d): CTIO with %s status %x " 3901 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3902 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3903 status, cmd->state, se_cmd); 3904 3905 if (logged_out && cmd->sess) { 3906 /* 3907 * Session is already logged out, but we need 3908 * to notify initiator, who's not aware of this 3909 */ 3910 cmd->sess->logout_on_delete = 0; 3911 cmd->sess->send_els_logo = 1; 3912 ql_dbg(ql_dbg_disc, vha, 0x20f8, 3913 "%s %d %8phC post del sess\n", 3914 __func__, __LINE__, cmd->sess->port_name); 3915 3916 qlt_schedule_sess_for_deletion(cmd->sess); 3917 } 3918 break; 3919 } 3920 case CTIO_DIF_ERROR: { 3921 struct ctio_crc_from_fw *crc = 3922 (struct ctio_crc_from_fw *)ctio; 3923 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3924 "qla_target(%d): CTIO with DIF_ERROR status %x " 3925 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 3926 "expect_dif[0x%llx]\n", 3927 vha->vp_idx, status, cmd->state, se_cmd, 3928 *((u64 *)&crc->actual_dif[0]), 3929 *((u64 *)&crc->expected_dif[0])); 3930 3931 qlt_handle_dif_error(qpair, cmd, ctio); 3932 return; 3933 } 3934 default: 3935 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3936 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 3937 vha->vp_idx, status, cmd->state, se_cmd); 3938 break; 3939 } 3940 3941 3942 /* "cmd->aborted" means 3943 * cmd is already aborted/terminated, we don't 3944 * need to terminate again. The exchange is already 3945 * cleaned up/freed at FW level. Just cleanup at driver 3946 * level. 
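 * Hence the exchange is only (re)terminated below when the command is
 * neither waiting for write data (NEED_DATA) nor already aborted.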
3947 */ 3948 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3949 (!cmd->aborted)) { 3950 cmd->trc_flags |= TRC_CTIO_ERR; 3951 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) 3952 return; 3953 } 3954 } 3955 3956 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3957 cmd->trc_flags |= TRC_CTIO_DONE; 3958 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3959 cmd->state = QLA_TGT_STATE_DATA_IN; 3960 3961 if (status == CTIO_SUCCESS) 3962 cmd->write_data_transferred = 1; 3963 3964 ha->tgt.tgt_ops->handle_data(cmd); 3965 return; 3966 } else if (cmd->aborted) { 3967 cmd->trc_flags |= TRC_CTIO_ABORTED; 3968 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3969 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3970 } else { 3971 cmd->trc_flags |= TRC_CTIO_STRANGE; 3972 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3973 "qla_target(%d): A command in state (%d) should " 3974 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3975 } 3976 3977 if (unlikely(status != CTIO_SUCCESS) && 3978 !cmd->aborted) { 3979 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3980 dump_stack(); 3981 } 3982 3983 ha->tgt.tgt_ops->free_cmd(cmd); 3984 } 3985 3986 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 3987 uint8_t task_codes) 3988 { 3989 int fcp_task_attr; 3990 3991 switch (task_codes) { 3992 case ATIO_SIMPLE_QUEUE: 3993 fcp_task_attr = TCM_SIMPLE_TAG; 3994 break; 3995 case ATIO_HEAD_OF_QUEUE: 3996 fcp_task_attr = TCM_HEAD_TAG; 3997 break; 3998 case ATIO_ORDERED_QUEUE: 3999 fcp_task_attr = TCM_ORDERED_TAG; 4000 break; 4001 case ATIO_ACA_QUEUE: 4002 fcp_task_attr = TCM_ACA_TAG; 4003 break; 4004 case ATIO_UNTAGGED: 4005 fcp_task_attr = TCM_SIMPLE_TAG; 4006 break; 4007 default: 4008 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 4009 "qla_target: unknown task code %x, use ORDERED instead\n", 4010 task_codes); 4011 fcp_task_attr = TCM_ORDERED_TAG; 4012 break; 4013 } 4014 4015 return fcp_task_attr; 4016 } 4017 4018 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, 4019 uint8_t *); 4020 /* 4021 * Process context for I/O path into tcm_qla2xxx code 4022 */ 4023 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 4024 { 4025 scsi_qla_host_t *vha = cmd->vha; 4026 struct qla_hw_data *ha = vha->hw; 4027 struct fc_port *sess = cmd->sess; 4028 struct atio_from_isp *atio = &cmd->atio; 4029 unsigned char *cdb; 4030 unsigned long flags; 4031 uint32_t data_length; 4032 int ret, fcp_task_attr, data_dir, bidi = 0; 4033 struct qla_qpair *qpair = cmd->qpair; 4034 4035 cmd->cmd_in_wq = 0; 4036 cmd->trc_flags |= TRC_DO_WORK; 4037 4038 if (cmd->aborted) { 4039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 4040 "cmd with tag %u is aborted\n", 4041 cmd->atio.u.isp24.exchange_addr); 4042 goto out_term; 4043 } 4044 4045 spin_lock_init(&cmd->cmd_lock); 4046 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 4047 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 4048 4049 if (atio->u.isp24.fcp_cmnd.rddata && 4050 atio->u.isp24.fcp_cmnd.wrdata) { 4051 bidi = 1; 4052 data_dir = DMA_TO_DEVICE; 4053 } else if (atio->u.isp24.fcp_cmnd.rddata) 4054 data_dir = DMA_FROM_DEVICE; 4055 else if (atio->u.isp24.fcp_cmnd.wrdata) 4056 data_dir = DMA_TO_DEVICE; 4057 else 4058 data_dir = DMA_NONE; 4059 4060 fcp_task_attr = qlt_get_fcp_task_attr(vha, 4061 atio->u.isp24.fcp_cmnd.task_attr); 4062 data_length = be32_to_cpu(get_unaligned((uint32_t *) 4063 &atio->u.isp24.fcp_cmnd.add_cdb[ 4064 atio->u.isp24.fcp_cmnd.add_cdb_len])); 4065 4066 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 4067 fcp_task_attr, data_dir, bidi); 4068 if (ret != 0) 
4069 goto out_term; 4070 /* 4071 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( 4072 */ 4073 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4074 ha->tgt.tgt_ops->put_sess(sess); 4075 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4076 return; 4077 4078 out_term: 4079 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4080 /* 4081 * cmd has not sent to target yet, so pass NULL as the second 4082 * argument to qlt_send_term_exchange() and free the memory here. 4083 */ 4084 cmd->trc_flags |= TRC_DO_WORK_ERR; 4085 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 4086 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); 4087 4088 qlt_decr_num_pend_cmds(vha); 4089 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 4090 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 4091 4092 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4093 ha->tgt.tgt_ops->put_sess(sess); 4094 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4095 } 4096 4097 static void qlt_do_work(struct work_struct *work) 4098 { 4099 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4100 scsi_qla_host_t *vha = cmd->vha; 4101 unsigned long flags; 4102 4103 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4104 list_del(&cmd->cmd_list); 4105 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4106 4107 __qlt_do_work(cmd); 4108 } 4109 4110 void qlt_clr_qp_table(struct scsi_qla_host *vha) 4111 { 4112 unsigned long flags; 4113 struct qla_hw_data *ha = vha->hw; 4114 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4115 void *node; 4116 u64 key = 0; 4117 4118 ql_log(ql_log_info, vha, 0x706c, 4119 "User update Number of Active Qpairs %d\n", 4120 ha->tgt.num_act_qpairs); 4121 4122 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4123 4124 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 4125 btree_remove64(&tgt->lun_qpair_map, key); 4126 4127 ha->base_qpair->lun_cnt = 0; 4128 for (key = 0; key < ha->max_qpairs; key++) 4129 if (ha->queue_pair_map[key]) 4130 ha->queue_pair_map[key]->lun_cnt = 0; 4131 4132 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4133 } 4134 4135 static void qlt_assign_qpair(struct scsi_qla_host *vha, 4136 struct qla_tgt_cmd *cmd) 4137 { 4138 struct qla_qpair *qpair, *qp; 4139 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4140 struct qla_qpair_hint *h; 4141 4142 if (vha->flags.qpairs_available) { 4143 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); 4144 if (unlikely(!h)) { 4145 /* spread lun to qpair ratio evently */ 4146 int lcnt = 0, rc; 4147 struct scsi_qla_host *base_vha = 4148 pci_get_drvdata(vha->hw->pdev); 4149 4150 qpair = vha->hw->base_qpair; 4151 if (qpair->lun_cnt == 0) { 4152 qpair->lun_cnt++; 4153 h = qla_qpair_to_hint(tgt, qpair); 4154 BUG_ON(!h); 4155 rc = btree_insert64(&tgt->lun_qpair_map, 4156 cmd->unpacked_lun, h, GFP_ATOMIC); 4157 if (rc) { 4158 qpair->lun_cnt--; 4159 ql_log(ql_log_info, vha, 0xd037, 4160 "Unable to insert lun %llx into lun_qpair_map\n", 4161 cmd->unpacked_lun); 4162 } 4163 goto out; 4164 } else { 4165 lcnt = qpair->lun_cnt; 4166 } 4167 4168 h = NULL; 4169 list_for_each_entry(qp, &base_vha->qp_list, 4170 qp_list_elem) { 4171 if (qp->lun_cnt == 0) { 4172 qp->lun_cnt++; 4173 h = qla_qpair_to_hint(tgt, qp); 4174 BUG_ON(!h); 4175 rc = btree_insert64(&tgt->lun_qpair_map, 4176 cmd->unpacked_lun, h, GFP_ATOMIC); 4177 if (rc) { 4178 qp->lun_cnt--; 4179 ql_log(ql_log_info, vha, 0xd038, 4180 "Unable to insert lun %llx into lun_qpair_map\n", 4181 cmd->unpacked_lun); 4182 } 4183 qpair = qp; 4184 goto out; 4185 } else { 
4186 if (qp->lun_cnt < lcnt) { 4187 lcnt = qp->lun_cnt; 4188 qpair = qp; 4189 continue; 4190 } 4191 } 4192 } 4193 BUG_ON(!qpair); 4194 qpair->lun_cnt++; 4195 h = qla_qpair_to_hint(tgt, qpair); 4196 BUG_ON(!h); 4197 rc = btree_insert64(&tgt->lun_qpair_map, 4198 cmd->unpacked_lun, h, GFP_ATOMIC); 4199 if (rc) { 4200 qpair->lun_cnt--; 4201 ql_log(ql_log_info, vha, 0xd039, 4202 "Unable to insert lun %llx into lun_qpair_map\n", 4203 cmd->unpacked_lun); 4204 } 4205 } 4206 } else { 4207 h = &tgt->qphints[0]; 4208 } 4209 out: 4210 cmd->qpair = h->qpair; 4211 cmd->se_cmd.cpuid = h->cpuid; 4212 } 4213 4214 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4215 struct fc_port *sess, 4216 struct atio_from_isp *atio) 4217 { 4218 struct se_session *se_sess = sess->se_sess; 4219 struct qla_tgt_cmd *cmd; 4220 int tag; 4221 4222 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 4223 if (tag < 0) 4224 return NULL; 4225 4226 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 4227 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 4228 cmd->cmd_type = TYPE_TGT_CMD; 4229 memcpy(&cmd->atio, atio, sizeof(*atio)); 4230 cmd->state = QLA_TGT_STATE_NEW; 4231 cmd->tgt = vha->vha_tgt.qla_tgt; 4232 qlt_incr_num_pend_cmds(vha); 4233 cmd->vha = vha; 4234 cmd->se_cmd.map_tag = tag; 4235 cmd->sess = sess; 4236 cmd->loop_id = sess->loop_id; 4237 cmd->conf_compl_supported = sess->conf_compl_supported; 4238 4239 cmd->trc_flags = 0; 4240 cmd->jiffies_at_alloc = get_jiffies_64(); 4241 4242 cmd->unpacked_lun = scsilun_to_int( 4243 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4244 qlt_assign_qpair(vha, cmd); 4245 cmd->reset_count = vha->hw->base_qpair->chip_reset; 4246 cmd->vp_idx = vha->vp_idx; 4247 4248 return cmd; 4249 } 4250 4251 /* ha->hardware_lock supposed to be held on entry */ 4252 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4253 struct atio_from_isp *atio) 4254 { 4255 struct qla_hw_data *ha = vha->hw; 4256 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4257 struct fc_port *sess; 4258 struct qla_tgt_cmd *cmd; 4259 unsigned long flags; 4260 port_id_t id; 4261 4262 if (unlikely(tgt->tgt_stop)) { 4263 ql_dbg(ql_dbg_io, vha, 0x3061, 4264 "New command while device %p is shutting down\n", tgt); 4265 return -ENODEV; 4266 } 4267 4268 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 4269 id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; 4270 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; 4271 if (IS_SW_RESV_ADDR(id)) 4272 return -EBUSY; 4273 4274 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4275 if (unlikely(!sess)) 4276 return -EFAULT; 4277 4278 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4279 * session deletion, but it's still in sess_del_work wq */ 4280 if (sess->deleted) { 4281 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, 4282 "New command while old session %p is being deleted\n", 4283 sess); 4284 return -EFAULT; 4285 } 4286 4287 /* 4288 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
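 * The extra reference taken here is dropped again in __qlt_do_work(), which
 * calls tgt_ops->put_sess() on both the normal and the out_term paths.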
4289 */ 4290 if (!kref_get_unless_zero(&sess->sess_kref)) { 4291 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 4292 "%s: kref_get fail, %8phC oxid %x \n", 4293 __func__, sess->port_name, 4294 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 4295 return -EFAULT; 4296 } 4297 4298 cmd = qlt_get_tag(vha, sess, atio); 4299 if (!cmd) { 4300 ql_dbg(ql_dbg_io, vha, 0x3062, 4301 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4302 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4303 ha->tgt.tgt_ops->put_sess(sess); 4304 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4305 return -EBUSY; 4306 } 4307 4308 cmd->cmd_in_wq = 1; 4309 cmd->trc_flags |= TRC_NEW_CMD; 4310 4311 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4312 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4313 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4314 4315 INIT_WORK(&cmd->work, qlt_do_work); 4316 if (vha->flags.qpairs_available) { 4317 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); 4318 } else if (ha->msix_count) { 4319 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4320 queue_work_on(smp_processor_id(), qla_tgt_wq, 4321 &cmd->work); 4322 else 4323 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 4324 &cmd->work); 4325 } else { 4326 queue_work(qla_tgt_wq, &cmd->work); 4327 } 4328 4329 return 0; 4330 } 4331 4332 /* ha->hardware_lock supposed to be held on entry */ 4333 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 4334 int fn, void *iocb, int flags) 4335 { 4336 struct scsi_qla_host *vha = sess->vha; 4337 struct qla_hw_data *ha = vha->hw; 4338 struct qla_tgt_mgmt_cmd *mcmd; 4339 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4340 int res; 4341 4342 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4343 if (!mcmd) { 4344 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4345 "qla_target(%d): Allocation of management " 4346 "command failed, some commands and their data could " 4347 "leak\n", vha->vp_idx); 4348 return -ENOMEM; 4349 } 4350 memset(mcmd, 0, sizeof(*mcmd)); 4351 mcmd->sess = sess; 4352 4353 if (iocb) { 4354 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4355 sizeof(mcmd->orig_iocb.imm_ntfy)); 4356 } 4357 mcmd->tmr_func = fn; 4358 mcmd->flags = flags; 4359 mcmd->reset_count = ha->base_qpair->chip_reset; 4360 mcmd->qpair = ha->base_qpair; 4361 mcmd->vha = vha; 4362 4363 switch (fn) { 4364 case QLA_TGT_LUN_RESET: 4365 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4366 break; 4367 } 4368 4369 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); 4370 if (res != 0) { 4371 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 4372 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 4373 sess->vha->vp_idx, res); 4374 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4375 return -EFAULT; 4376 } 4377 4378 return 0; 4379 } 4380 4381 /* ha->hardware_lock supposed to be held on entry */ 4382 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4383 { 4384 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4385 struct qla_hw_data *ha = vha->hw; 4386 struct fc_port *sess; 4387 u64 unpacked_lun; 4388 int fn; 4389 unsigned long flags; 4390 4391 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4392 4393 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4394 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4395 a->u.isp24.fcp_hdr.s_id); 4396 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4397 4398 unpacked_lun = 4399 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4400 4401 if (sess == NULL || sess->deleted) 4402 return -EFAULT; 4403 4404 return qlt_issue_task_mgmt(sess, unpacked_lun, 
fn, iocb, 0); 4405 } 4406 4407 /* ha->hardware_lock supposed to be held on entry */ 4408 static int __qlt_abort_task(struct scsi_qla_host *vha, 4409 struct imm_ntfy_from_isp *iocb, struct fc_port *sess) 4410 { 4411 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4412 struct qla_hw_data *ha = vha->hw; 4413 struct qla_tgt_mgmt_cmd *mcmd; 4414 u64 unpacked_lun; 4415 int rc; 4416 4417 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4418 if (mcmd == NULL) { 4419 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4420 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4421 vha->vp_idx, __func__); 4422 return -ENOMEM; 4423 } 4424 memset(mcmd, 0, sizeof(*mcmd)); 4425 4426 mcmd->sess = sess; 4427 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4428 sizeof(mcmd->orig_iocb.imm_ntfy)); 4429 4430 unpacked_lun = 4431 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4432 mcmd->reset_count = ha->base_qpair->chip_reset; 4433 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; 4434 mcmd->qpair = ha->base_qpair; 4435 4436 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, 4437 le16_to_cpu(iocb->u.isp2x.seq_id)); 4438 if (rc != 0) { 4439 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4440 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4441 vha->vp_idx, rc); 4442 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4443 return -EFAULT; 4444 } 4445 4446 return 0; 4447 } 4448 4449 /* ha->hardware_lock supposed to be held on entry */ 4450 static int qlt_abort_task(struct scsi_qla_host *vha, 4451 struct imm_ntfy_from_isp *iocb) 4452 { 4453 struct qla_hw_data *ha = vha->hw; 4454 struct fc_port *sess; 4455 int loop_id; 4456 unsigned long flags; 4457 4458 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4459 4460 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4461 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4462 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4463 4464 if (sess == NULL) { 4465 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4466 "qla_target(%d): task abort for unexisting " 4467 "session\n", vha->vp_idx); 4468 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4469 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4470 } 4471 4472 return __qlt_abort_task(vha, iocb, sess); 4473 } 4474 4475 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4476 { 4477 if (rc != MBS_COMMAND_COMPLETE) { 4478 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4479 "%s: se_sess %p / sess %p from" 4480 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4481 " LOGO failed: %#x\n", 4482 __func__, 4483 fcport->se_sess, 4484 fcport, 4485 fcport->port_name, fcport->loop_id, 4486 fcport->d_id.b.domain, fcport->d_id.b.area, 4487 fcport->d_id.b.al_pa, rc); 4488 } 4489 4490 fcport->logout_completed = 1; 4491 } 4492 4493 /* 4494 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4495 * 4496 * Schedules sessions with matching port_id/loop_id but different wwn for 4497 * deletion. Returns existing session with matching wwn if present. 4498 * Null otherwise. 
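 * When another WWN still owns our port_id/loop_id and its deletion has not
 * completed, that session is also reported back through *conflict_sess so the
 * caller can link the pending PLOGI ACK to it.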
4499 */ 4500 struct fc_port * 4501 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, 4502 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) 4503 { 4504 struct fc_port *sess = NULL, *other_sess; 4505 uint64_t other_wwn; 4506 4507 *conflict_sess = NULL; 4508 4509 list_for_each_entry(other_sess, &vha->vp_fcports, list) { 4510 4511 other_wwn = wwn_to_u64(other_sess->port_name); 4512 4513 if (wwn == other_wwn) { 4514 WARN_ON(sess); 4515 sess = other_sess; 4516 continue; 4517 } 4518 4519 /* find other sess with nport_id collision */ 4520 if (port_id.b24 == other_sess->d_id.b24) { 4521 if (loop_id != other_sess->loop_id) { 4522 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, 4523 "Invalidating sess %p loop_id %d wwn %llx.\n", 4524 other_sess, other_sess->loop_id, other_wwn); 4525 4526 /* 4527 * logout_on_delete is set by default, but another 4528 * session that has the same s_id/loop_id combo 4529 * might have cleared it when requested this session 4530 * deletion, so don't touch it 4531 */ 4532 qlt_schedule_sess_for_deletion(other_sess); 4533 } else { 4534 /* 4535 * Another wwn used to have our s_id/loop_id 4536 * kill the session, but don't free the loop_id 4537 */ 4538 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, 4539 "Invalidating sess %p loop_id %d wwn %llx.\n", 4540 other_sess, other_sess->loop_id, other_wwn); 4541 4542 other_sess->keep_nport_handle = 1; 4543 if (other_sess->disc_state != DSC_DELETED) 4544 *conflict_sess = other_sess; 4545 qlt_schedule_sess_for_deletion(other_sess); 4546 } 4547 continue; 4548 } 4549 4550 /* find other sess with nport handle collision */ 4551 if ((loop_id == other_sess->loop_id) && 4552 (loop_id != FC_NO_LOOP_ID)) { 4553 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 4554 "Invalidating sess %p loop_id %d wwn %llx.\n", 4555 other_sess, other_sess->loop_id, other_wwn); 4556 4557 /* Same loop_id but different s_id 4558 * Ok to kill and logout */ 4559 qlt_schedule_sess_for_deletion(other_sess); 4560 } 4561 } 4562 4563 return sess; 4564 } 4565 4566 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4567 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4568 { 4569 struct qla_tgt_sess_op *op; 4570 struct qla_tgt_cmd *cmd; 4571 uint32_t key; 4572 int count = 0; 4573 unsigned long flags; 4574 4575 key = (((u32)s_id->b.domain << 16) | 4576 ((u32)s_id->b.area << 8) | 4577 ((u32)s_id->b.al_pa)); 4578 4579 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4580 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4581 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4582 4583 if (op_key == key) { 4584 op->aborted = true; 4585 count++; 4586 } 4587 } 4588 4589 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 4590 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4591 if (op_key == key) { 4592 op->aborted = true; 4593 count++; 4594 } 4595 } 4596 4597 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4598 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4599 if (cmd_key == key) { 4600 cmd->aborted = 1; 4601 count++; 4602 } 4603 } 4604 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4605 4606 return count; 4607 } 4608 4609 static int qlt_handle_login(struct scsi_qla_host *vha, 4610 struct imm_ntfy_from_isp *iocb) 4611 { 4612 struct fc_port *sess = NULL, *conflict_sess = NULL; 4613 uint64_t wwn; 4614 port_id_t port_id; 4615 uint16_t loop_id, wd3_lo; 4616 int res = 0; 4617 struct qlt_plogi_ack_t *pla; 4618 unsigned long flags; 4619 4620 wwn = 
wwn_to_u64(iocb->u.isp24.port_name); 4621 4622 port_id.b.domain = iocb->u.isp24.port_id[2]; 4623 port_id.b.area = iocb->u.isp24.port_id[1]; 4624 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4625 port_id.b.rsvd_1 = 0; 4626 4627 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4628 4629 /* Mark all stale commands sitting in qla_tgt_wq for deletion */ 4630 abort_cmds_for_s_id(vha, &port_id); 4631 4632 if (wwn) { 4633 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4634 sess = qlt_find_sess_invalidate_other(vha, wwn, 4635 port_id, loop_id, &conflict_sess); 4636 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4637 } 4638 4639 if (IS_SW_RESV_ADDR(port_id)) { 4640 res = 1; 4641 goto out; 4642 } 4643 4644 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4645 if (!pla) { 4646 qlt_send_term_imm_notif(vha, iocb, 1); 4647 goto out; 4648 } 4649 4650 if (conflict_sess) { 4651 conflict_sess->login_gen++; 4652 qlt_plogi_ack_link(vha, pla, conflict_sess, 4653 QLT_PLOGI_LINK_CONFLICT); 4654 } 4655 4656 if (!sess) { 4657 pla->ref_count++; 4658 ql_dbg(ql_dbg_disc, vha, 0xffff, 4659 "%s %d %8phC post new sess\n", 4660 __func__, __LINE__, iocb->u.isp24.port_name); 4661 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 4662 qla24xx_post_newsess_work(vha, &port_id, 4663 iocb->u.isp24.port_name, 4664 iocb->u.isp24.u.plogi.node_name, 4665 pla, FC4_TYPE_UNKNOWN); 4666 else 4667 qla24xx_post_newsess_work(vha, &port_id, 4668 iocb->u.isp24.port_name, NULL, 4669 pla, FC4_TYPE_UNKNOWN); 4670 4671 goto out; 4672 } 4673 4674 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4675 sess->d_id = port_id; 4676 sess->login_gen++; 4677 4678 if (iocb->u.isp24.status_subcode == ELS_PRLI) { 4679 sess->fw_login_state = DSC_LS_PRLI_PEND; 4680 sess->local = 0; 4681 sess->loop_id = loop_id; 4682 sess->d_id = port_id; 4683 sess->fw_login_state = DSC_LS_PRLI_PEND; 4684 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4685 4686 if (wd3_lo & BIT_7) 4687 sess->conf_compl_supported = 1; 4688 4689 if ((wd3_lo & BIT_4) == 0) 4690 sess->port_type = FCT_INITIATOR; 4691 else 4692 sess->port_type = FCT_TARGET; 4693 4694 } else 4695 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4696 4697 4698 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4699 "%s %d %8phC DS %d\n", 4700 __func__, __LINE__, sess->port_name, sess->disc_state); 4701 4702 switch (sess->disc_state) { 4703 case DSC_DELETED: 4704 qlt_plogi_ack_unref(vha, pla); 4705 break; 4706 4707 default: 4708 /* 4709 * Under normal circumstances we want to release nport handle 4710 * during LOGO process to avoid nport handle leaks inside FW. 4711 * The exception is when LOGO is done while another PLOGI with 4712 * the same nport handle is waiting as might be the case here. 4713 * Note: there is always a possibily of a race where session 4714 * deletion has already started for other reasons (e.g. ACL 4715 * removal) and now PLOGI arrives: 4716 * 1. if PLOGI arrived in FW after nport handle has been freed, 4717 * FW must have assigned this PLOGI a new/same handle and we 4718 * can proceed ACK'ing it as usual when session deletion 4719 * completes. 4720 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT 4721 * bit reached it, the handle has now been released. We'll 4722 * get an error when we ACK this PLOGI. Nothing will be sent 4723 * back to initiator. Initiator should eventually retry 4724 * PLOGI and situation will correct itself. 
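 * In short: keep_nport_handle is set below only when the incoming PLOGI
 * reuses the same loop_id and port_id as the session that is being torn down.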
4725 */ 4726 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4727 (sess->d_id.b24 == port_id.b24)); 4728 4729 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4730 "%s %d %8phC post del sess\n", 4731 __func__, __LINE__, sess->port_name); 4732 4733 4734 qlt_schedule_sess_for_deletion(sess); 4735 break; 4736 } 4737 out: 4738 return res; 4739 } 4740 4741 /* 4742 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4743 */ 4744 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4745 struct imm_ntfy_from_isp *iocb) 4746 { 4747 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4748 struct qla_hw_data *ha = vha->hw; 4749 struct fc_port *sess = NULL, *conflict_sess = NULL; 4750 uint64_t wwn; 4751 port_id_t port_id; 4752 uint16_t loop_id; 4753 uint16_t wd3_lo; 4754 int res = 0; 4755 unsigned long flags; 4756 4757 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4758 4759 port_id.b.domain = iocb->u.isp24.port_id[2]; 4760 port_id.b.area = iocb->u.isp24.port_id[1]; 4761 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4762 port_id.b.rsvd_1 = 0; 4763 4764 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4765 4766 ql_dbg(ql_dbg_disc, vha, 0xf026, 4767 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", 4768 vha->vp_idx, iocb->u.isp24.port_id[2], 4769 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 4770 iocb->u.isp24.status_subcode, loop_id, 4771 iocb->u.isp24.port_name); 4772 4773 /* res = 1 means ack at the end of thread 4774 * res = 0 means ack async/later. 4775 */ 4776 switch (iocb->u.isp24.status_subcode) { 4777 case ELS_PLOGI: 4778 res = qlt_handle_login(vha, iocb); 4779 break; 4780 4781 case ELS_PRLI: 4782 if (N2N_TOPO(ha)) { 4783 sess = qla2x00_find_fcport_by_wwpn(vha, 4784 iocb->u.isp24.port_name, 1); 4785 4786 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { 4787 ql_dbg(ql_dbg_disc, vha, 0xffff, 4788 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", 4789 __func__, __LINE__, 4790 iocb->u.isp24.port_name); 4791 qlt_send_term_imm_notif(vha, iocb, 1); 4792 break; 4793 } 4794 4795 res = qlt_handle_login(vha, iocb); 4796 break; 4797 } 4798 4799 if (IS_SW_RESV_ADDR(port_id)) { 4800 res = 1; 4801 break; 4802 } 4803 4804 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4805 4806 if (wwn) { 4807 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4808 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, 4809 loop_id, &conflict_sess); 4810 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4811 } 4812 4813 if (conflict_sess) { 4814 switch (conflict_sess->disc_state) { 4815 case DSC_DELETED: 4816 case DSC_DELETE_PEND: 4817 break; 4818 default: 4819 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4820 "PRLI with conflicting sess %p port %8phC\n", 4821 conflict_sess, conflict_sess->port_name); 4822 conflict_sess->fw_login_state = 4823 DSC_LS_PORT_UNAVAIL; 4824 qlt_send_term_imm_notif(vha, iocb, 1); 4825 res = 0; 4826 break; 4827 } 4828 } 4829 4830 if (sess != NULL) { 4831 bool delete = false; 4832 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4833 switch (sess->fw_login_state) { 4834 case DSC_LS_PLOGI_PEND: 4835 case DSC_LS_PLOGI_COMP: 4836 case DSC_LS_PRLI_COMP: 4837 break; 4838 default: 4839 delete = true; 4840 break; 4841 } 4842 4843 switch (sess->disc_state) { 4844 case DSC_LOGIN_PEND: 4845 case DSC_GPDB: 4846 case DSC_GPSC: 4847 case DSC_UPD_FCPORT: 4848 case DSC_LOGIN_COMPLETE: 4849 case DSC_ADISC: 4850 delete = false; 4851 break; 4852 default: 4853 break; 4854 } 4855 4856 if (delete) { 4857 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 4858 
flags); 4859 /* 4860 * Impatient initiator sent PRLI before last 4861 * PLOGI could finish. Will force him to re-try, 4862 * while last one finishes. 4863 */ 4864 ql_log(ql_log_warn, sess->vha, 0xf095, 4865 "sess %p PRLI received, before plogi ack.\n", 4866 sess); 4867 qlt_send_term_imm_notif(vha, iocb, 1); 4868 res = 0; 4869 break; 4870 } 4871 4872 /* 4873 * This shouldn't happen under normal circumstances, 4874 * since we have deleted the old session during PLOGI 4875 */ 4876 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 4877 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 4878 sess->loop_id, sess, iocb->u.isp24.nport_handle); 4879 4880 sess->local = 0; 4881 sess->loop_id = loop_id; 4882 sess->d_id = port_id; 4883 sess->fw_login_state = DSC_LS_PRLI_PEND; 4884 4885 if (wd3_lo & BIT_7) 4886 sess->conf_compl_supported = 1; 4887 4888 if ((wd3_lo & BIT_4) == 0) 4889 sess->port_type = FCT_INITIATOR; 4890 else 4891 sess->port_type = FCT_TARGET; 4892 4893 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4894 } 4895 res = 1; /* send notify ack */ 4896 4897 /* Make session global (not used in fabric mode) */ 4898 if (ha->current_topology != ISP_CFG_F) { 4899 if (sess) { 4900 ql_dbg(ql_dbg_disc, vha, 0x20fa, 4901 "%s %d %8phC post nack\n", 4902 __func__, __LINE__, sess->port_name); 4903 qla24xx_post_nack_work(vha, sess, iocb, 4904 SRB_NACK_PRLI); 4905 res = 0; 4906 } else { 4907 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4908 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4909 qla2xxx_wake_dpc(vha); 4910 } 4911 } else { 4912 if (sess) { 4913 ql_dbg(ql_dbg_disc, vha, 0x20fb, 4914 "%s %d %8phC post nack\n", 4915 __func__, __LINE__, sess->port_name); 4916 qla24xx_post_nack_work(vha, sess, iocb, 4917 SRB_NACK_PRLI); 4918 res = 0; 4919 } 4920 } 4921 break; 4922 4923 case ELS_TPRLO: 4924 if (le16_to_cpu(iocb->u.isp24.flags) & 4925 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 4926 loop_id = 0xFFFF; 4927 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); 4928 res = 1; 4929 break; 4930 } 4931 /* fall through */ 4932 case ELS_LOGO: 4933 case ELS_PRLO: 4934 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4935 sess = qla2x00_find_fcport_by_loopid(vha, loop_id); 4936 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4937 4938 if (sess) { 4939 sess->login_gen++; 4940 sess->fw_login_state = DSC_LS_LOGO_PEND; 4941 sess->logo_ack_needed = 1; 4942 memcpy(sess->iocb, iocb, IOCB_SIZE); 4943 } 4944 4945 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4946 4947 ql_dbg(ql_dbg_disc, vha, 0x20fc, 4948 "%s: logo %llx res %d sess %p ", 4949 __func__, wwn, res, sess); 4950 if (res == 0) { 4951 /* 4952 * cmd went upper layer, look for qlt_xmit_tm_rsp() 4953 * for LOGO_ACK & sess delete 4954 */ 4955 BUG_ON(!sess); 4956 res = 0; 4957 } else { 4958 /* cmd did not go to upper layer. 
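If a session exists it is scheduled for deletion below and res is cleared, so no immediate notify ack is sent (logo_ack_needed was set above); without a session res stays non-zero and the caller ACKs the LOGO right away.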
*/ 4959 if (sess) { 4960 qlt_schedule_sess_for_deletion(sess); 4961 res = 0; 4962 } 4963 /* else logo will be ack */ 4964 } 4965 break; 4966 case ELS_PDISC: 4967 case ELS_ADISC: 4968 { 4969 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4970 if (tgt->link_reinit_iocb_pending) { 4971 qlt_send_notify_ack(ha->base_qpair, 4972 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 4973 tgt->link_reinit_iocb_pending = 0; 4974 } 4975 4976 sess = qla2x00_find_fcport_by_wwpn(vha, 4977 iocb->u.isp24.port_name, 1); 4978 if (sess) { 4979 ql_dbg(ql_dbg_disc, vha, 0x20fd, 4980 "sess %p lid %d|%d DS %d LS %d\n", 4981 sess, sess->loop_id, loop_id, 4982 sess->disc_state, sess->fw_login_state); 4983 } 4984 4985 res = 1; /* send notify ack */ 4986 break; 4987 } 4988 4989 case ELS_FLOGI: /* should never happen */ 4990 default: 4991 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4992 "qla_target(%d): Unsupported ELS command %x " 4993 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 4994 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4995 break; 4996 } 4997 4998 ql_dbg(ql_dbg_disc, vha, 0xf026, 4999 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", 5000 vha->vp_idx, iocb->u.isp24.status_subcode, res); 5001 5002 return res; 5003 } 5004 5005 /* 5006 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5007 */ 5008 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 5009 struct imm_ntfy_from_isp *iocb) 5010 { 5011 struct qla_hw_data *ha = vha->hw; 5012 uint32_t add_flags = 0; 5013 int send_notify_ack = 1; 5014 uint16_t status; 5015 5016 status = le16_to_cpu(iocb->u.isp2x.status); 5017 switch (status) { 5018 case IMM_NTFY_LIP_RESET: 5019 { 5020 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 5021 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 5022 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 5023 iocb->u.isp24.status_subcode); 5024 5025 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5026 send_notify_ack = 0; 5027 break; 5028 } 5029 5030 case IMM_NTFY_LIP_LINK_REINIT: 5031 { 5032 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5033 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 5034 "qla_target(%d): LINK REINIT (loop %#x, " 5035 "subcode %x)\n", vha->vp_idx, 5036 le16_to_cpu(iocb->u.isp24.nport_handle), 5037 iocb->u.isp24.status_subcode); 5038 if (tgt->link_reinit_iocb_pending) { 5039 qlt_send_notify_ack(ha->base_qpair, 5040 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5041 } 5042 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 5043 tgt->link_reinit_iocb_pending = 1; 5044 /* 5045 * QLogic requires to wait after LINK REINIT for possible 5046 * PDISC or ADISC ELS commands 5047 */ 5048 send_notify_ack = 0; 5049 break; 5050 } 5051 5052 case IMM_NTFY_PORT_LOGOUT: 5053 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 5054 "qla_target(%d): Port logout (loop " 5055 "%#x, subcode %x)\n", vha->vp_idx, 5056 le16_to_cpu(iocb->u.isp24.nport_handle), 5057 iocb->u.isp24.status_subcode); 5058 5059 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 5060 send_notify_ack = 0; 5061 /* The sessions will be cleared in the callback, if needed */ 5062 break; 5063 5064 case IMM_NTFY_GLBL_TPRLO: 5065 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 5066 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 5067 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5068 send_notify_ack = 0; 5069 /* The sessions will be cleared in the callback, if needed */ 5070 break; 5071 5072 case IMM_NTFY_PORT_CONFIG: 5073 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 5074 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 5075 status); 5076 if 
(qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5077 send_notify_ack = 0; 5078 /* The sessions will be cleared in the callback, if needed */ 5079 break; 5080 5081 case IMM_NTFY_GLBL_LOGO: 5082 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 5083 "qla_target(%d): Link failure detected\n", 5084 vha->vp_idx); 5085 /* I_T nexus loss */ 5086 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5087 send_notify_ack = 0; 5088 break; 5089 5090 case IMM_NTFY_IOCB_OVERFLOW: 5091 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 5092 "qla_target(%d): Cannot provide requested " 5093 "capability (IOCB overflowed the immediate notify " 5094 "resource count)\n", vha->vp_idx); 5095 break; 5096 5097 case IMM_NTFY_ABORT_TASK: 5098 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 5099 "qla_target(%d): Abort Task (S %08x I %#x -> " 5100 "L %#x)\n", vha->vp_idx, 5101 le16_to_cpu(iocb->u.isp2x.seq_id), 5102 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 5103 le16_to_cpu(iocb->u.isp2x.lun)); 5104 if (qlt_abort_task(vha, iocb) == 0) 5105 send_notify_ack = 0; 5106 break; 5107 5108 case IMM_NTFY_RESOURCE: 5109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 5110 "qla_target(%d): Out of resources, host %ld\n", 5111 vha->vp_idx, vha->host_no); 5112 break; 5113 5114 case IMM_NTFY_MSG_RX: 5115 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 5116 "qla_target(%d): Immediate notify task %x\n", 5117 vha->vp_idx, iocb->u.isp2x.task_flags); 5118 if (qlt_handle_task_mgmt(vha, iocb) == 0) 5119 send_notify_ack = 0; 5120 break; 5121 5122 case IMM_NTFY_ELS: 5123 if (qlt_24xx_handle_els(vha, iocb) == 0) 5124 send_notify_ack = 0; 5125 break; 5126 default: 5127 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 5128 "qla_target(%d): Received unknown immediate " 5129 "notify status %x\n", vha->vp_idx, status); 5130 break; 5131 } 5132 5133 if (send_notify_ack) 5134 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, 5135 0, 0); 5136 } 5137 5138 /* 5139 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5140 * This function sends busy to ISP 2xxx or 24xx. 
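 * The status argument is the SCSI status placed into the CTIO's scsi_status
 * field (e.g. SAM_STAT_BUSY for a queue-full reply) and is returned to the
 * initiator in a status-mode-1 CTIO.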
5141 */ 5142 static int __qlt_send_busy(struct qla_qpair *qpair, 5143 struct atio_from_isp *atio, uint16_t status) 5144 { 5145 struct scsi_qla_host *vha = qpair->vha; 5146 struct ctio7_to_24xx *ctio24; 5147 struct qla_hw_data *ha = vha->hw; 5148 request_t *pkt; 5149 struct fc_port *sess = NULL; 5150 unsigned long flags; 5151 u16 temp; 5152 5153 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5154 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5155 atio->u.isp24.fcp_hdr.s_id); 5156 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5157 if (!sess) { 5158 qlt_send_term_exchange(qpair, NULL, atio, 1, 0); 5159 return 0; 5160 } 5161 /* Sending marker isn't necessary, since we are called from ISR */ 5162 5163 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 5164 if (!pkt) { 5165 ql_dbg(ql_dbg_io, vha, 0x3063, 5166 "qla_target(%d): %s failed: unable to allocate " 5167 "request packet", vha->vp_idx, __func__); 5168 return -ENOMEM; 5169 } 5170 5171 qpair->tgt_counters.num_q_full_sent++; 5172 pkt->entry_count = 1; 5173 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5174 5175 ctio24 = (struct ctio7_to_24xx *)pkt; 5176 ctio24->entry_type = CTIO_TYPE7; 5177 ctio24->nport_handle = sess->loop_id; 5178 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5179 ctio24->vp_index = vha->vp_idx; 5180 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5181 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 5182 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 5183 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5184 temp = (atio->u.isp24.attr << 9) | 5185 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5186 CTIO7_FLAGS_DONT_RET_CTIO; 5187 ctio24->u.status1.flags = cpu_to_le16(temp); 5188 /* 5189 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, 5190 * if explicit confirmation is used. 5191 */ 5192 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5193 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5194 /* Memory Barrier */ 5195 wmb(); 5196 if (qpair->reqq_start_iocbs) 5197 qpair->reqq_start_iocbs(qpair); 5198 else 5199 qla2x00_start_iocbs(vha, qpair->req); 5200 return 0; 5201 } 5202 5203 /* 5204 * This routine is used to allocate a command for either a QFull condition 5205 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5206 * out previously. 
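 * Nothing is sent from here: the command is only parked on
 * ha->tgt.q_full_list, and the actual busy reply or terminate exchange is
 * issued later by qlt_free_qfull_cmds().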
5207 */ 5208 static void 5209 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5210 struct atio_from_isp *atio, uint16_t status, int qfull) 5211 { 5212 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5213 struct qla_hw_data *ha = vha->hw; 5214 struct fc_port *sess; 5215 struct se_session *se_sess; 5216 struct qla_tgt_cmd *cmd; 5217 int tag; 5218 unsigned long flags; 5219 5220 if (unlikely(tgt->tgt_stop)) { 5221 ql_dbg(ql_dbg_io, vha, 0x300a, 5222 "New command while device %p is shutting down\n", tgt); 5223 return; 5224 } 5225 5226 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5227 vha->hw->tgt.num_qfull_cmds_dropped++; 5228 if (vha->hw->tgt.num_qfull_cmds_dropped > 5229 vha->qla_stats.stat_max_qfull_cmds_dropped) 5230 vha->qla_stats.stat_max_qfull_cmds_dropped = 5231 vha->hw->tgt.num_qfull_cmds_dropped; 5232 5233 ql_dbg(ql_dbg_io, vha, 0x3068, 5234 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5235 vha->vp_idx, __func__, 5236 vha->hw->tgt.num_qfull_cmds_dropped); 5237 5238 qlt_chk_exch_leak_thresh_hold(vha); 5239 return; 5240 } 5241 5242 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5243 (vha, atio->u.isp24.fcp_hdr.s_id); 5244 if (!sess) 5245 return; 5246 5247 se_sess = sess->se_sess; 5248 5249 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 5250 if (tag < 0) 5251 return; 5252 5253 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5254 if (!cmd) { 5255 ql_dbg(ql_dbg_io, vha, 0x3009, 5256 "qla_target(%d): %s: Allocation of cmd failed\n", 5257 vha->vp_idx, __func__); 5258 5259 vha->hw->tgt.num_qfull_cmds_dropped++; 5260 if (vha->hw->tgt.num_qfull_cmds_dropped > 5261 vha->qla_stats.stat_max_qfull_cmds_dropped) 5262 vha->qla_stats.stat_max_qfull_cmds_dropped = 5263 vha->hw->tgt.num_qfull_cmds_dropped; 5264 5265 qlt_chk_exch_leak_thresh_hold(vha); 5266 return; 5267 } 5268 5269 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5270 5271 qlt_incr_num_pend_cmds(vha); 5272 INIT_LIST_HEAD(&cmd->cmd_list); 5273 memcpy(&cmd->atio, atio, sizeof(*atio)); 5274 5275 cmd->tgt = vha->vha_tgt.qla_tgt; 5276 cmd->vha = vha; 5277 cmd->reset_count = ha->base_qpair->chip_reset; 5278 cmd->q_full = 1; 5279 cmd->qpair = ha->base_qpair; 5280 5281 if (qfull) { 5282 cmd->q_full = 1; 5283 /* NOTE: borrowing the state field to carry the status */ 5284 cmd->state = status; 5285 } else 5286 cmd->term_exchg = 1; 5287 5288 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5289 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5290 5291 vha->hw->tgt.num_qfull_cmds_alloc++; 5292 if (vha->hw->tgt.num_qfull_cmds_alloc > 5293 vha->qla_stats.stat_max_qfull_cmds_alloc) 5294 vha->qla_stats.stat_max_qfull_cmds_alloc = 5295 vha->hw->tgt.num_qfull_cmds_alloc; 5296 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5297 } 5298 5299 int 5300 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5301 { 5302 struct scsi_qla_host *vha = qpair->vha; 5303 struct qla_hw_data *ha = vha->hw; 5304 unsigned long flags; 5305 struct qla_tgt_cmd *cmd, *tcmd; 5306 struct list_head free_list, q_full_list; 5307 int rc = 0; 5308 5309 if (list_empty(&ha->tgt.q_full_list)) 5310 return 0; 5311 5312 INIT_LIST_HEAD(&free_list); 5313 INIT_LIST_HEAD(&q_full_list); 5314 5315 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5316 if (list_empty(&ha->tgt.q_full_list)) { 5317 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5318 return 0; 5319 } 5320 5321 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5322 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5323 5324 
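/* The pending commands now sit on the local q_full_list; switch from the q_full_lock to this qpair's lock so the busy/terminate IOCBs below can be placed on its request queue. */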
spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5325 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5326 if (cmd->q_full) 5327 /* cmd->state is a borrowed field to hold status */ 5328 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5329 else if (cmd->term_exchg) 5330 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5331 5332 if (rc == -ENOMEM) 5333 break; 5334 5335 if (cmd->q_full) 5336 ql_dbg(ql_dbg_io, vha, 0x3006, 5337 "%s: busy sent for ox_id[%04x]\n", __func__, 5338 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5339 else if (cmd->term_exchg) 5340 ql_dbg(ql_dbg_io, vha, 0x3007, 5341 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5342 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5343 else 5344 ql_dbg(ql_dbg_io, vha, 0x3008, 5345 "%s: Unexpected cmd in QFull list %p\n", __func__, 5346 cmd); 5347 5348 list_del(&cmd->cmd_list); 5349 list_add_tail(&cmd->cmd_list, &free_list); 5350 5351 /* piggy back on hardware_lock for protection */ 5352 vha->hw->tgt.num_qfull_cmds_alloc--; 5353 } 5354 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5355 5356 cmd = NULL; 5357 5358 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5359 list_del(&cmd->cmd_list); 5360 /* This cmd was never sent to TCM. There is no need 5361 * to schedule free or call free_cmd 5362 */ 5363 qlt_free_cmd(cmd); 5364 } 5365 5366 if (!list_empty(&q_full_list)) { 5367 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5368 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5369 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5370 } 5371 5372 return rc; 5373 } 5374 5375 static void 5376 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5377 uint16_t status) 5378 { 5379 int rc = 0; 5380 struct scsi_qla_host *vha = qpair->vha; 5381 5382 rc = __qlt_send_busy(qpair, atio, status); 5383 if (rc == -ENOMEM) 5384 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5385 } 5386 5387 static int 5388 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5389 struct atio_from_isp *atio, uint8_t ha_locked) 5390 { 5391 struct qla_hw_data *ha = vha->hw; 5392 unsigned long flags; 5393 5394 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5395 return 0; 5396 5397 if (!ha_locked) 5398 spin_lock_irqsave(&ha->hardware_lock, flags); 5399 qlt_send_busy(qpair, atio, qla_sam_status); 5400 if (!ha_locked) 5401 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5402 5403 return 1; 5404 } 5405 5406 /* ha->hardware_lock supposed to be held on entry */ 5407 /* called via callback from qla2xxx */ 5408 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5409 struct atio_from_isp *atio, uint8_t ha_locked) 5410 { 5411 struct qla_hw_data *ha = vha->hw; 5412 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5413 int rc; 5414 unsigned long flags = 0; 5415 5416 if (unlikely(tgt == NULL)) { 5417 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5418 "ATIO pkt, but no tgt (ha %p)", ha); 5419 return; 5420 } 5421 /* 5422 * In tgt_stop mode we also should allow all requests to pass. 5423 * Otherwise, some commands can stuck. 
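 * That is why there is no tgt_stop bail-out here; atio_irq_cmd_count below
 * simply tracks ATIO handling that is still in flight.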
5424 */ 5425 5426 tgt->atio_irq_cmd_count++; 5427 5428 switch (atio->u.raw.entry_type) { 5429 case ATIO_TYPE7: 5430 if (unlikely(atio->u.isp24.exchange_addr == 5431 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5432 ql_dbg(ql_dbg_io, vha, 0x3065, 5433 "qla_target(%d): ATIO_TYPE7 " 5434 "received with UNKNOWN exchange address, " 5435 "sending QUEUE_FULL\n", vha->vp_idx); 5436 if (!ha_locked) 5437 spin_lock_irqsave(&ha->hardware_lock, flags); 5438 qlt_send_busy(ha->base_qpair, atio, qla_sam_status); 5439 if (!ha_locked) 5440 spin_unlock_irqrestore(&ha->hardware_lock, 5441 flags); 5442 break; 5443 } 5444 5445 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5446 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, 5447 atio, ha_locked); 5448 if (rc != 0) { 5449 tgt->atio_irq_cmd_count--; 5450 return; 5451 } 5452 rc = qlt_handle_cmd_for_atio(vha, atio); 5453 } else { 5454 rc = qlt_handle_task_mgmt(vha, atio); 5455 } 5456 if (unlikely(rc != 0)) { 5457 if (!ha_locked) 5458 spin_lock_irqsave(&ha->hardware_lock, flags); 5459 switch (rc) { 5460 case -ENODEV: 5461 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5462 "qla_target: Unable to send command to target\n"); 5463 break; 5464 case -EBADF: 5465 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5466 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5467 qlt_send_term_exchange(ha->base_qpair, NULL, 5468 atio, 1, 0); 5469 break; 5470 case -EBUSY: 5471 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5472 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5473 vha->vp_idx); 5474 qlt_send_busy(ha->base_qpair, atio, 5475 tc_sam_status); 5476 break; 5477 default: 5478 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5479 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5480 vha->vp_idx); 5481 qlt_send_busy(ha->base_qpair, atio, 5482 qla_sam_status); 5483 break; 5484 } 5485 if (!ha_locked) 5486 spin_unlock_irqrestore(&ha->hardware_lock, 5487 flags); 5488 } 5489 break; 5490 5491 case IMMED_NOTIFY_TYPE: 5492 { 5493 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5494 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5495 "qla_target(%d): Received ATIO packet %x " 5496 "with error status %x\n", vha->vp_idx, 5497 atio->u.raw.entry_type, 5498 atio->u.isp2x.entry_status); 5499 break; 5500 } 5501 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5502 5503 if (!ha_locked) 5504 spin_lock_irqsave(&ha->hardware_lock, flags); 5505 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5506 if (!ha_locked) 5507 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5508 break; 5509 } 5510 5511 default: 5512 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5513 "qla_target(%d): Received unknown ATIO atio " 5514 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5515 break; 5516 } 5517 5518 tgt->atio_irq_cmd_count--; 5519 } 5520 5521 /* ha->hardware_lock supposed to be held on entry */ 5522 /* called via callback from qla2xxx */ 5523 static void qlt_response_pkt(struct scsi_qla_host *vha, 5524 struct rsp_que *rsp, response_t *pkt) 5525 { 5526 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5527 5528 if (unlikely(tgt == NULL)) { 5529 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5530 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5531 vha->vp_idx, pkt->entry_type, vha->hw); 5532 return; 5533 } 5534 5535 /* 5536 * In tgt_stop mode we also should allow all requests to pass. 5537 * Otherwise, some commands can stuck. 
5538 */ 5539 5540 switch (pkt->entry_type) { 5541 case CTIO_CRC2: 5542 case CTIO_TYPE7: 5543 { 5544 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5545 qlt_do_ctio_completion(vha, rsp, entry->handle, 5546 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5547 entry); 5548 break; 5549 } 5550 5551 case ACCEPT_TGT_IO_TYPE: 5552 { 5553 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5554 int rc; 5555 if (atio->u.isp2x.status != 5556 cpu_to_le16(ATIO_CDB_VALID)) { 5557 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5558 "qla_target(%d): ATIO with error " 5559 "status %x received\n", vha->vp_idx, 5560 le16_to_cpu(atio->u.isp2x.status)); 5561 break; 5562 } 5563 5564 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5565 if (rc != 0) 5566 return; 5567 5568 rc = qlt_handle_cmd_for_atio(vha, atio); 5569 if (unlikely(rc != 0)) { 5570 switch (rc) { 5571 case -ENODEV: 5572 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5573 "qla_target: Unable to send command to target\n"); 5574 break; 5575 case -EBADF: 5576 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5577 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5578 qlt_send_term_exchange(rsp->qpair, NULL, 5579 atio, 1, 0); 5580 break; 5581 case -EBUSY: 5582 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5583 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5584 vha->vp_idx); 5585 qlt_send_busy(rsp->qpair, atio, 5586 tc_sam_status); 5587 break; 5588 default: 5589 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5590 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5591 vha->vp_idx); 5592 qlt_send_busy(rsp->qpair, atio, 5593 qla_sam_status); 5594 break; 5595 } 5596 } 5597 } 5598 break; 5599 5600 case CONTINUE_TGT_IO_TYPE: 5601 { 5602 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5603 qlt_do_ctio_completion(vha, rsp, entry->handle, 5604 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5605 entry); 5606 break; 5607 } 5608 5609 case CTIO_A64_TYPE: 5610 { 5611 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5612 qlt_do_ctio_completion(vha, rsp, entry->handle, 5613 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5614 entry); 5615 break; 5616 } 5617 5618 case IMMED_NOTIFY_TYPE: 5619 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5620 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5621 break; 5622 5623 case NOTIFY_ACK_TYPE: 5624 if (tgt->notify_ack_expected > 0) { 5625 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5626 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5627 "NOTIFY_ACK seq %08x status %x\n", 5628 le16_to_cpu(entry->u.isp2x.seq_id), 5629 le16_to_cpu(entry->u.isp2x.status)); 5630 tgt->notify_ack_expected--; 5631 if (entry->u.isp2x.status != 5632 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5633 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5634 "qla_target(%d): NOTIFY_ACK " 5635 "failed %x\n", vha->vp_idx, 5636 le16_to_cpu(entry->u.isp2x.status)); 5637 } 5638 } else { 5639 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5640 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5641 vha->vp_idx); 5642 } 5643 break; 5644 5645 case ABTS_RECV_24XX: 5646 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5647 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5648 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5649 break; 5650 5651 case ABTS_RESP_24XX: 5652 if (tgt->abts_resp_expected > 0) { 5653 struct abts_resp_from_24xx_fw *entry = 5654 (struct abts_resp_from_24xx_fw *)pkt; 5655 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5656 "ABTS_RESP_24XX: compl_status %x\n", 5657 entry->compl_status); 5658 tgt->abts_resp_expected--; 
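/* abts_resp_expected is presumably bumped when the driver queues an ABTS response to the firmware; the decrement above pairs with that, and only the completion status still needs to be checked below. */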
5659 if (le16_to_cpu(entry->compl_status) != 5660 ABTS_RESP_COMPL_SUCCESS) { 5661 if ((entry->error_subcode1 == 0x1E) && 5662 (entry->error_subcode2 == 0)) { 5663 /* 5664 * We've got a race here: aborted 5665 * exchange not terminated, i.e. 5666 * response for the aborted command was 5667 * sent between the abort request was 5668 * received and processed. 5669 * Unfortunately, the firmware has a 5670 * silly requirement that all aborted 5671 * exchanges must be explicitely 5672 * terminated, otherwise it refuses to 5673 * send responses for the abort 5674 * requests. So, we have to 5675 * (re)terminate the exchange and retry 5676 * the abort response. 5677 */ 5678 qlt_24xx_retry_term_exchange(vha, 5679 entry); 5680 } else 5681 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5682 "qla_target(%d): ABTS_RESP_24XX " 5683 "failed %x (subcode %x:%x)", 5684 vha->vp_idx, entry->compl_status, 5685 entry->error_subcode1, 5686 entry->error_subcode2); 5687 } 5688 } else { 5689 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5690 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5691 "received\n", vha->vp_idx); 5692 } 5693 break; 5694 5695 default: 5696 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5697 "qla_target(%d): Received unknown response pkt " 5698 "type %x\n", vha->vp_idx, pkt->entry_type); 5699 break; 5700 } 5701 5702 } 5703 5704 /* 5705 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5706 */ 5707 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5708 uint16_t *mailbox) 5709 { 5710 struct qla_hw_data *ha = vha->hw; 5711 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5712 int login_code; 5713 5714 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 5715 return; 5716 5717 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5718 IS_QLA2100(ha)) 5719 return; 5720 /* 5721 * In tgt_stop mode we also should allow all requests to pass. 5722 * Otherwise, some commands can stuck. 5723 */ 5724 5725 5726 switch (code) { 5727 case MBA_RESET: /* Reset */ 5728 case MBA_SYSTEM_ERR: /* System Error */ 5729 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5730 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5731 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5732 "qla_target(%d): System error async event %#x " 5733 "occurred", vha->vp_idx, code); 5734 break; 5735 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. 
*/ 5736 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5737 break; 5738 5739 case MBA_LOOP_UP: 5740 { 5741 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5742 "qla_target(%d): Async LOOP_UP occurred " 5743 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5744 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5745 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5746 if (tgt->link_reinit_iocb_pending) { 5747 qlt_send_notify_ack(ha->base_qpair, 5748 (void *)&tgt->link_reinit_iocb, 5749 0, 0, 0, 0, 0, 0); 5750 tgt->link_reinit_iocb_pending = 0; 5751 } 5752 break; 5753 } 5754 5755 case MBA_LIP_OCCURRED: 5756 case MBA_LOOP_DOWN: 5757 case MBA_LIP_RESET: 5758 case MBA_RSCN_UPDATE: 5759 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5760 "qla_target(%d): Async event %#x occurred " 5761 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5762 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5763 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5764 break; 5765 5766 case MBA_REJECTED_FCP_CMD: 5767 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 5768 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 5769 vha->vp_idx, 5770 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5771 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5772 5773 if (le16_to_cpu(mailbox[3]) == 1) { 5774 /* exchange starvation. */ 5775 vha->hw->exch_starvation++; 5776 if (vha->hw->exch_starvation > 5) { 5777 ql_log(ql_log_warn, vha, 0xd03a, 5778 "Exchange starvation-. Resetting RISC\n"); 5779 5780 vha->hw->exch_starvation = 0; 5781 if (IS_P3P_TYPE(vha->hw)) 5782 set_bit(FCOE_CTX_RESET_NEEDED, 5783 &vha->dpc_flags); 5784 else 5785 set_bit(ISP_ABORT_NEEDED, 5786 &vha->dpc_flags); 5787 qla2xxx_wake_dpc(vha); 5788 } 5789 } 5790 break; 5791 5792 case MBA_PORT_UPDATE: 5793 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 5794 "qla_target(%d): Port update async event %#x " 5795 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 5796 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5797 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5798 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5799 5800 login_code = le16_to_cpu(mailbox[2]); 5801 if (login_code == 0x4) { 5802 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 5803 "Async MB 2: Got PLOGI Complete\n"); 5804 vha->hw->exch_starvation = 0; 5805 } else if (login_code == 0x7) 5806 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 5807 "Async MB 2: Port Logged Out\n"); 5808 break; 5809 default: 5810 break; 5811 } 5812 5813 } 5814 5815 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 5816 uint16_t loop_id) 5817 { 5818 fc_port_t *fcport, *tfcp, *del; 5819 int rc; 5820 unsigned long flags; 5821 u8 newfcport = 0; 5822 5823 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5824 if (!fcport) { 5825 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 5826 "qla_target(%d): Allocation of tmp FC port failed", 5827 vha->vp_idx); 5828 return NULL; 5829 } 5830 5831 fcport->loop_id = loop_id; 5832 5833 rc = qla24xx_gpdb_wait(vha, fcport, 0); 5834 if (rc != QLA_SUCCESS) { 5835 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 5836 "qla_target(%d): Failed to retrieve fcport " 5837 "information -- get_port_database() returned %x " 5838 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 5839 kfree(fcport); 5840 return NULL; 5841 } 5842 5843 del = NULL; 5844 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5845 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 5846 5847 if (tfcp) { 5848 tfcp->d_id = fcport->d_id; 5849 tfcp->port_type = fcport->port_type; 5850 tfcp->supported_classes = fcport->supported_classes; 5851 tfcp->flags 
|= fcport->flags; 5852 tfcp->scan_state = QLA_FCPORT_FOUND; 5853 5854 del = fcport; 5855 fcport = tfcp; 5856 } else { 5857 if (vha->hw->current_topology == ISP_CFG_F) 5858 fcport->flags |= FCF_FABRIC_DEVICE; 5859 5860 list_add_tail(&fcport->list, &vha->vp_fcports); 5861 if (!IS_SW_RESV_ADDR(fcport->d_id)) 5862 vha->fcport_count++; 5863 fcport->login_gen++; 5864 fcport->disc_state = DSC_LOGIN_COMPLETE; 5865 fcport->login_succ = 1; 5866 newfcport = 1; 5867 } 5868 5869 fcport->deleted = 0; 5870 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5871 5872 switch (vha->host->active_mode) { 5873 case MODE_INITIATOR: 5874 case MODE_DUAL: 5875 if (newfcport) { 5876 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 5877 ql_dbg(ql_dbg_disc, vha, 0x20fe, 5878 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 5879 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5880 qla24xx_post_upd_fcport_work(vha, fcport); 5881 } else { 5882 ql_dbg(ql_dbg_disc, vha, 0x20ff, 5883 "%s %d %8phC post gpsc fcp_cnt %d\n", 5884 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5885 qla24xx_post_gpsc_work(vha, fcport); 5886 } 5887 } 5888 break; 5889 5890 case MODE_TARGET: 5891 default: 5892 break; 5893 } 5894 if (del) 5895 qla2x00_free_fcport(del); 5896 5897 return fcport; 5898 } 5899 5900 /* Must be called under tgt_mutex */ 5901 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 5902 uint8_t *s_id) 5903 { 5904 struct fc_port *sess = NULL; 5905 fc_port_t *fcport = NULL; 5906 int rc, global_resets; 5907 uint16_t loop_id = 0; 5908 5909 if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { 5910 /* 5911 * This is Domain Controller, so it should be 5912 * OK to drop SCSI commands from it. 5913 */ 5914 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 5915 "Unable to find initiator with S_ID %x:%x:%x", 5916 s_id[0], s_id[1], s_id[2]); 5917 return NULL; 5918 } 5919 5920 mutex_lock(&vha->vha_tgt.tgt_mutex); 5921 5922 retry: 5923 global_resets = 5924 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 5925 5926 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 5927 if (rc != 0) { 5928 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5929 5930 ql_log(ql_log_info, vha, 0xf071, 5931 "qla_target(%d): Unable to find " 5932 "initiator with S_ID %x:%x:%x", 5933 vha->vp_idx, s_id[0], s_id[1], 5934 s_id[2]); 5935 5936 if (rc == -ENOENT) { 5937 qlt_port_logo_t logo; 5938 sid_to_portid(s_id, &logo.id); 5939 logo.cmd_count = 1; 5940 qlt_send_first_logo(vha, &logo); 5941 } 5942 5943 return NULL; 5944 } 5945 5946 fcport = qlt_get_port_database(vha, loop_id); 5947 if (!fcport) { 5948 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5949 return NULL; 5950 } 5951 5952 if (global_resets != 5953 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 5954 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 5955 "qla_target(%d): global reset during session discovery " 5956 "(counter was %d, new %d), retrying", vha->vp_idx, 5957 global_resets, 5958 atomic_read(&vha->vha_tgt. 
5959 qla_tgt->tgt_global_resets_count)); 5960 goto retry; 5961 } 5962 5963 sess = qlt_create_sess(vha, fcport, true); 5964 5965 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5966 5967 return sess; 5968 } 5969 5970 static void qlt_abort_work(struct qla_tgt *tgt, 5971 struct qla_tgt_sess_work_param *prm) 5972 { 5973 struct scsi_qla_host *vha = tgt->vha; 5974 struct qla_hw_data *ha = vha->hw; 5975 struct fc_port *sess = NULL; 5976 unsigned long flags = 0, flags2 = 0; 5977 uint32_t be_s_id; 5978 uint8_t s_id[3]; 5979 int rc; 5980 5981 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 5982 5983 if (tgt->tgt_stop) 5984 goto out_term2; 5985 5986 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 5987 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 5988 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 5989 5990 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5991 (unsigned char *)&be_s_id); 5992 if (!sess) { 5993 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5994 5995 sess = qlt_make_local_sess(vha, s_id); 5996 /* sess has got an extra creation ref */ 5997 5998 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 5999 if (!sess) 6000 goto out_term2; 6001 } else { 6002 if (sess->deleted) { 6003 sess = NULL; 6004 goto out_term2; 6005 } 6006 6007 if (!kref_get_unless_zero(&sess->sess_kref)) { 6008 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, 6009 "%s: kref_get fail %8phC \n", 6010 __func__, sess->port_name); 6011 sess = NULL; 6012 goto out_term2; 6013 } 6014 } 6015 6016 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 6017 ha->tgt.tgt_ops->put_sess(sess); 6018 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6019 6020 if (rc != 0) 6021 goto out_term; 6022 return; 6023 6024 out_term2: 6025 if (sess) 6026 ha->tgt.tgt_ops->put_sess(sess); 6027 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6028 6029 out_term: 6030 spin_lock_irqsave(&ha->hardware_lock, flags); 6031 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6032 FCP_TMF_REJECTED, false); 6033 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6034 } 6035 6036 static void qlt_tmr_work(struct qla_tgt *tgt, 6037 struct qla_tgt_sess_work_param *prm) 6038 { 6039 struct atio_from_isp *a = &prm->tm_iocb2; 6040 struct scsi_qla_host *vha = tgt->vha; 6041 struct qla_hw_data *ha = vha->hw; 6042 struct fc_port *sess = NULL; 6043 unsigned long flags; 6044 uint8_t *s_id = NULL; /* to hide compiler warnings */ 6045 int rc; 6046 u64 unpacked_lun; 6047 int fn; 6048 void *iocb; 6049 6050 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6051 6052 if (tgt->tgt_stop) 6053 goto out_term2; 6054 6055 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 6056 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6057 if (!sess) { 6058 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6059 6060 sess = qlt_make_local_sess(vha, s_id); 6061 /* sess has got an extra creation ref */ 6062 6063 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6064 if (!sess) 6065 goto out_term2; 6066 } else { 6067 if (sess->deleted) { 6068 sess = NULL; 6069 goto out_term2; 6070 } 6071 6072 if (!kref_get_unless_zero(&sess->sess_kref)) { 6073 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, 6074 "%s: kref_get fail %8phC\n", 6075 __func__, sess->port_name); 6076 sess = NULL; 6077 goto out_term2; 6078 } 6079 } 6080 6081 iocb = a; 6082 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 6083 unpacked_lun = 6084 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 6085 6086 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 6087 ha->tgt.tgt_ops->put_sess(sess); 6088 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6089 6090 if (rc != 0) 6091 goto out_term; 
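	/* Task management request successfully handed off to the target core. */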
6092 return; 6093 6094 out_term2: 6095 if (sess) 6096 ha->tgt.tgt_ops->put_sess(sess); 6097 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6098 out_term: 6099 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); 6100 } 6101 6102 static void qlt_sess_work_fn(struct work_struct *work) 6103 { 6104 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 6105 struct scsi_qla_host *vha = tgt->vha; 6106 unsigned long flags; 6107 6108 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 6109 6110 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6111 while (!list_empty(&tgt->sess_works_list)) { 6112 struct qla_tgt_sess_work_param *prm = list_entry( 6113 tgt->sess_works_list.next, typeof(*prm), 6114 sess_works_list_entry); 6115 6116 /* 6117 * This work can be scheduled on several CPUs at time, so we 6118 * must delete the entry to eliminate double processing 6119 */ 6120 list_del(&prm->sess_works_list_entry); 6121 6122 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6123 6124 switch (prm->type) { 6125 case QLA_TGT_SESS_WORK_ABORT: 6126 qlt_abort_work(tgt, prm); 6127 break; 6128 case QLA_TGT_SESS_WORK_TM: 6129 qlt_tmr_work(tgt, prm); 6130 break; 6131 default: 6132 BUG_ON(1); 6133 break; 6134 } 6135 6136 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6137 6138 kfree(prm); 6139 } 6140 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6141 } 6142 6143 /* Must be called under tgt_host_action_mutex */ 6144 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6145 { 6146 struct qla_tgt *tgt; 6147 int rc, i; 6148 struct qla_qpair_hint *h; 6149 6150 if (!QLA_TGT_MODE_ENABLED()) 6151 return 0; 6152 6153 if (!IS_TGT_MODE_CAPABLE(ha)) { 6154 ql_log(ql_log_warn, base_vha, 0xe070, 6155 "This adapter does not support target mode.\n"); 6156 return 0; 6157 } 6158 6159 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6160 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6161 6162 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6163 6164 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6165 if (!tgt) { 6166 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6167 "Unable to allocate struct qla_tgt\n"); 6168 return -ENOMEM; 6169 } 6170 6171 tgt->qphints = kzalloc((ha->max_qpairs + 1) * 6172 sizeof(struct qla_qpair_hint), GFP_KERNEL); 6173 if (!tgt->qphints) { 6174 kfree(tgt); 6175 ql_log(ql_log_warn, base_vha, 0x0197, 6176 "Unable to allocate qpair hints.\n"); 6177 return -ENOMEM; 6178 } 6179 6180 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6181 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6182 6183 rc = btree_init64(&tgt->lun_qpair_map); 6184 if (rc) { 6185 kfree(tgt->qphints); 6186 kfree(tgt); 6187 ql_log(ql_log_info, base_vha, 0x0198, 6188 "Unable to initialize lun_qpair_map btree\n"); 6189 return -EIO; 6190 } 6191 h = &tgt->qphints[0]; 6192 h->qpair = ha->base_qpair; 6193 INIT_LIST_HEAD(&h->hint_elem); 6194 h->cpuid = ha->base_qpair->cpuid; 6195 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); 6196 6197 for (i = 0; i < ha->max_qpairs; i++) { 6198 unsigned long flags; 6199 6200 struct qla_qpair *qpair = ha->queue_pair_map[i]; 6201 h = &tgt->qphints[i + 1]; 6202 INIT_LIST_HEAD(&h->hint_elem); 6203 if (qpair) { 6204 h->qpair = qpair; 6205 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 6206 list_add_tail(&h->hint_elem, &qpair->hints_list); 6207 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 6208 h->cpuid = qpair->cpuid; 6209 } 6210 } 6211 6212 tgt->ha = ha; 6213 tgt->vha = base_vha; 6214 init_waitqueue_head(&tgt->waitQ); 
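	/*
	 * Set up deferred session work (aborts and TMRs funneled through
	 * qlt_sess_work_fn) and the global reset counter used to detect
	 * resets that race with session discovery.
	 */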
6215 INIT_LIST_HEAD(&tgt->del_sess_list); 6216 spin_lock_init(&tgt->sess_work_lock); 6217 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6218 INIT_LIST_HEAD(&tgt->sess_works_list); 6219 atomic_set(&tgt->tgt_global_resets_count, 0); 6220 6221 base_vha->vha_tgt.qla_tgt = tgt; 6222 6223 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6224 "qla_target(%d): using 64 Bit PCI addressing", 6225 base_vha->vp_idx); 6226 /* 3 is reserved */ 6227 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6228 6229 mutex_lock(&qla_tgt_mutex); 6230 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6231 mutex_unlock(&qla_tgt_mutex); 6232 6233 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6234 ha->tgt.tgt_ops->add_target(base_vha); 6235 6236 return 0; 6237 } 6238 6239 /* Must be called under tgt_host_action_mutex */ 6240 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6241 { 6242 if (!vha->vha_tgt.qla_tgt) 6243 return 0; 6244 6245 if (vha->fc_vport) { 6246 qlt_release(vha->vha_tgt.qla_tgt); 6247 return 0; 6248 } 6249 6250 /* free left over qfull cmds */ 6251 qlt_init_term_exchange(vha); 6252 6253 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6254 vha->host_no, ha); 6255 qlt_release(vha->vha_tgt.qla_tgt); 6256 6257 return 0; 6258 } 6259 6260 void qlt_remove_target_resources(struct qla_hw_data *ha) 6261 { 6262 struct scsi_qla_host *node; 6263 u32 key = 0; 6264 6265 btree_for_each_safe32(&ha->tgt.host_map, key, node) 6266 btree_remove32(&ha->tgt.host_map, key); 6267 6268 btree_destroy32(&ha->tgt.host_map); 6269 } 6270 6271 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6272 unsigned char *b) 6273 { 6274 int i; 6275 6276 pr_debug("qla2xxx HW vha->node_name: "); 6277 for (i = 0; i < WWN_SIZE; i++) 6278 pr_debug("%02x ", vha->node_name[i]); 6279 pr_debug("\n"); 6280 pr_debug("qla2xxx HW vha->port_name: "); 6281 for (i = 0; i < WWN_SIZE; i++) 6282 pr_debug("%02x ", vha->port_name[i]); 6283 pr_debug("\n"); 6284 6285 pr_debug("qla2xxx passed configfs WWPN: "); 6286 put_unaligned_be64(wwpn, b); 6287 for (i = 0; i < WWN_SIZE; i++) 6288 pr_debug("%02x ", b[i]); 6289 pr_debug("\n"); 6290 } 6291 6292 /** 6293 * qla_tgt_lport_register - register lport with external module 6294 * 6295 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops 6296 * @wwpn: Passwd FC target WWPN 6297 * @callback: lport initialization callback for tcm_qla2xxx code 6298 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6299 */ 6300 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6301 u64 npiv_wwpn, u64 npiv_wwnn, 6302 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6303 { 6304 struct qla_tgt *tgt; 6305 struct scsi_qla_host *vha; 6306 struct qla_hw_data *ha; 6307 struct Scsi_Host *host; 6308 unsigned long flags; 6309 int rc; 6310 u8 b[WWN_SIZE]; 6311 6312 mutex_lock(&qla_tgt_mutex); 6313 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6314 vha = tgt->vha; 6315 ha = vha->hw; 6316 6317 host = vha->host; 6318 if (!host) 6319 continue; 6320 6321 if (!(host->hostt->supported_mode & MODE_TARGET)) 6322 continue; 6323 6324 spin_lock_irqsave(&ha->hardware_lock, flags); 6325 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6326 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6327 host->host_no); 6328 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6329 continue; 6330 } 6331 if (tgt->tgt_stop) { 6332 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 6333 host->host_no); 6334 
spin_unlock_irqrestore(&ha->hardware_lock, flags); 6335 continue; 6336 } 6337 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6338 6339 if (!scsi_host_get(host)) { 6340 ql_dbg(ql_dbg_tgt, vha, 0xe068, 6341 "Unable to scsi_host_get() for" 6342 " qla2xxx scsi_host\n"); 6343 continue; 6344 } 6345 qlt_lport_dump(vha, phys_wwpn, b); 6346 6347 if (memcmp(vha->port_name, b, WWN_SIZE)) { 6348 scsi_host_put(host); 6349 continue; 6350 } 6351 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 6352 if (rc != 0) 6353 scsi_host_put(host); 6354 6355 mutex_unlock(&qla_tgt_mutex); 6356 return rc; 6357 } 6358 mutex_unlock(&qla_tgt_mutex); 6359 6360 return -ENODEV; 6361 } 6362 EXPORT_SYMBOL(qlt_lport_register); 6363 6364 /** 6365 * qla_tgt_lport_deregister - Degister lport 6366 * 6367 * @vha: Registered scsi_qla_host pointer 6368 */ 6369 void qlt_lport_deregister(struct scsi_qla_host *vha) 6370 { 6371 struct qla_hw_data *ha = vha->hw; 6372 struct Scsi_Host *sh = vha->host; 6373 /* 6374 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 6375 */ 6376 vha->vha_tgt.target_lport_ptr = NULL; 6377 ha->tgt.tgt_ops = NULL; 6378 /* 6379 * Release the Scsi_Host reference for the underlying qla2xxx host 6380 */ 6381 scsi_host_put(sh); 6382 } 6383 EXPORT_SYMBOL(qlt_lport_deregister); 6384 6385 /* Must be called under HW lock */ 6386 static void qlt_set_mode(struct scsi_qla_host *vha) 6387 { 6388 switch (ql2x_ini_mode) { 6389 case QLA2XXX_INI_MODE_DISABLED: 6390 case QLA2XXX_INI_MODE_EXCLUSIVE: 6391 vha->host->active_mode = MODE_TARGET; 6392 break; 6393 case QLA2XXX_INI_MODE_ENABLED: 6394 vha->host->active_mode = MODE_UNKNOWN; 6395 break; 6396 case QLA2XXX_INI_MODE_DUAL: 6397 vha->host->active_mode = MODE_DUAL; 6398 break; 6399 default: 6400 break; 6401 } 6402 } 6403 6404 /* Must be called under HW lock */ 6405 static void qlt_clear_mode(struct scsi_qla_host *vha) 6406 { 6407 switch (ql2x_ini_mode) { 6408 case QLA2XXX_INI_MODE_DISABLED: 6409 vha->host->active_mode = MODE_UNKNOWN; 6410 break; 6411 case QLA2XXX_INI_MODE_EXCLUSIVE: 6412 vha->host->active_mode = MODE_INITIATOR; 6413 break; 6414 case QLA2XXX_INI_MODE_ENABLED: 6415 case QLA2XXX_INI_MODE_DUAL: 6416 vha->host->active_mode = MODE_INITIATOR; 6417 break; 6418 default: 6419 break; 6420 } 6421 } 6422 6423 /* 6424 * qla_tgt_enable_vha - NO LOCK HELD 6425 * 6426 * host_reset, bring up w/ Target Mode Enabled 6427 */ 6428 void 6429 qlt_enable_vha(struct scsi_qla_host *vha) 6430 { 6431 struct qla_hw_data *ha = vha->hw; 6432 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6433 unsigned long flags; 6434 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6435 6436 if (!tgt) { 6437 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6438 "Unable to locate qla_tgt pointer from" 6439 " struct qla_hw_data\n"); 6440 dump_stack(); 6441 return; 6442 } 6443 6444 spin_lock_irqsave(&ha->hardware_lock, flags); 6445 tgt->tgt_stopped = 0; 6446 qlt_set_mode(vha); 6447 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6448 6449 if (vha->vp_idx) { 6450 qla24xx_disable_vp(vha); 6451 qla24xx_enable_vp(vha); 6452 } else { 6453 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6454 qla2xxx_wake_dpc(base_vha); 6455 qla2x00_wait_for_hba_online(base_vha); 6456 } 6457 } 6458 EXPORT_SYMBOL(qlt_enable_vha); 6459 6460 /* 6461 * qla_tgt_disable_vha - NO LOCK HELD 6462 * 6463 * Disable Target Mode and reset the adapter 6464 */ 6465 static void qlt_disable_vha(struct scsi_qla_host *vha) 6466 { 6467 struct qla_hw_data *ha = vha->hw; 6468 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6469 
unsigned long flags; 6470 6471 if (!tgt) { 6472 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6473 "Unable to locate qla_tgt pointer from" 6474 " struct qla_hw_data\n"); 6475 dump_stack(); 6476 return; 6477 } 6478 6479 spin_lock_irqsave(&ha->hardware_lock, flags); 6480 qlt_clear_mode(vha); 6481 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6482 6483 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6484 qla2xxx_wake_dpc(vha); 6485 qla2x00_wait_for_hba_online(vha); 6486 } 6487 6488 /* 6489 * Called from qla_init.c:qla24xx_vport_create() contex to setup 6490 * the target mode specific struct scsi_qla_host and struct qla_hw_data 6491 * members. 6492 */ 6493 void 6494 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6495 { 6496 vha->vha_tgt.qla_tgt = NULL; 6497 6498 mutex_init(&vha->vha_tgt.tgt_mutex); 6499 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6500 6501 qlt_clear_mode(vha); 6502 6503 /* 6504 * NOTE: Currently the value is kept the same for <24xx and 6505 * >=24xx ISPs. If it is necessary to change it, 6506 * the check should be added for specific ISPs, 6507 * assigning the value appropriately. 6508 */ 6509 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6510 6511 qlt_add_target(ha, vha); 6512 } 6513 6514 u8 6515 qlt_rff_id(struct scsi_qla_host *vha) 6516 { 6517 u8 fc4_feature = 0; 6518 /* 6519 * FC-4 Feature bit 0 indicates target functionality to the name server. 6520 */ 6521 if (qla_tgt_mode_enabled(vha)) { 6522 fc4_feature = BIT_0; 6523 } else if (qla_ini_mode_enabled(vha)) { 6524 fc4_feature = BIT_1; 6525 } else if (qla_dual_mode_enabled(vha)) 6526 fc4_feature = BIT_0 | BIT_1; 6527 6528 return fc4_feature; 6529 } 6530 6531 /* 6532 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6533 * @ha: HA context 6534 * 6535 * Beginning of ATIO ring has initialization control block already built 6536 * by nvram config routine. 6537 * 6538 * Returns 0 on success. 6539 */ 6540 void 6541 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6542 { 6543 struct qla_hw_data *ha = vha->hw; 6544 uint16_t cnt; 6545 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6546 6547 if (qla_ini_mode_enabled(vha)) 6548 return; 6549 6550 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6551 pkt->u.raw.signature = ATIO_PROCESSED; 6552 pkt++; 6553 } 6554 6555 } 6556 6557 /* 6558 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6559 * @ha: SCSI driver HA context 6560 */ 6561 void 6562 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6563 { 6564 struct qla_hw_data *ha = vha->hw; 6565 struct atio_from_isp *pkt; 6566 int cnt, i; 6567 6568 if (!ha->flags.fw_started) 6569 return; 6570 6571 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6572 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6573 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6574 cnt = pkt->u.raw.entry_count; 6575 6576 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6577 /* 6578 * This packet is corrupted. The header + payload 6579 * can not be trusted. There is no point in passing 6580 * it further up. 
6581 */ 6582 ql_log(ql_log_warn, vha, 0xd03c, 6583 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6584 pkt->u.isp24.fcp_hdr.s_id, 6585 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6586 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6587 6588 adjust_corrupted_atio(pkt); 6589 qlt_send_term_exchange(ha->base_qpair, NULL, pkt, 6590 ha_locked, 0); 6591 } else { 6592 qlt_24xx_atio_pkt_all_vps(vha, 6593 (struct atio_from_isp *)pkt, ha_locked); 6594 } 6595 6596 for (i = 0; i < cnt; i++) { 6597 ha->tgt.atio_ring_index++; 6598 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6599 ha->tgt.atio_ring_index = 0; 6600 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6601 } else 6602 ha->tgt.atio_ring_ptr++; 6603 6604 pkt->u.raw.signature = ATIO_PROCESSED; 6605 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6606 } 6607 wmb(); 6608 } 6609 6610 /* Adjust ring index */ 6611 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6612 } 6613 6614 void 6615 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6616 { 6617 struct qla_hw_data *ha = vha->hw; 6618 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6619 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6620 6621 if (!QLA_TGT_MODE_ENABLED()) 6622 return; 6623 6624 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 6625 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6626 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6627 6628 if (ha->flags.msix_enabled) { 6629 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6630 if (IS_QLA2071(ha)) { 6631 /* 4 ports Baker: Enable Interrupt Handshake */ 6632 icb->msix_atio = 0; 6633 icb->firmware_options_2 |= BIT_26; 6634 } else { 6635 icb->msix_atio = cpu_to_le16(msix->entry); 6636 icb->firmware_options_2 &= ~BIT_26; 6637 } 6638 ql_dbg(ql_dbg_init, vha, 0xf072, 6639 "Registering ICB vector 0x%x for atio que.\n", 6640 msix->entry); 6641 } 6642 } else { 6643 /* INTx|MSI */ 6644 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6645 icb->msix_atio = 0; 6646 icb->firmware_options_2 |= BIT_26; 6647 ql_dbg(ql_dbg_init, vha, 0xf072, 6648 "%s: Use INTx for ATIOQ.\n", __func__); 6649 } 6650 } 6651 } 6652 6653 void 6654 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6655 { 6656 struct qla_hw_data *ha = vha->hw; 6657 u32 tmp; 6658 6659 if (!QLA_TGT_MODE_ENABLED()) 6660 return; 6661 6662 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6663 if (!ha->tgt.saved_set) { 6664 /* We save only once */ 6665 ha->tgt.saved_exchange_count = nv->exchange_count; 6666 ha->tgt.saved_firmware_options_1 = 6667 nv->firmware_options_1; 6668 ha->tgt.saved_firmware_options_2 = 6669 nv->firmware_options_2; 6670 ha->tgt.saved_firmware_options_3 = 6671 nv->firmware_options_3; 6672 ha->tgt.saved_set = 1; 6673 } 6674 6675 if (qla_tgt_mode_enabled(vha)) 6676 nv->exchange_count = cpu_to_le16(0xFFFF); 6677 else /* dual */ 6678 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 6679 6680 /* Enable target mode */ 6681 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6682 6683 /* Disable ini mode, if requested */ 6684 if (qla_tgt_mode_enabled(vha)) 6685 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6686 6687 /* Disable Full Login after LIP */ 6688 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6689 /* Enable initial LIP */ 6690 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6691 if (ql2xtgt_tape_enable) 6692 /* Enable FC Tape support */ 6693 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6694 else 6695 /* Disable FC Tape support */ 6696 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6697 6698 /* Disable Full Login after LIP */ 6699 
nv->host_p &= cpu_to_le32(~BIT_10); 6700 6701 /* 6702 * clear BIT 15 explicitly as we have seen at least 6703 * a couple of instances where this was set and this 6704 * was causing the firmware to not be initialized. 6705 */ 6706 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6707 /* Enable target PRLI control */ 6708 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6709 6710 if (IS_QLA25XX(ha)) { 6711 /* Change Loop-prefer to Pt-Pt */ 6712 tmp = ~(BIT_4|BIT_5|BIT_6); 6713 nv->firmware_options_2 &= cpu_to_le32(tmp); 6714 tmp = P2P << 4; 6715 nv->firmware_options_2 |= cpu_to_le32(tmp); 6716 } 6717 } else { 6718 if (ha->tgt.saved_set) { 6719 nv->exchange_count = ha->tgt.saved_exchange_count; 6720 nv->firmware_options_1 = 6721 ha->tgt.saved_firmware_options_1; 6722 nv->firmware_options_2 = 6723 ha->tgt.saved_firmware_options_2; 6724 nv->firmware_options_3 = 6725 ha->tgt.saved_firmware_options_3; 6726 } 6727 return; 6728 } 6729 6730 if (ha->base_qpair->enable_class_2) { 6731 if (vha->flags.init_done) 6732 fc_host_supported_classes(vha->host) = 6733 FC_COS_CLASS2 | FC_COS_CLASS3; 6734 6735 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6736 } else { 6737 if (vha->flags.init_done) 6738 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6739 6740 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6741 } 6742 } 6743 6744 void 6745 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6746 struct init_cb_24xx *icb) 6747 { 6748 struct qla_hw_data *ha = vha->hw; 6749 6750 if (!QLA_TGT_MODE_ENABLED()) 6751 return; 6752 6753 if (ha->tgt.node_name_set) { 6754 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6755 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6756 } 6757 6758 /* disable ZIO at start time. */ 6759 if (!vha->flags.init_done) { 6760 uint32_t tmp; 6761 tmp = le32_to_cpu(icb->firmware_options_2); 6762 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6763 icb->firmware_options_2 = cpu_to_le32(tmp); 6764 } 6765 } 6766 6767 void 6768 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6769 { 6770 struct qla_hw_data *ha = vha->hw; 6771 u32 tmp; 6772 6773 if (!QLA_TGT_MODE_ENABLED()) 6774 return; 6775 6776 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6777 if (!ha->tgt.saved_set) { 6778 /* We save only once */ 6779 ha->tgt.saved_exchange_count = nv->exchange_count; 6780 ha->tgt.saved_firmware_options_1 = 6781 nv->firmware_options_1; 6782 ha->tgt.saved_firmware_options_2 = 6783 nv->firmware_options_2; 6784 ha->tgt.saved_firmware_options_3 = 6785 nv->firmware_options_3; 6786 ha->tgt.saved_set = 1; 6787 } 6788 6789 if (qla_tgt_mode_enabled(vha)) 6790 nv->exchange_count = cpu_to_le16(0xFFFF); 6791 else /* dual */ 6792 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 6793 6794 /* Enable target mode */ 6795 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6796 6797 /* Disable ini mode, if requested */ 6798 if (qla_tgt_mode_enabled(vha)) 6799 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6800 /* Disable Full Login after LIP */ 6801 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6802 /* Enable initial LIP */ 6803 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6804 /* 6805 * clear BIT 15 explicitly as we have seen at 6806 * least a couple of instances where this was set 6807 * and this was causing the firmware to not be 6808 * initialized. 
6809 */ 6810 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6811 if (ql2xtgt_tape_enable) 6812 /* Enable FC tape support */ 6813 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6814 else 6815 /* Disable FC tape support */ 6816 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6817 6818 /* Disable Full Login after LIP */ 6819 nv->host_p &= cpu_to_le32(~BIT_10); 6820 /* Enable target PRLI control */ 6821 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6822 6823 /* Change Loop-prefer to Pt-Pt */ 6824 tmp = ~(BIT_4|BIT_5|BIT_6); 6825 nv->firmware_options_2 &= cpu_to_le32(tmp); 6826 tmp = P2P << 4; 6827 nv->firmware_options_2 |= cpu_to_le32(tmp); 6828 } else { 6829 if (ha->tgt.saved_set) { 6830 nv->exchange_count = ha->tgt.saved_exchange_count; 6831 nv->firmware_options_1 = 6832 ha->tgt.saved_firmware_options_1; 6833 nv->firmware_options_2 = 6834 ha->tgt.saved_firmware_options_2; 6835 nv->firmware_options_3 = 6836 ha->tgt.saved_firmware_options_3; 6837 } 6838 return; 6839 } 6840 6841 if (ha->base_qpair->enable_class_2) { 6842 if (vha->flags.init_done) 6843 fc_host_supported_classes(vha->host) = 6844 FC_COS_CLASS2 | FC_COS_CLASS3; 6845 6846 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6847 } else { 6848 if (vha->flags.init_done) 6849 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6850 6851 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6852 } 6853 } 6854 6855 void 6856 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 6857 struct init_cb_81xx *icb) 6858 { 6859 struct qla_hw_data *ha = vha->hw; 6860 6861 if (!QLA_TGT_MODE_ENABLED()) 6862 return; 6863 6864 if (ha->tgt.node_name_set) { 6865 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6866 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6867 } 6868 6869 /* disable ZIO at start time. */ 6870 if (!vha->flags.init_done) { 6871 uint32_t tmp; 6872 tmp = le32_to_cpu(icb->firmware_options_2); 6873 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6874 icb->firmware_options_2 = cpu_to_le32(tmp); 6875 } 6876 6877 } 6878 6879 void 6880 qlt_83xx_iospace_config(struct qla_hw_data *ha) 6881 { 6882 if (!QLA_TGT_MODE_ENABLED()) 6883 return; 6884 6885 ha->msix_count += 1; /* For ATIO Q */ 6886 } 6887 6888 6889 void 6890 qlt_modify_vp_config(struct scsi_qla_host *vha, 6891 struct vp_config_entry_24xx *vpmod) 6892 { 6893 /* enable target mode. Bit5 = 1 => disable */ 6894 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 6895 vpmod->options_idx1 &= ~BIT_5; 6896 6897 /* Disable ini mode, if requested. 
bit4 = 1 => disable */ 6898 if (qla_tgt_mode_enabled(vha)) 6899 vpmod->options_idx1 &= ~BIT_4; 6900 } 6901 6902 void 6903 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 6904 { 6905 int rc; 6906 6907 if (!QLA_TGT_MODE_ENABLED()) 6908 return; 6909 6910 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6911 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6912 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6913 } else { 6914 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 6915 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 6916 } 6917 6918 mutex_init(&base_vha->vha_tgt.tgt_mutex); 6919 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 6920 6921 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 6922 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 6923 qlt_unknown_atio_work_fn); 6924 6925 qlt_clear_mode(base_vha); 6926 6927 rc = btree_init32(&ha->tgt.host_map); 6928 if (rc) 6929 ql_log(ql_log_info, base_vha, 0xd03d, 6930 "Unable to initialize ha->host_map btree\n"); 6931 6932 qlt_update_vp_map(base_vha, SET_VP_IDX); 6933 } 6934 6935 irqreturn_t 6936 qla83xx_msix_atio_q(int irq, void *dev_id) 6937 { 6938 struct rsp_que *rsp; 6939 scsi_qla_host_t *vha; 6940 struct qla_hw_data *ha; 6941 unsigned long flags; 6942 6943 rsp = (struct rsp_que *) dev_id; 6944 ha = rsp->hw; 6945 vha = pci_get_drvdata(ha->pdev); 6946 6947 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 6948 6949 qlt_24xx_process_atio_queue(vha, 0); 6950 6951 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 6952 6953 return IRQ_HANDLED; 6954 } 6955 6956 static void 6957 qlt_handle_abts_recv_work(struct work_struct *work) 6958 { 6959 struct qla_tgt_sess_op *op = container_of(work, 6960 struct qla_tgt_sess_op, work); 6961 scsi_qla_host_t *vha = op->vha; 6962 struct qla_hw_data *ha = vha->hw; 6963 unsigned long flags; 6964 6965 if (qla2x00_reset_active(vha) || 6966 (op->chip_reset != ha->base_qpair->chip_reset)) 6967 return; 6968 6969 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 6970 qlt_24xx_process_atio_queue(vha, 0); 6971 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 6972 6973 spin_lock_irqsave(&ha->hardware_lock, flags); 6974 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 6975 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6976 6977 kfree(op); 6978 } 6979 6980 void 6981 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 6982 response_t *pkt) 6983 { 6984 struct qla_tgt_sess_op *op; 6985 6986 op = kzalloc(sizeof(*op), GFP_ATOMIC); 6987 6988 if (!op) { 6989 /* do not reach for ATIO queue here. This is best effort err 6990 * recovery at this point. 
6991 */ 6992 qlt_response_pkt_all_vps(vha, rsp, pkt); 6993 return; 6994 } 6995 6996 memcpy(&op->atio, pkt, sizeof(*pkt)); 6997 op->vha = vha; 6998 op->chip_reset = vha->hw->base_qpair->chip_reset; 6999 op->rsp = rsp; 7000 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7001 queue_work(qla_tgt_wq, &op->work); 7002 return; 7003 } 7004 7005 int 7006 qlt_mem_alloc(struct qla_hw_data *ha) 7007 { 7008 if (!QLA_TGT_MODE_ENABLED()) 7009 return 0; 7010 7011 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * 7012 MAX_MULTI_ID_FABRIC, GFP_KERNEL); 7013 if (!ha->tgt.tgt_vp_map) 7014 return -ENOMEM; 7015 7016 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7017 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7018 &ha->tgt.atio_dma, GFP_KERNEL); 7019 if (!ha->tgt.atio_ring) { 7020 kfree(ha->tgt.tgt_vp_map); 7021 return -ENOMEM; 7022 } 7023 return 0; 7024 } 7025 7026 void 7027 qlt_mem_free(struct qla_hw_data *ha) 7028 { 7029 if (!QLA_TGT_MODE_ENABLED()) 7030 return; 7031 7032 if (ha->tgt.atio_ring) { 7033 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7034 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7035 ha->tgt.atio_dma); 7036 } 7037 kfree(ha->tgt.tgt_vp_map); 7038 } 7039 7040 /* vport_slock to be held by the caller */ 7041 void 7042 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 7043 { 7044 void *slot; 7045 u32 key; 7046 int rc; 7047 7048 if (!QLA_TGT_MODE_ENABLED()) 7049 return; 7050 7051 key = vha->d_id.b24; 7052 7053 switch (cmd) { 7054 case SET_VP_IDX: 7055 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 7056 break; 7057 case SET_AL_PA: 7058 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7059 if (!slot) { 7060 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, 7061 "Save vha in host_map %p %06x\n", vha, key); 7062 rc = btree_insert32(&vha->hw->tgt.host_map, 7063 key, vha, GFP_ATOMIC); 7064 if (rc) 7065 ql_log(ql_log_info, vha, 0xd03e, 7066 "Unable to insert s_id into host_map: %06x\n", 7067 key); 7068 return; 7069 } 7070 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 7071 "replace existing vha in host_map %p %06x\n", vha, key); 7072 btree_update32(&vha->hw->tgt.host_map, key, vha); 7073 break; 7074 case RESET_VP_IDX: 7075 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 7076 break; 7077 case RESET_AL_PA: 7078 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 7079 "clear vha in host_map %p %06x\n", vha, key); 7080 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7081 if (slot) 7082 btree_remove32(&vha->hw->tgt.host_map, key); 7083 vha->d_id.b24 = 0; 7084 break; 7085 } 7086 } 7087 7088 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 7089 { 7090 7091 if (!vha->d_id.b24) { 7092 vha->d_id = id; 7093 qlt_update_vp_map(vha, SET_AL_PA); 7094 } else if (vha->d_id.b24 != id.b24) { 7095 qlt_update_vp_map(vha, RESET_AL_PA); 7096 vha->d_id = id; 7097 qlt_update_vp_map(vha, SET_AL_PA); 7098 } 7099 } 7100 7101 static int __init qlt_parse_ini_mode(void) 7102 { 7103 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7104 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7105 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7106 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7107 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7108 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 7109 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 7110 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 7111 else 7112 return false; 7113 7114 return true; 7115 } 7116 7117 int __init qlt_init(void) 7118 { 7119 int ret; 7120 7121 if 
(!qlt_parse_ini_mode()) { 7122 ql_log(ql_log_fatal, NULL, 0xe06b, 7123 "qlt_parse_ini_mode() failed\n"); 7124 return -EINVAL; 7125 } 7126 7127 if (!QLA_TGT_MODE_ENABLED()) 7128 return 0; 7129 7130 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 7131 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 7132 qla_tgt_mgmt_cmd), 0, NULL); 7133 if (!qla_tgt_mgmt_cmd_cachep) { 7134 ql_log(ql_log_fatal, NULL, 0xd04b, 7135 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 7136 return -ENOMEM; 7137 } 7138 7139 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 7140 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 7141 0, NULL); 7142 7143 if (!qla_tgt_plogi_cachep) { 7144 ql_log(ql_log_fatal, NULL, 0xe06d, 7145 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 7146 ret = -ENOMEM; 7147 goto out_mgmt_cmd_cachep; 7148 } 7149 7150 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 7151 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 7152 if (!qla_tgt_mgmt_cmd_mempool) { 7153 ql_log(ql_log_fatal, NULL, 0xe06e, 7154 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 7155 ret = -ENOMEM; 7156 goto out_plogi_cachep; 7157 } 7158 7159 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 7160 if (!qla_tgt_wq) { 7161 ql_log(ql_log_fatal, NULL, 0xe06f, 7162 "alloc_workqueue for qla_tgt_wq failed\n"); 7163 ret = -ENOMEM; 7164 goto out_cmd_mempool; 7165 } 7166 /* 7167 * Return 1 to signal that initiator-mode is being disabled 7168 */ 7169 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 7170 7171 out_cmd_mempool: 7172 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7173 out_plogi_cachep: 7174 kmem_cache_destroy(qla_tgt_plogi_cachep); 7175 out_mgmt_cmd_cachep: 7176 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7177 return ret; 7178 } 7179 7180 void qlt_exit(void) 7181 { 7182 if (!QLA_TGT_MODE_ENABLED()) 7183 return; 7184 7185 destroy_workqueue(qla_tgt_wq); 7186 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7187 kmem_cache_destroy(qla_tgt_plogi_cachep); 7188 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7189 } 7190
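/*
 * Illustrative usage sketch (not part of this driver): an external fabric
 * module pairs qlt_lport_register() with qlt_lport_deregister().  The
 * callback is invoked with qla_tgt_mutex held once a physical port whose
 * WWPN matches phys_wwpn is found, and is expected to publish its private
 * lport data and qla_tgt_ops for this host.  All names prefixed "my_" are
 * hypothetical:
 *
 *	static int my_lport_cb(struct scsi_qla_host *vha, void *lport,
 *			       u64 npiv_wwpn, u64 npiv_wwnn)
 *	{
 *		vha->vha_tgt.target_lport_ptr = lport;
 *		vha->hw->tgt.tgt_ops = &my_qla_tgt_ops;
 *		return 0;
 *	}
 *
 *	rc = qlt_lport_register(my_lport, my_wwpn, 0, 0, my_lport_cb);
 *	if (rc != 0)
 *		return rc;
 *	...
 *	qlt_lport_deregister(vha);
 */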