/*
 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 * based on qla2x00t.c code:
 *
 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 * Copyright (C) 2004 - 2005 Leonid Stoljar
 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 * Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 * Forward port and refactoring to modern qla2xxx and target/configfs
 *
 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled; target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 50;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
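/*
 * Illustrative only (not part of this file): with the three module
 * parameters above, dual mode with a 75/25 target/initiator split of
 * firmware exchange resources would be requested at load time roughly as:
 *
 *	modprobe qla2xxx qlini_mode=dual ql_dm_tgt_ex_pct=75
 */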
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
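/*
 * Usage sketch (illustrative, drawn from qlt_create_sess() and
 * qlt_fc_port_deleted() below): a session records the tick at creation time
 * and a deletion request carries the tick current when it was made, so
 * stale requests can be detected after locks were dropped:
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 *	...
 *	if (max_gen - sess->generation < 0)
 *		return;		(stale deletion request, ignore)
 */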
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct scsi_qla_host *host;
	uint32_t key = 0;

	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
	    (vha->d_id.b.al_pa == d_id[2]))
		return vha;

	key = (uint32_t)d_id[0] << 16;
	key |= (uint32_t)d_id[1] << 8;
	key |= (uint32_t)d_id[2];

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, "
		    "because tgt is being stopped", vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "Alloc of struct unknown_atio (size %zd) failed",
		    sizeof(*u));
		/* It should be harmless and on the next retry should work well */
		goto out_term;
	}

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
	goto out;
}
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Freeing unknown %s %p, because of Abort",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Requeuing unknown ATIO_TYPE7 %p", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Freeing unknown %s %p, because tgt is being stopped",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha, NULL, &u->atio,
			    ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "u %p, vha %p, host %p, sched again..", u,
			    vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xffff,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}
static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
		"Async done-%s res %x %8phC type %d\n",
		sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
					"%s %d %8phC post upd_fcport fcp_cnt %d\n",
					__func__, __LINE__,
					sp->fcport->port_name,
					vha->fcport_count);

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
					"%s %d %8phC post gpsc fcp_cnt %d\n",
					__func__, __LINE__,
					sp->fcport->port_name,
					vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
		"Async-%s %8phC hndl %x %s\n",
		sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;
	unsigned long flags;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xffff,
			    "%s create sess success %p", __func__, t);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}
void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the initiator
	 * to ack LOGO. Initialize to 1 if LOGO is triggered by a command,
	 * otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;
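/*
 * Usage sketch (taken from qlt_free_session_done() below): the caller fills
 * in the two caller fields above on the stack and hands the struct to
 * qlt_send_first_logo(), which links it on vha->logo_list for the duration
 * of the ELS:
 *
 *	qlt_port_logo_t logo;
 *
 *	logo.id = sess->d_id;
 *	logo.cmd_count = 0;
 *	qlt_send_first_logo(vha, &logo);
 */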
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

static void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	struct event_arg ea;
	scsi_qla_host_t *base_vha;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			qlt_send_first_logo(vha, &logo);
		}

		if (sess->logout_on_delete) {
			int rc;

			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
			if (rc != QLA_SUCCESS)
				ql_log(ql_log_warn, vha, 0xf085,
				    "Schedule logo failed sess %p rc %d\n",
				    sess, rc);
			else
				logout_started = true;
		}
	}

985 */ 986 if (sess->se_sess != NULL) 987 ha->tgt.tgt_ops->free_session(sess); 988 989 if (logout_started) { 990 bool traced = false; 991 992 while (!ACCESS_ONCE(sess->logout_completed)) { 993 if (!traced) { 994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, 995 "%s: waiting for sess %p logout\n", 996 __func__, sess); 997 traced = true; 998 } 999 msleep(100); 1000 } 1001 1002 ql_dbg(ql_dbg_disc, vha, 0xf087, 1003 "%s: sess %p logout completed\n",__func__, sess); 1004 } 1005 1006 if (sess->logo_ack_needed) { 1007 sess->logo_ack_needed = 0; 1008 qla24xx_async_notify_ack(vha, sess, 1009 (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); 1010 } 1011 1012 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1013 if (sess->se_sess) { 1014 sess->se_sess = NULL; 1015 if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) 1016 tgt->sess_count--; 1017 } 1018 1019 sess->disc_state = DSC_DELETED; 1020 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1021 sess->deleted = QLA_SESS_DELETED; 1022 sess->login_retry = vha->hw->login_retry_count; 1023 1024 if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { 1025 vha->fcport_count--; 1026 sess->login_succ = 0; 1027 } 1028 1029 if (sess->chip_reset != sess->vha->hw->chip_reset) 1030 qla2x00_clear_loop_id(sess); 1031 1032 if (sess->conflict) { 1033 sess->conflict->login_pause = 0; 1034 sess->conflict = NULL; 1035 if (!test_bit(UNLOADING, &vha->dpc_flags)) 1036 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1037 } 1038 1039 { 1040 struct qlt_plogi_ack_t *own = 1041 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 1042 struct qlt_plogi_ack_t *con = 1043 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; 1044 struct imm_ntfy_from_isp *iocb; 1045 1046 if (con) { 1047 iocb = &con->iocb; 1048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, 1049 "se_sess %p / sess %p port %8phC is gone," 1050 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", 1051 sess->se_sess, sess, sess->port_name, 1052 own ? "releasing own PLOGI" : "no own PLOGI pending", 1053 own ? own->ref_count : -1, 1054 iocb->u.isp24.port_name, con->ref_count); 1055 qlt_plogi_ack_unref(vha, con); 1056 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 1057 } else { 1058 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, 1059 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", 1060 sess->se_sess, sess, sess->port_name, 1061 own ? "releasing own PLOGI" : 1062 "no own PLOGI pending", 1063 own ? 

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);

	base_vha = pci_get_drvdata(ha->pdev);
	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
		return;

	if (!tgt || !tgt->tgt_stop) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_DELETE_DONE;
		ea.fcport = sess;
		qla2x00_fcport_event_handler(vha, &ea);
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_mark_device_lost(vha, sess, 1, 1);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	sess->disc_state = DSC_DELETE_PEND;
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
		sess->scan_state = 0;
	}
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;

	if (sess->disc_state == DSC_DELETE_PEND)
		return;

	if (sess->disc_state == DSC_DELETED) {
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
	}

	sess->disc_state = DSC_DELETE_PEND;

	if (sess->deleted == QLA_SESS_DELETED)
		sess->logout_on_delete = 0;

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	schedule_work(&sess->del_work);
}

void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
	unsigned long flags;
	struct qla_hw_data *ha = sess->vha->hw;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess, 1);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;

		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
/*
 * Adds an extra ref so the hw lock can be dropped after adding sess to the
 * list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
1450 */ 1451 mutex_lock(&vha->vha_tgt.tgt_mutex); 1452 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1453 tgt->tgt_stop = 1; 1454 qlt_clear_tgt_db(tgt); 1455 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1456 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1457 mutex_unlock(&qla_tgt_mutex); 1458 1459 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, 1460 "Waiting for sess works (tgt %p)", tgt); 1461 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1462 while (!list_empty(&tgt->sess_works_list)) { 1463 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1464 flush_scheduled_work(); 1465 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1466 } 1467 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1468 1469 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, 1470 "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); 1471 1472 wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); 1473 1474 /* Big hammer */ 1475 if (!ha->flags.host_shutting_down && 1476 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) 1477 qlt_disable_vha(vha); 1478 1479 /* Wait for sessions to clear out (just in case) */ 1480 wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); 1481 return 0; 1482 } 1483 EXPORT_SYMBOL(qlt_stop_phase1); 1484 1485 /* Called by tcm_qla2xxx configfs code */ 1486 void qlt_stop_phase2(struct qla_tgt *tgt) 1487 { 1488 struct qla_hw_data *ha = tgt->ha; 1489 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1490 unsigned long flags; 1491 1492 if (tgt->tgt_stopped) { 1493 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, 1494 "Already in tgt->tgt_stopped state\n"); 1495 dump_stack(); 1496 return; 1497 } 1498 1499 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, 1500 "Waiting for %d IRQ commands to complete (tgt %p)", 1501 tgt->irq_cmd_count, tgt); 1502 1503 mutex_lock(&vha->vha_tgt.tgt_mutex); 1504 spin_lock_irqsave(&ha->hardware_lock, flags); 1505 while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) { 1506 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1507 udelay(2); 1508 spin_lock_irqsave(&ha->hardware_lock, flags); 1509 } 1510 tgt->tgt_stop = 0; 1511 tgt->tgt_stopped = 1; 1512 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1513 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1514 1515 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", 1516 tgt); 1517 } 1518 EXPORT_SYMBOL(qlt_stop_phase2); 1519 1520 /* Called from qlt_remove_target() -> qla2x00_remove_one() */ 1521 static void qlt_release(struct qla_tgt *tgt) 1522 { 1523 scsi_qla_host_t *vha = tgt->vha; 1524 1525 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 1526 qlt_stop_phase2(tgt); 1527 1528 vha->vha_tgt.qla_tgt = NULL; 1529 1530 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, 1531 "Release of tgt %p finished\n", tgt); 1532 1533 kfree(tgt); 1534 } 1535 1536 /* ha->hardware_lock supposed to be held on entry */ 1537 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, 1538 const void *param, unsigned int param_size) 1539 { 1540 struct qla_tgt_sess_work_param *prm; 1541 unsigned long flags; 1542 1543 prm = kzalloc(sizeof(*prm), GFP_ATOMIC); 1544 if (!prm) { 1545 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, 1546 "qla_target(%d): Unable to create session " 1547 "work, command will be refused", 0); 1548 return -ENOMEM; 1549 } 1550 1551 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, 1552 "Scheduling work (type %d, prm %p)" 1553 " to find session for param %p (size %d, tgt %p)\n", 1554 type, prm, param, param_size, tgt); 1555 1556 prm->type = type; 1557 memcpy(&prm->tm_iocb, param, param_size); 1558 1559 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1560 
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry, this is the firmware's response to the ABTS response we
	 * generated, so its ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;

	spin_lock(&vha->cmd_list_lock);

	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->aborted = 1;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	spin_unlock(&vha->cmd_list_lock);
	return 0;
}
/* Drop cmds for the given lun.
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
				uint32_t lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;

	key = sid_to_key(s_id);
	spin_lock(&vha->cmd_list_lock);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		uint32_t op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		uint32_t cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock(&vha->cmd_list_lock);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
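/*
 * Note on addressing below (summarizing the handler that follows): the ABTS
 * arrives with its FCP header in the byte-swapped fcp_hdr_le layout, so the
 * S_ID bytes sit in reverse order relative to what find_sess_by_s_id()
 * expects; qlt_24xx_handle_abts() therefore swaps s_id[0] and s_id[2]
 * before the session lookup.
 */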
Might drop it, then reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
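
/*
 * Editorial note, a sketch of the ABTS round trip implemented here using
 * the driver's own entry points: a matched ABTS is handed to the target
 * core through tgt_ops->handle_tmr() in __qlt_24xx_handle_abts() above;
 * the core's answer comes back via qlt_xmit_tm_rsp() below, which replies
 * with qlt_24xx_send_abts_resp() or qlt_24xx_send_task_mgmt_ctio()
 * depending on the type of the original IOCB.
 */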

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then 2047 * reacquire 2048 */ 2049 void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, 2050 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2051 { 2052 struct atio_from_isp *atio = &cmd->atio; 2053 struct ctio7_to_24xx *ctio; 2054 uint16_t temp; 2055 2056 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2057 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2058 "sense_key=%02x, asc=%02x, ascq=%02x", 2059 vha, atio, scsi_status, sense_key, asc, ascq); 2060 2061 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2062 if (!ctio) { 2063 ql_dbg(ql_dbg_async, vha, 0x3067, 2064 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2065 vha->host_no, __func__); 2066 goto out; 2067 } 2068 2069 ctio->entry_type = CTIO_TYPE7; 2070 ctio->entry_count = 1; 2071 ctio->handle = QLA_TGT_SKIP_HANDLE; 2072 ctio->nport_handle = cmd->sess->loop_id; 2073 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2074 ctio->vp_index = vha->vp_idx; 2075 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2076 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2077 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2078 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2079 ctio->u.status1.flags = (atio->u.isp24.attr << 9) | 2080 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); 2081 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2082 ctio->u.status1.ox_id = cpu_to_le16(temp); 2083 ctio->u.status1.scsi_status = 2084 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2085 ctio->u.status1.response_len = cpu_to_le16(18); 2086 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2087 2088 if (ctio->u.status1.residual != 0) 2089 ctio->u.status1.scsi_status |= 2090 cpu_to_le16(SS_RESIDUAL_UNDER); 2091 2092 /* Response code and sense key */ 2093 put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), 2094 (&ctio->u.status1.sense_data)[0]); 2095 /* Additional sense length */ 2096 put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); 2097 /* ASC and ASCQ */ 2098 put_unaligned_le32(((asc << 24) | (ascq << 16)), 2099 (&ctio->u.status1.sense_data)[3]); 2100 2101 /* Memory Barrier */ 2102 wmb(); 2103 2104 qla2x00_start_iocbs(vha, vha->req); 2105 out: 2106 return; 2107 } 2108 2109 /* callback from target fabric module code */ 2110 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2111 { 2112 struct scsi_qla_host *vha = mcmd->sess->vha; 2113 struct qla_hw_data *ha = vha->hw; 2114 unsigned long flags; 2115 2116 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2117 "TM response mcmd (%p) status %#x state %#x", 2118 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2119 2120 spin_lock_irqsave(&ha->hardware_lock, flags); 2121 2122 if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) { 2123 /* 2124 * Either the port is not online or this request was from 2125 * previous life, just abort the processing. 
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
			    0, 0, 0, 0, 0, 0);
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * The callback to ->free_mcmd() will queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call is made from TFO->check_stop_free() ->
	 * tcm_qla2xxx_check_stop_free() to release the TMR-associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp()
	 * -> qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If there are more data segments than fit into the command
		 * IOCB itself, we need to allocate continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

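/*
 * Illustrative sketch (kept out of the build): the request-ring cost that
 * qlt_pci_map_calc_cnt() computes above for the non-DIF case. The helper
 * name and the sample numbers are editorial examples, not part of the
 * driver.
 */
#if 0
static inline uint32_t example_ctio_req_cnt(int seg_cnt, int per_cmd,
	int per_cont)
{
	uint32_t req_cnt = 1;	/* the CTIO7 IOCB itself */

	/* Segments that do not fit in the CTIO spill into continuations */
	if (seg_cnt > per_cmd)
		req_cnt += DIV_ROUND_UP(seg_cnt - per_cmd, per_cont);

	return req_cnt;	/* e.g. seg_cnt=11, per_cmd=1, per_cont=5 -> 3 */
}
#endif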
2234 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2235 { 2236 struct qla_hw_data *ha = vha->hw; 2237 2238 if (!cmd->sg_mapped) 2239 return; 2240 2241 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 2242 cmd->sg_mapped = 0; 2243 2244 if (cmd->prot_sg_cnt) 2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2246 cmd->dma_data_direction); 2247 2248 if (!cmd->ctx) 2249 return; 2250 2251 if (cmd->ctx_dsd_alloced) 2252 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2253 2254 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2255 } 2256 2257 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, 2258 uint32_t req_cnt) 2259 { 2260 uint32_t cnt, cnt_in; 2261 2262 if (vha->req->cnt < (req_cnt + 2)) { 2263 cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); 2264 cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in); 2265 2266 if (vha->req->ring_index < cnt) 2267 vha->req->cnt = cnt - vha->req->ring_index; 2268 else 2269 vha->req->cnt = vha->req->length - 2270 (vha->req->ring_index - cnt); 2271 2272 if (unlikely(vha->req->cnt < (req_cnt + 2))) { 2273 ql_dbg(ql_dbg_io, vha, 0x305a, 2274 "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", 2275 vha->vp_idx, vha->req->ring_index, 2276 vha->req->cnt, req_cnt, cnt, cnt_in, 2277 vha->req->length); 2278 return -EAGAIN; 2279 } 2280 } 2281 2282 vha->req->cnt -= req_cnt; 2283 2284 return 0; 2285 } 2286 2287 /* 2288 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2289 */ 2290 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) 2291 { 2292 /* Adjust ring index. */ 2293 vha->req->ring_index++; 2294 if (vha->req->ring_index == vha->req->length) { 2295 vha->req->ring_index = 0; 2296 vha->req->ring_ptr = vha->req->ring; 2297 } else { 2298 vha->req->ring_ptr++; 2299 } 2300 return (cont_entry_t *)vha->req->ring_ptr; 2301 } 2302 2303 /* ha->hardware_lock supposed to be held on entry */ 2304 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) 2305 { 2306 struct qla_hw_data *ha = vha->hw; 2307 uint32_t h; 2308 2309 h = ha->tgt.current_handle; 2310 /* always increment cmd handle */ 2311 do { 2312 ++h; 2313 if (h > DEFAULT_OUTSTANDING_COMMANDS) 2314 h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ 2315 if (h == ha->tgt.current_handle) { 2316 ql_dbg(ql_dbg_io, vha, 0x305b, 2317 "qla_target(%d): Ran out of " 2318 "empty cmd slots in ha %p\n", vha->vp_idx, ha); 2319 h = QLA_TGT_NULL_HANDLE; 2320 break; 2321 } 2322 } while ((h == QLA_TGT_NULL_HANDLE) || 2323 (h == QLA_TGT_SKIP_HANDLE) || 2324 (ha->tgt.cmds[h-1] != NULL)); 2325 2326 if (h != QLA_TGT_NULL_HANDLE) 2327 ha->tgt.current_handle = h; 2328 2329 return h; 2330 } 2331 2332 /* ha->hardware_lock supposed to be held on entry */ 2333 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, 2334 struct scsi_qla_host *vha) 2335 { 2336 uint32_t h; 2337 struct ctio7_to_24xx *pkt; 2338 struct qla_hw_data *ha = vha->hw; 2339 struct atio_from_isp *atio = &prm->cmd->atio; 2340 uint16_t temp; 2341 2342 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; 2343 prm->pkt = pkt; 2344 memset(pkt, 0, sizeof(*pkt)); 2345 2346 pkt->entry_type = CTIO_TYPE7; 2347 pkt->entry_count = (uint8_t)prm->req_cnt; 2348 pkt->vp_index = vha->vp_idx; 2349 2350 h = qlt_make_handle(vha); 2351 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2352 /* 2353 * CTIO type 7 from the firmware doesn't provide a way to 2354 * know the initiator's LOOP ID, hence 
we can't find 2355 * the session and, so, the command. 2356 */ 2357 return -EAGAIN; 2358 } else 2359 ha->tgt.cmds[h - 1] = prm->cmd; 2360 2361 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2362 pkt->nport_handle = prm->cmd->loop_id; 2363 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2364 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2365 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2366 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2367 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2368 pkt->u.status0.flags |= (atio->u.isp24.attr << 9); 2369 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2370 pkt->u.status0.ox_id = cpu_to_le16(temp); 2371 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 2372 2373 return 0; 2374 } 2375 2376 /* 2377 * ha->hardware_lock supposed to be held on entry. We have already made sure 2378 * that there is sufficient amount of request entries to not drop it. 2379 */ 2380 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, 2381 struct scsi_qla_host *vha) 2382 { 2383 int cnt; 2384 uint32_t *dword_ptr; 2385 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; 2386 2387 /* Build continuation packets */ 2388 while (prm->seg_cnt > 0) { 2389 cont_a64_entry_t *cont_pkt64 = 2390 (cont_a64_entry_t *)qlt_get_req_pkt(vha); 2391 2392 /* 2393 * Make sure that from cont_pkt64 none of 2394 * 64-bit specific fields used for 32-bit 2395 * addressing. Cast to (cont_entry_t *) for 2396 * that. 2397 */ 2398 2399 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2400 2401 cont_pkt64->entry_count = 1; 2402 cont_pkt64->sys_define = 0; 2403 2404 if (enable_64bit_addressing) { 2405 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2406 dword_ptr = 2407 (uint32_t *)&cont_pkt64->dseg_0_address; 2408 } else { 2409 cont_pkt64->entry_type = CONTINUE_TYPE; 2410 dword_ptr = 2411 (uint32_t *)&((cont_entry_t *) 2412 cont_pkt64)->dseg_0_address; 2413 } 2414 2415 /* Load continuation entry data segments */ 2416 for (cnt = 0; 2417 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt; 2418 cnt++, prm->seg_cnt--) { 2419 *dword_ptr++ = 2420 cpu_to_le32(pci_dma_lo32 2421 (sg_dma_address(prm->sg))); 2422 if (enable_64bit_addressing) { 2423 *dword_ptr++ = 2424 cpu_to_le32(pci_dma_hi32 2425 (sg_dma_address 2426 (prm->sg))); 2427 } 2428 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2429 2430 prm->sg = sg_next(prm->sg); 2431 } 2432 } 2433 } 2434 2435 /* 2436 * ha->hardware_lock supposed to be held on entry. We have already made sure 2437 * that there is sufficient amount of request entries to not drop it. 
2438 */ 2439 static void qlt_load_data_segments(struct qla_tgt_prm *prm, 2440 struct scsi_qla_host *vha) 2441 { 2442 int cnt; 2443 uint32_t *dword_ptr; 2444 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; 2445 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2446 2447 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2448 2449 /* Setup packet address segment pointer */ 2450 dword_ptr = pkt24->u.status0.dseg_0_address; 2451 2452 /* Set total data segment count */ 2453 if (prm->seg_cnt) 2454 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2455 2456 if (prm->seg_cnt == 0) { 2457 /* No data transfer */ 2458 *dword_ptr++ = 0; 2459 *dword_ptr = 0; 2460 return; 2461 } 2462 2463 /* If scatter gather */ 2464 2465 /* Load command entry data segments */ 2466 for (cnt = 0; 2467 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt; 2468 cnt++, prm->seg_cnt--) { 2469 *dword_ptr++ = 2470 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); 2471 if (enable_64bit_addressing) { 2472 *dword_ptr++ = 2473 cpu_to_le32(pci_dma_hi32( 2474 sg_dma_address(prm->sg))); 2475 } 2476 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2477 2478 prm->sg = sg_next(prm->sg); 2479 } 2480 2481 qlt_load_cont_data_segments(prm, vha); 2482 } 2483 2484 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2485 { 2486 return cmd->bufflen > 0; 2487 } 2488 2489 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2490 { 2491 struct qla_tgt_cmd *cmd; 2492 struct scsi_qla_host *vha; 2493 2494 /* asc 0x10=dif error */ 2495 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2496 cmd = prm->cmd; 2497 vha = cmd->vha; 2498 /* ASCQ */ 2499 switch (prm->sense_buffer[13]) { 2500 case 1: 2501 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2502 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2503 "se_cmd=%p tag[%x]", 2504 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2505 cmd->atio.u.isp24.exchange_addr); 2506 break; 2507 case 2: 2508 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2509 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2510 "se_cmd=%p tag[%x]", 2511 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2512 cmd->atio.u.isp24.exchange_addr); 2513 break; 2514 case 3: 2515 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2516 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2517 "se_cmd=%p tag[%x]", 2518 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2519 cmd->atio.u.isp24.exchange_addr); 2520 break; 2521 default: 2522 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 2523 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2524 "se_cmd=%p tag[%x]", 2525 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2526 cmd->atio.u.isp24.exchange_addr); 2527 break; 2528 } 2529 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); 2530 } 2531 } 2532 2533 /* 2534 * Called without ha->hardware_lock held 2535 */ 2536 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2537 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2538 uint32_t *full_req_cnt) 2539 { 2540 struct qla_tgt *tgt = cmd->tgt; 2541 struct scsi_qla_host *vha = tgt->vha; 2542 struct qla_hw_data *ha = vha->hw; 2543 struct se_cmd *se_cmd = &cmd->se_cmd; 2544 2545 prm->cmd = cmd; 2546 prm->tgt = tgt; 2547 prm->rq_result = scsi_status; 2548 prm->sense_buffer = &cmd->sense_buffer[0]; 2549 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2550 prm->sg = NULL; 2551 prm->seg_cnt = -1; 2552 prm->req_cnt = 1; 2553 prm->add_status_pkt = 0; 2554 2555 /* Send marker if required */ 2556 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2557 return -EFAULT; 2558 
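	/*
	 * If the response moves data, map the scatterlist now and work out
	 * how many request-ring entries it will consume.
	 */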
2559 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2560 if (qlt_pci_map_calc_cnt(prm) != 0) 2561 return -EAGAIN; 2562 } 2563 2564 *full_req_cnt = prm->req_cnt; 2565 2566 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2567 prm->residual = se_cmd->residual_count; 2568 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, 2569 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2570 prm->residual, se_cmd->tag, 2571 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2572 cmd->bufflen, prm->rq_result); 2573 prm->rq_result |= SS_RESIDUAL_UNDER; 2574 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2575 prm->residual = se_cmd->residual_count; 2576 ql_dbg(ql_dbg_io, vha, 0x305d, 2577 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2578 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2579 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2580 prm->rq_result |= SS_RESIDUAL_OVER; 2581 } 2582 2583 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2584 /* 2585 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2586 * ignored in *xmit_response() below 2587 */ 2588 if (qlt_has_data(cmd)) { 2589 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2590 (IS_FWI2_CAPABLE(ha) && 2591 (prm->rq_result != 0))) { 2592 prm->add_status_pkt = 1; 2593 (*full_req_cnt)++; 2594 } 2595 } 2596 } 2597 2598 return 0; 2599 } 2600 2601 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, 2602 struct qla_tgt_cmd *cmd, int sending_sense) 2603 { 2604 if (ha->tgt.enable_class_2) 2605 return 0; 2606 2607 if (sending_sense) 2608 return cmd->conf_compl_supported; 2609 else 2610 return ha->tgt.enable_explicit_conf && 2611 cmd->conf_compl_supported; 2612 } 2613 2614 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2615 struct qla_tgt_prm *prm) 2616 { 2617 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2618 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2619 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2620 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { 2621 ctio->u.status0.flags |= cpu_to_le16( 2622 CTIO7_FLAGS_EXPLICIT_CONFORM | 2623 CTIO7_FLAGS_CONFORM_REQ); 2624 } 2625 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2626 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2627 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2628 int i; 2629 2630 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { 2631 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2632 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, 2633 "Skipping EXPLICIT_CONFORM and " 2634 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2635 "non GOOD status\n"); 2636 goto skip_explict_conf; 2637 } 2638 ctio->u.status1.flags |= cpu_to_le16( 2639 CTIO7_FLAGS_EXPLICIT_CONFORM | 2640 CTIO7_FLAGS_CONFORM_REQ); 2641 } 2642 skip_explict_conf: 2643 ctio->u.status1.flags &= 2644 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2645 ctio->u.status1.flags |= 2646 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2647 ctio->u.status1.scsi_status |= 2648 cpu_to_le16(SS_SENSE_LEN_VALID); 2649 ctio->u.status1.sense_length = 2650 cpu_to_le16(prm->sense_buffer_len); 2651 for (i = 0; i < prm->sense_buffer_len/4; i++) 2652 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2653 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2654 2655 qlt_print_dif_err(prm); 2656 2657 } else { 2658 ctio->u.status1.flags &= 2659 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2660 ctio->u.status1.flags |= 2661 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2662 ctio->u.status1.sense_length = 0; 2663 
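		/*
		 * No valid sense to report: clear the sense area of the
		 * status IOCB.
		 */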
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24: is that even possible? */
}

static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}

static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		return 1;
	default:
		return 0;
	}
	return 0;
}

/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
	uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * Wait until Mode Sense/Select (modepage Ah, subpage 2) has been
	 * implemented by TCM before the App tag becomes available.
	 * Look for modesense_handlers[].
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
2760 */ 2761 ctx->ref_tag = cpu_to_le32(lba); 2762 if (!qla_tgt_ref_mask_check(se_cmd) || 2763 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2764 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2765 break; 2766 } 2767 /* enable ALL bytes of the ref tag */ 2768 ctx->ref_tag_mask[0] = 0xff; 2769 ctx->ref_tag_mask[1] = 0xff; 2770 ctx->ref_tag_mask[2] = 0xff; 2771 ctx->ref_tag_mask[3] = 0xff; 2772 break; 2773 case TARGET_DIF_TYPE2_PROT: 2774 /* 2775 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2776 * tag has to match LBA in CDB + N 2777 */ 2778 ctx->ref_tag = cpu_to_le32(lba); 2779 if (!qla_tgt_ref_mask_check(se_cmd) || 2780 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2781 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2782 break; 2783 } 2784 /* enable ALL bytes of the ref tag */ 2785 ctx->ref_tag_mask[0] = 0xff; 2786 ctx->ref_tag_mask[1] = 0xff; 2787 ctx->ref_tag_mask[2] = 0xff; 2788 ctx->ref_tag_mask[3] = 0xff; 2789 break; 2790 case TARGET_DIF_TYPE3_PROT: 2791 /* For TYPE 3 protection: 16 bit GUARD only */ 2792 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2793 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2794 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2795 break; 2796 } 2797 } 2798 2799 static inline int 2800 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) 2801 { 2802 uint32_t *cur_dsd; 2803 uint32_t transfer_length = 0; 2804 uint32_t data_bytes; 2805 uint32_t dif_bytes; 2806 uint8_t bundling = 1; 2807 uint8_t *clr_ptr; 2808 struct crc_context *crc_ctx_pkt = NULL; 2809 struct qla_hw_data *ha; 2810 struct ctio_crc2_to_fw *pkt; 2811 dma_addr_t crc_ctx_dma; 2812 uint16_t fw_prot_opts = 0; 2813 struct qla_tgt_cmd *cmd = prm->cmd; 2814 struct se_cmd *se_cmd = &cmd->se_cmd; 2815 uint32_t h; 2816 struct atio_from_isp *atio = &prm->cmd->atio; 2817 struct qla_tc_param tc; 2818 uint16_t t16; 2819 2820 ha = vha->hw; 2821 2822 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; 2823 prm->pkt = pkt; 2824 memset(pkt, 0, sizeof(*pkt)); 2825 2826 ql_dbg(ql_dbg_tgt, vha, 0xe071, 2827 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 2828 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, 2829 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 2830 2831 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 2832 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 2833 bundling = 0; 2834 2835 /* Compute dif len and adjust data len to incude protection */ 2836 data_bytes = cmd->bufflen; 2837 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 2838 2839 switch (se_cmd->prot_op) { 2840 case TARGET_PROT_DIN_INSERT: 2841 case TARGET_PROT_DOUT_STRIP: 2842 transfer_length = data_bytes; 2843 if (cmd->prot_sg_cnt) 2844 data_bytes += dif_bytes; 2845 break; 2846 case TARGET_PROT_DIN_STRIP: 2847 case TARGET_PROT_DOUT_INSERT: 2848 case TARGET_PROT_DIN_PASS: 2849 case TARGET_PROT_DOUT_PASS: 2850 transfer_length = data_bytes + dif_bytes; 2851 break; 2852 default: 2853 BUG(); 2854 break; 2855 } 2856 2857 if (!qlt_hba_err_chk_enabled(se_cmd)) 2858 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 2859 /* HBA error checking enabled */ 2860 else if (IS_PI_UNINIT_CAPABLE(ha)) { 2861 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2862 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2863 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 2864 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2865 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2866 } 2867 2868 switch (se_cmd->prot_op) { 2869 case TARGET_PROT_DIN_INSERT: 2870 case TARGET_PROT_DOUT_INSERT: 2871 fw_prot_opts |= PO_MODE_DIF_INSERT; 2872 break; 2873 case 
TARGET_PROT_DIN_STRIP: 2874 case TARGET_PROT_DOUT_STRIP: 2875 fw_prot_opts |= PO_MODE_DIF_REMOVE; 2876 break; 2877 case TARGET_PROT_DIN_PASS: 2878 case TARGET_PROT_DOUT_PASS: 2879 fw_prot_opts |= PO_MODE_DIF_PASS; 2880 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 2881 break; 2882 default:/* Normal Request */ 2883 fw_prot_opts |= PO_MODE_DIF_PASS; 2884 break; 2885 } 2886 2887 /* ---- PKT ---- */ 2888 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2889 pkt->entry_type = CTIO_CRC2; 2890 pkt->entry_count = 1; 2891 pkt->vp_index = vha->vp_idx; 2892 2893 h = qlt_make_handle(vha); 2894 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2895 /* 2896 * CTIO type 7 from the firmware doesn't provide a way to 2897 * know the initiator's LOOP ID, hence we can't find 2898 * the session and, so, the command. 2899 */ 2900 return -EAGAIN; 2901 } else 2902 ha->tgt.cmds[h-1] = prm->cmd; 2903 2904 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2905 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2906 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2907 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2908 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2909 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2910 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2911 2912 /* silence compile warning */ 2913 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2914 pkt->ox_id = cpu_to_le16(t16); 2915 2916 t16 = (atio->u.isp24.attr << 9); 2917 pkt->flags |= cpu_to_le16(t16); 2918 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2919 2920 /* Set transfer direction */ 2921 if (cmd->dma_data_direction == DMA_TO_DEVICE) 2922 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 2923 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2924 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2925 2926 pkt->dseg_count = prm->tot_dsds; 2927 /* Fibre channel byte count */ 2928 pkt->transfer_length = cpu_to_le32(transfer_length); 2929 2930 /* ----- CRC context -------- */ 2931 2932 /* Allocate CRC context from global pool */ 2933 crc_ctx_pkt = cmd->ctx = 2934 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 2935 2936 if (!crc_ctx_pkt) 2937 goto crc_queuing_error; 2938 2939 /* Zero out CTX area. 
	 */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
		    cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0, sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
		    prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
	    (prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
		    prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	vha->hw->tgt.cmds[h - 1] = NULL;

	return QLA_FUNCTION_FAILED;
}

/*
 * Callback to set up the response for an xmit_type of QLA_TGT_XMIT_DATA
 * and/or QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (cmd->sess && cmd->sess->deleted) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange.
*/ 3033 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3034 else 3035 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); 3036 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3037 return 0; 3038 } 3039 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3040 3041 memset(&prm, 0, sizeof(prm)); 3042 3043 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, 3044 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", 3045 (xmit_type & QLA_TGT_XMIT_STATUS) ? 3046 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 3047 &cmd->se_cmd); 3048 3049 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3050 &full_req_cnt); 3051 if (unlikely(res != 0)) { 3052 return res; 3053 } 3054 3055 spin_lock_irqsave(&ha->hardware_lock, flags); 3056 3057 if (xmit_type == QLA_TGT_XMIT_STATUS) 3058 vha->tgt_counters.core_qla_snd_status++; 3059 else 3060 vha->tgt_counters.core_qla_que_buf++; 3061 3062 if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { 3063 /* 3064 * Either the port is not online or this request was from 3065 * previous life, just abort the processing. 3066 */ 3067 cmd->state = QLA_TGT_STATE_PROCESSED; 3068 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3069 ql_dbg(ql_dbg_async, vha, 0xe101, 3070 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3071 vha->flags.online, qla2x00_reset_active(vha), 3072 cmd->reset_count, ha->chip_reset); 3073 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3074 return 0; 3075 } 3076 3077 /* Does F/W have an IOCBs for this request */ 3078 res = qlt_check_reserve_free_req(vha, full_req_cnt); 3079 if (unlikely(res)) 3080 goto out_unmap_unlock; 3081 3082 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 3083 res = qlt_build_ctio_crc2_pkt(&prm, vha); 3084 else 3085 res = qlt_24xx_build_ctio_pkt(&prm, vha); 3086 if (unlikely(res != 0)) { 3087 vha->req->cnt += full_req_cnt; 3088 goto out_unmap_unlock; 3089 } 3090 3091 pkt = (struct ctio7_to_24xx *)prm.pkt; 3092 3093 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 3094 pkt->u.status0.flags |= 3095 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 3096 CTIO7_FLAGS_STATUS_MODE_0); 3097 3098 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3099 qlt_load_data_segments(&prm, vha); 3100 3101 if (prm.add_status_pkt == 0) { 3102 if (xmit_type & QLA_TGT_XMIT_STATUS) { 3103 pkt->u.status0.scsi_status = 3104 cpu_to_le16(prm.rq_result); 3105 pkt->u.status0.residual = 3106 cpu_to_le32(prm.residual); 3107 pkt->u.status0.flags |= cpu_to_le16( 3108 CTIO7_FLAGS_SEND_STATUS); 3109 if (qlt_need_explicit_conf(ha, cmd, 0)) { 3110 pkt->u.status0.flags |= 3111 cpu_to_le16( 3112 CTIO7_FLAGS_EXPLICIT_CONFORM | 3113 CTIO7_FLAGS_CONFORM_REQ); 3114 } 3115 } 3116 3117 } else { 3118 /* 3119 * We have already made sure that there is sufficient 3120 * amount of request entries to not drop HW lock in 3121 * req_pkt(). 
			 */
			struct ctio7_to_24xx *ctio =
			    (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/*
			 * qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);

int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
3208 */ 3209 cmd->state = QLA_TGT_STATE_NEED_DATA; 3210 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3211 ql_dbg(ql_dbg_async, vha, 0xe102, 3212 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 3213 vha->flags.online, qla2x00_reset_active(vha), 3214 cmd->reset_count, ha->chip_reset); 3215 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3216 return 0; 3217 } 3218 3219 /* Does F/W have an IOCBs for this request */ 3220 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 3221 if (res != 0) 3222 goto out_unlock_free_unmap; 3223 if (cmd->se_cmd.prot_op) 3224 res = qlt_build_ctio_crc2_pkt(&prm, vha); 3225 else 3226 res = qlt_24xx_build_ctio_pkt(&prm, vha); 3227 3228 if (unlikely(res != 0)) { 3229 vha->req->cnt += prm.req_cnt; 3230 goto out_unlock_free_unmap; 3231 } 3232 3233 pkt = (struct ctio7_to_24xx *)prm.pkt; 3234 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 3235 CTIO7_FLAGS_STATUS_MODE_0); 3236 3237 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3238 qlt_load_data_segments(&prm, vha); 3239 3240 cmd->state = QLA_TGT_STATE_NEED_DATA; 3241 cmd->cmd_sent_to_fw = 1; 3242 3243 /* Memory Barrier */ 3244 wmb(); 3245 qla2x00_start_iocbs(vha, vha->req); 3246 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3247 3248 return res; 3249 3250 out_unlock_free_unmap: 3251 qlt_unmap_sg(vha, cmd); 3252 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3253 3254 return res; 3255 } 3256 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3257 3258 3259 /* 3260 * it is assumed either hardware_lock or qpair lock is held. 3261 */ 3262 static void 3263 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 3264 struct ctio_crc_from_fw *sts) 3265 { 3266 uint8_t *ap = &sts->actual_dif[0]; 3267 uint8_t *ep = &sts->expected_dif[0]; 3268 uint64_t lba = cmd->se_cmd.t_task_lba; 3269 uint8_t scsi_status, sense_key, asc, ascq; 3270 unsigned long flags; 3271 3272 cmd->trc_flags |= TRC_DIF_ERR; 3273 3274 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3275 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3276 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3277 3278 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3279 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3280 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3281 3282 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3283 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3284 3285 scsi_status = sense_key = asc = ascq = 0; 3286 3287 /* check appl tag */ 3288 if (cmd->e_app_tag != cmd->a_app_tag) { 3289 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3290 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3291 "Ref[%x|%x], App[%x|%x], " 3292 "Guard [%x|%x] cmd=%p ox_id[%04x]", 3293 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3294 cmd->a_ref_tag, cmd->e_ref_tag, 3295 cmd->a_app_tag, cmd->e_app_tag, 3296 cmd->a_guard, cmd->e_guard, 3297 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); 3298 3299 cmd->dif_err_code = DIF_ERR_APP; 3300 scsi_status = SAM_STAT_CHECK_CONDITION; 3301 sense_key = ABORTED_COMMAND; 3302 asc = 0x10; 3303 ascq = 0x2; 3304 } 3305 3306 /* check ref tag */ 3307 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3308 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3309 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3310 "Ref[%x|%x], App[%x|%x], " 3311 "Guard[%x|%x] cmd=%p ox_id[%04x] ", 3312 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3313 cmd->a_ref_tag, cmd->e_ref_tag, 3314 cmd->a_app_tag, cmd->e_app_tag, 3315 cmd->a_guard, cmd->e_guard, 3316 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); 3317 
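		/*
		 * Ref tag mismatch: report ABORTED COMMAND with asc/ascq
		 * 0x10/0x03 (LOGICAL BLOCK REFERENCE TAG CHECK FAILED).
		 */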
3318 cmd->dif_err_code = DIF_ERR_REF; 3319 scsi_status = SAM_STAT_CHECK_CONDITION; 3320 sense_key = ABORTED_COMMAND; 3321 asc = 0x10; 3322 ascq = 0x3; 3323 goto out; 3324 } 3325 3326 /* check guard */ 3327 if (cmd->e_guard != cmd->a_guard) { 3328 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3329 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " 3330 "Ref[%x|%x], App[%x|%x], " 3331 "Guard [%x|%x] cmd=%p ox_id[%04x]", 3332 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3333 cmd->a_ref_tag, cmd->e_ref_tag, 3334 cmd->a_app_tag, cmd->e_app_tag, 3335 cmd->a_guard, cmd->e_guard, 3336 cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); 3337 cmd->dif_err_code = DIF_ERR_GRD; 3338 scsi_status = SAM_STAT_CHECK_CONDITION; 3339 sense_key = ABORTED_COMMAND; 3340 asc = 0x10; 3341 ascq = 0x1; 3342 } 3343 out: 3344 switch (cmd->state) { 3345 case QLA_TGT_STATE_NEED_DATA: 3346 /* handle_data will load DIF error code */ 3347 cmd->state = QLA_TGT_STATE_DATA_IN; 3348 vha->hw->tgt.tgt_ops->handle_data(cmd); 3349 break; 3350 default: 3351 spin_lock_irqsave(&cmd->cmd_lock, flags); 3352 if (cmd->aborted) { 3353 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3354 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3355 break; 3356 } 3357 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3358 3359 qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); 3360 /* assume scsi status gets out on the wire. 3361 * Will not wait for completion. 3362 */ 3363 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3364 break; 3365 } 3366 } 3367 3368 /* If hardware_lock held on entry, might drop it, then reaquire */ 3369 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3370 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3371 struct imm_ntfy_from_isp *ntfy) 3372 { 3373 struct nack_to_isp *nack; 3374 struct qla_hw_data *ha = vha->hw; 3375 request_t *pkt; 3376 int ret = 0; 3377 3378 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3379 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3380 3381 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3382 if (pkt == NULL) { 3383 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3384 "qla_target(%d): %s failed: unable to allocate " 3385 "request packet\n", vha->vp_idx, __func__); 3386 return -ENOMEM; 3387 } 3388 3389 pkt->entry_type = NOTIFY_ACK_TYPE; 3390 pkt->entry_count = 1; 3391 pkt->handle = QLA_TGT_SKIP_HANDLE; 3392 3393 nack = (struct nack_to_isp *)pkt; 3394 nack->ox_id = ntfy->ox_id; 3395 3396 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3397 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3398 nack->u.isp24.flags = ntfy->u.isp24.flags & 3399 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3400 } 3401 3402 /* terminate */ 3403 nack->u.isp24.flags |= 3404 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3405 3406 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3407 nack->u.isp24.status = ntfy->u.isp24.status; 3408 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3409 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3410 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3411 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3412 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3413 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3414 3415 qla2x00_start_iocbs(vha, vha->req); 3416 return ret; 3417 } 3418 3419 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3420 struct imm_ntfy_from_isp *imm, int ha_locked) 3421 { 3422 unsigned long flags = 0; 3423 int rc; 3424 3425 if (qlt_issue_marker(vha, ha_locked) < 0) 3426 return; 3427 3428 if 
(ha_locked) { 3429 rc = __qlt_send_term_imm_notif(vha, imm); 3430 3431 #if 0 /* Todo */ 3432 if (rc == -ENOMEM) 3433 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3434 #else 3435 if (rc) { 3436 } 3437 #endif 3438 goto done; 3439 } 3440 3441 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 3442 rc = __qlt_send_term_imm_notif(vha, imm); 3443 3444 #if 0 /* Todo */ 3445 if (rc == -ENOMEM) 3446 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3447 #endif 3448 3449 done: 3450 if (!ha_locked) 3451 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3452 } 3453 3454 /* If hardware_lock held on entry, might drop it, then reaquire */ 3455 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3456 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 3457 struct qla_tgt_cmd *cmd, 3458 struct atio_from_isp *atio) 3459 { 3460 struct ctio7_to_24xx *ctio24; 3461 struct qla_hw_data *ha = vha->hw; 3462 request_t *pkt; 3463 int ret = 0; 3464 uint16_t temp; 3465 3466 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3467 3468 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); 3469 if (pkt == NULL) { 3470 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3471 "qla_target(%d): %s failed: unable to allocate " 3472 "request packet\n", vha->vp_idx, __func__); 3473 return -ENOMEM; 3474 } 3475 3476 if (cmd != NULL) { 3477 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3478 ql_dbg(ql_dbg_tgt, vha, 0xe051, 3479 "qla_target(%d): Terminating cmd %p with " 3480 "incorrect state %d\n", vha->vp_idx, cmd, 3481 cmd->state); 3482 } else 3483 ret = 1; 3484 } 3485 3486 vha->tgt_counters.num_term_xchg_sent++; 3487 pkt->entry_count = 1; 3488 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3489 3490 ctio24 = (struct ctio7_to_24xx *)pkt; 3491 ctio24->entry_type = CTIO_TYPE7; 3492 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3493 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3494 ctio24->vp_index = vha->vp_idx; 3495 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3496 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3497 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3498 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3499 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 3500 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 3501 CTIO7_FLAGS_TERMINATE); 3502 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3503 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3504 3505 /* Most likely, it isn't needed */ 3506 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3507 &atio->u.isp24.fcp_cmnd.add_cdb[ 3508 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3509 if (ctio24->u.status1.residual != 0) 3510 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3511 3512 /* Memory Barrier */ 3513 wmb(); 3514 qla2x00_start_iocbs(vha, vha->req); 3515 return ret; 3516 } 3517 3518 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 3519 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, 3520 int ul_abort) 3521 { 3522 unsigned long flags = 0; 3523 int rc; 3524 3525 if (qlt_issue_marker(vha, ha_locked) < 0) 3526 return; 3527 3528 if (ha_locked) { 3529 rc = __qlt_send_term_exchange(vha, cmd, atio); 3530 if (rc == -ENOMEM) 3531 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3532 goto done; 3533 } 3534 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 3535 rc = __qlt_send_term_exchange(vha, cmd, atio); 3536 if (rc == -ENOMEM) 3537 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3538 3539 done: 3540 if (cmd && !ul_abort && !cmd->aborted) { 3541 if (cmd->sg_mapped) 3542 qlt_unmap_sg(vha, 
cmd); 3543 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3544 } 3545 3546 if (!ha_locked) 3547 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3548 3549 return; 3550 } 3551 3552 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3553 { 3554 struct list_head free_list; 3555 struct qla_tgt_cmd *cmd, *tcmd; 3556 3557 vha->hw->tgt.leak_exchg_thresh_hold = 3558 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3559 3560 cmd = tcmd = NULL; 3561 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3562 INIT_LIST_HEAD(&free_list); 3563 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3564 3565 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3566 list_del(&cmd->cmd_list); 3567 /* This cmd was never sent to TCM. There is no need 3568 * to schedule free or call free_cmd 3569 */ 3570 qlt_free_cmd(cmd); 3571 vha->hw->tgt.num_qfull_cmds_alloc--; 3572 } 3573 } 3574 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3575 } 3576 3577 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3578 { 3579 uint32_t total_leaked; 3580 3581 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3582 3583 if (vha->hw->tgt.leak_exchg_thresh_hold && 3584 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3585 3586 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3587 "Chip reset due to exchange starvation: %d/%d.\n", 3588 total_leaked, vha->hw->cur_fw_xcb_count); 3589 3590 if (IS_P3P_TYPE(vha->hw)) 3591 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3592 else 3593 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3594 qla2xxx_wake_dpc(vha); 3595 } 3596 3597 } 3598 3599 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3600 { 3601 struct qla_tgt *tgt = cmd->tgt; 3602 struct scsi_qla_host *vha = tgt->vha; 3603 struct se_cmd *se_cmd = &cmd->se_cmd; 3604 unsigned long flags; 3605 3606 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3607 "qla_target(%d): terminating exchange for aborted cmd=%p " 3608 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3609 se_cmd->tag); 3610 3611 spin_lock_irqsave(&cmd->cmd_lock, flags); 3612 if (cmd->aborted) { 3613 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3614 /* 3615 * It's normal to see 2 calls in this path: 3616 * 1) XFER Rdy completion + CMD_T_ABORT 3617 * 2) TCM TMR - drain_state_list 3618 */ 3619 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 3620 "multiple abort. 
%p transport_state %x, t_state %x," 3621 " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state, 3622 cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags); 3623 return EIO; 3624 } 3625 cmd->aborted = 1; 3626 cmd->trc_flags |= TRC_ABORT; 3627 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3628 3629 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); 3630 return 0; 3631 } 3632 EXPORT_SYMBOL(qlt_abort_cmd); 3633 3634 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3635 { 3636 struct fc_port *sess = cmd->sess; 3637 3638 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3639 "%s: se_cmd[%p] ox_id %04x\n", 3640 __func__, &cmd->se_cmd, 3641 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3642 3643 BUG_ON(cmd->cmd_in_wq); 3644 3645 if (cmd->sg_mapped) 3646 qlt_unmap_sg(cmd->vha, cmd); 3647 3648 if (!cmd->q_full) 3649 qlt_decr_num_pend_cmds(cmd->vha); 3650 3651 BUG_ON(cmd->sg_mapped); 3652 cmd->jiffies_at_free = get_jiffies_64(); 3653 if (unlikely(cmd->free_sg)) 3654 kfree(cmd->sg); 3655 3656 if (!sess || !sess->se_sess) { 3657 WARN_ON(1); 3658 return; 3659 } 3660 cmd->jiffies_at_free = get_jiffies_64(); 3661 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3662 } 3663 EXPORT_SYMBOL(qlt_free_cmd); 3664 3665 /* 3666 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3667 */ 3668 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 3669 struct qla_tgt_cmd *cmd, uint32_t status) 3670 { 3671 int term = 0; 3672 3673 if (cmd->se_cmd.prot_op) 3674 ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, 3675 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3676 "se_cmd=%p tag[%x] op %#x/%s", 3677 cmd->lba, cmd->lba, 3678 cmd->num_blks, &cmd->se_cmd, 3679 cmd->atio.u.isp24.exchange_addr, 3680 cmd->se_cmd.prot_op, 3681 prot_op_str(cmd->se_cmd.prot_op)); 3682 3683 if (ctio != NULL) { 3684 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3685 term = !(c->flags & 3686 cpu_to_le16(OF_TERM_EXCH)); 3687 } else 3688 term = 1; 3689 3690 if (term) 3691 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); 3692 3693 return term; 3694 } 3695 3696 /* ha->hardware_lock supposed to be held on entry */ 3697 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 3698 uint32_t handle) 3699 { 3700 struct qla_hw_data *ha = vha->hw; 3701 3702 handle--; 3703 if (ha->tgt.cmds[handle] != NULL) { 3704 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 3705 ha->tgt.cmds[handle] = NULL; 3706 return cmd; 3707 } else 3708 return NULL; 3709 } 3710 3711 /* ha->hardware_lock supposed to be held on entry */ 3712 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3713 uint32_t handle, void *ctio) 3714 { 3715 struct qla_tgt_cmd *cmd = NULL; 3716 3717 /* Clear out internal marks */ 3718 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 3719 CTIO_INTERMEDIATE_HANDLE_MARK); 3720 3721 if (handle != QLA_TGT_NULL_HANDLE) { 3722 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) 3723 return NULL; 3724 3725 /* handle-1 is actually used */ 3726 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 3727 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3728 "qla_target(%d): Wrong handle %x received\n", 3729 vha->vp_idx, handle); 3730 return NULL; 3731 } 3732 cmd = qlt_get_cmd(vha, handle); 3733 if (unlikely(cmd == NULL)) { 3734 ql_dbg(ql_dbg_tgt, vha, 0xe053, 3735 "qla_target(%d): Suspicious: unable to " 3736 "find the command with handle %x\n", vha->vp_idx, 3737 handle); 3738 return NULL; 3739 } 3740 } else if (ctio != NULL) { 3741 /* We can't get loop ID from CTIO7 */ 3742 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3743 
"qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3744 "support NULL handles\n", vha->vp_idx); 3745 return NULL; 3746 } 3747 3748 return cmd; 3749 } 3750 3751 /* hardware_lock should be held by caller. */ 3752 static void 3753 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 3754 { 3755 struct qla_hw_data *ha = vha->hw; 3756 uint32_t handle; 3757 3758 if (cmd->sg_mapped) 3759 qlt_unmap_sg(vha, cmd); 3760 3761 handle = qlt_make_handle(vha); 3762 3763 /* TODO: fix debug message type and ids. */ 3764 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3765 ql_dbg(ql_dbg_io, vha, 0xff00, 3766 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle); 3767 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3768 cmd->write_data_transferred = 0; 3769 cmd->state = QLA_TGT_STATE_DATA_IN; 3770 3771 ql_dbg(ql_dbg_io, vha, 0xff01, 3772 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle); 3773 3774 ha->tgt.tgt_ops->handle_data(cmd); 3775 return; 3776 } else { 3777 ql_dbg(ql_dbg_io, vha, 0xff03, 3778 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, 3779 cmd->state); 3780 dump_stack(); 3781 } 3782 3783 cmd->trc_flags |= TRC_FLUSH; 3784 ha->tgt.tgt_ops->free_cmd(cmd); 3785 } 3786 3787 void 3788 qlt_host_reset_handler(struct qla_hw_data *ha) 3789 { 3790 struct qla_tgt_cmd *cmd; 3791 unsigned long flags; 3792 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3793 scsi_qla_host_t *vha = NULL; 3794 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt; 3795 uint32_t i; 3796 3797 if (!base_vha->hw->tgt.tgt_ops) 3798 return; 3799 3800 if (!tgt || qla_ini_mode_enabled(base_vha)) { 3801 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, 3802 "Target mode disabled\n"); 3803 return; 3804 } 3805 3806 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10, 3807 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n", 3808 base_vha->dpc_flags); 3809 3810 spin_lock_irqsave(&ha->hardware_lock, flags); 3811 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) { 3812 cmd = qlt_get_cmd(base_vha, i); 3813 if (!cmd) 3814 continue; 3815 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */ 3816 vha = cmd->vha; 3817 qlt_abort_cmd_on_host_reset(vha, cmd); 3818 } 3819 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3820 } 3821 3822 3823 /* 3824 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire 3825 */ 3826 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 3827 uint32_t status, void *ctio) 3828 { 3829 struct qla_hw_data *ha = vha->hw; 3830 struct se_cmd *se_cmd; 3831 struct qla_tgt_cmd *cmd; 3832 3833 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3834 /* That could happen only in case of an error/reset/abort */ 3835 if (status != CTIO_SUCCESS) { 3836 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3837 "Intermediate CTIO received" 3838 " (status %x)\n", status); 3839 } 3840 return; 3841 } 3842 3843 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 3844 if (cmd == NULL) 3845 return; 3846 3847 se_cmd = &cmd->se_cmd; 3848 cmd->cmd_sent_to_fw = 0; 3849 3850 qlt_unmap_sg(vha, cmd); 3851 3852 if (unlikely(status != CTIO_SUCCESS)) { 3853 switch (status & 0xFFFF) { 3854 case CTIO_LIP_RESET: 3855 case CTIO_TARGET_RESET: 3856 case CTIO_ABORTED: 3857 /* driver requested abort via Terminate exchange */ 3858 case CTIO_TIMEOUT: 3859 case CTIO_INVALID_RX_ID: 3860 /* They are OK */ 3861 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3862 "qla_target(%d): CTIO with " 3863 "status %#x received, state %x, se_cmd %p, " 3864 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3865 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3866 status, cmd->state, se_cmd); 3867 break; 3868 3869 case CTIO_PORT_LOGGED_OUT: 3870 case CTIO_PORT_UNAVAILABLE: 3871 { 3872 int logged_out = 3873 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; 3874 3875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3876 "qla_target(%d): CTIO with %s status %x " 3877 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3878 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3879 status, cmd->state, se_cmd); 3880 3881 if (logged_out && cmd->sess) { 3882 /* 3883 * Session is already logged out, but we need 3884 * to notify initiator, who's not aware of this 3885 */ 3886 cmd->sess->logout_on_delete = 0; 3887 cmd->sess->send_els_logo = 1; 3888 ql_dbg(ql_dbg_disc, vha, 0xffff, 3889 "%s %d %8phC post del sess\n", 3890 __func__, __LINE__, cmd->sess->port_name); 3891 3892 qlt_schedule_sess_for_deletion_lock(cmd->sess); 3893 } 3894 break; 3895 } 3896 case CTIO_DIF_ERROR: { 3897 struct ctio_crc_from_fw *crc = 3898 (struct ctio_crc_from_fw *)ctio; 3899 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3900 "qla_target(%d): CTIO with DIF_ERROR status %x " 3901 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 3902 "expect_dif[0x%llx]\n", 3903 vha->vp_idx, status, cmd->state, se_cmd, 3904 *((u64 *)&crc->actual_dif[0]), 3905 *((u64 *)&crc->expected_dif[0])); 3906 3907 qlt_handle_dif_error(vha, cmd, ctio); 3908 return; 3909 } 3910 default: 3911 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3912 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n", 3913 vha->vp_idx, status, cmd->state, se_cmd); 3914 break; 3915 } 3916 3917 3918 /* "cmd->aborted" means 3919 * cmd is already aborted/terminated, we don't 3920 * need to terminate again. The exchange is already 3921 * cleaned up/freed at FW level. Just cleanup at driver 3922 * level.
3923 */ 3924 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3925 (!cmd->aborted)) { 3926 cmd->trc_flags |= TRC_CTIO_ERR; 3927 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3928 return; 3929 } 3930 } 3931 3932 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3933 cmd->trc_flags |= TRC_CTIO_DONE; 3934 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3935 cmd->state = QLA_TGT_STATE_DATA_IN; 3936 3937 if (status == CTIO_SUCCESS) 3938 cmd->write_data_transferred = 1; 3939 3940 ha->tgt.tgt_ops->handle_data(cmd); 3941 return; 3942 } else if (cmd->aborted) { 3943 cmd->trc_flags |= TRC_CTIO_ABORTED; 3944 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3945 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3946 } else { 3947 cmd->trc_flags |= TRC_CTIO_STRANGE; 3948 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3949 "qla_target(%d): A command in state (%d) should " 3950 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3951 } 3952 3953 if (unlikely(status != CTIO_SUCCESS) && 3954 !cmd->aborted) { 3955 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3956 dump_stack(); 3957 } 3958 3959 ha->tgt.tgt_ops->free_cmd(cmd); 3960 } 3961 3962 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 3963 uint8_t task_codes) 3964 { 3965 int fcp_task_attr; 3966 3967 switch (task_codes) { 3968 case ATIO_SIMPLE_QUEUE: 3969 fcp_task_attr = TCM_SIMPLE_TAG; 3970 break; 3971 case ATIO_HEAD_OF_QUEUE: 3972 fcp_task_attr = TCM_HEAD_TAG; 3973 break; 3974 case ATIO_ORDERED_QUEUE: 3975 fcp_task_attr = TCM_ORDERED_TAG; 3976 break; 3977 case ATIO_ACA_QUEUE: 3978 fcp_task_attr = TCM_ACA_TAG; 3979 break; 3980 case ATIO_UNTAGGED: 3981 fcp_task_attr = TCM_SIMPLE_TAG; 3982 break; 3983 default: 3984 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3985 "qla_target: unknown task code %x, use ORDERED instead\n", 3986 task_codes); 3987 fcp_task_attr = TCM_ORDERED_TAG; 3988 break; 3989 } 3990 3991 return fcp_task_attr; 3992 } 3993 3994 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, 3995 uint8_t *); 3996 /* 3997 * Process context for I/O path into tcm_qla2xxx code 3998 */ 3999 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 4000 { 4001 scsi_qla_host_t *vha = cmd->vha; 4002 struct qla_hw_data *ha = vha->hw; 4003 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4004 struct fc_port *sess = cmd->sess; 4005 struct atio_from_isp *atio = &cmd->atio; 4006 unsigned char *cdb; 4007 unsigned long flags; 4008 uint32_t data_length; 4009 int ret, fcp_task_attr, data_dir, bidi = 0; 4010 4011 cmd->cmd_in_wq = 0; 4012 cmd->trc_flags |= TRC_DO_WORK; 4013 if (tgt->tgt_stop) 4014 goto out_term; 4015 4016 if (cmd->aborted) { 4017 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 4018 "cmd with tag %u is aborted\n", 4019 cmd->atio.u.isp24.exchange_addr); 4020 goto out_term; 4021 } 4022 4023 spin_lock_init(&cmd->cmd_lock); 4024 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 4025 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 4026 cmd->unpacked_lun = scsilun_to_int( 4027 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4028 4029 if (atio->u.isp24.fcp_cmnd.rddata && 4030 atio->u.isp24.fcp_cmnd.wrdata) { 4031 bidi = 1; 4032 data_dir = DMA_TO_DEVICE; 4033 } else if (atio->u.isp24.fcp_cmnd.rddata) 4034 data_dir = DMA_FROM_DEVICE; 4035 else if (atio->u.isp24.fcp_cmnd.wrdata) 4036 data_dir = DMA_TO_DEVICE; 4037 else 4038 data_dir = DMA_NONE; 4039 4040 fcp_task_attr = qlt_get_fcp_task_attr(vha, 4041 atio->u.isp24.fcp_cmnd.task_attr); 4042 data_length = be32_to_cpu(get_unaligned((uint32_t *) 4043 &atio->u.isp24.fcp_cmnd.add_cdb[ 4044 
atio->u.isp24.fcp_cmnd.add_cdb_len])); 4045 4046 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 4047 fcp_task_attr, data_dir, bidi); 4048 if (ret != 0) 4049 goto out_term; 4050 /* 4051 * Drop the extra session reference taken in qlt_handle_cmd_for_atio(). 4052 */ 4053 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4054 ha->tgt.tgt_ops->put_sess(sess); 4055 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4056 return; 4057 4058 out_term: 4059 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4060 /* 4061 * cmd has not been sent to the target yet, so pass NULL as the second 4062 * argument to qlt_send_term_exchange() and free the memory here. 4063 */ 4064 cmd->trc_flags |= TRC_DO_WORK_ERR; 4065 spin_lock_irqsave(&ha->hardware_lock, flags); 4066 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); 4067 4068 qlt_decr_num_pend_cmds(vha); 4069 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 4070 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4071 4072 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4073 ha->tgt.tgt_ops->put_sess(sess); 4074 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4075 } 4076 4077 static void qlt_do_work(struct work_struct *work) 4078 { 4079 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4080 scsi_qla_host_t *vha = cmd->vha; 4081 unsigned long flags; 4082 4083 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4084 list_del(&cmd->cmd_list); 4085 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4086 4087 __qlt_do_work(cmd); 4088 } 4089 4090 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4091 struct fc_port *sess, 4092 struct atio_from_isp *atio) 4093 { 4094 struct se_session *se_sess = sess->se_sess; 4095 struct qla_tgt_cmd *cmd; 4096 int tag; 4097 4098 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 4099 if (tag < 0) 4100 return NULL; 4101 4102 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 4103 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 4104 4105 memcpy(&cmd->atio, atio, sizeof(*atio)); 4106 cmd->state = QLA_TGT_STATE_NEW; 4107 cmd->tgt = vha->vha_tgt.qla_tgt; 4108 qlt_incr_num_pend_cmds(vha); 4109 cmd->vha = vha; 4110 cmd->se_cmd.map_tag = tag; 4111 cmd->sess = sess; 4112 cmd->loop_id = sess->loop_id; 4113 cmd->conf_compl_supported = sess->conf_compl_supported; 4114 4115 cmd->trc_flags = 0; 4116 cmd->jiffies_at_alloc = get_jiffies_64(); 4117 4118 cmd->reset_count = vha->hw->chip_reset; 4119 4120 return cmd; 4121 } 4122 4123 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, 4124 uint16_t); 4125 4126 static void qlt_create_sess_from_atio(struct work_struct *work) 4127 { 4128 struct qla_tgt_sess_op *op = container_of(work, 4129 struct qla_tgt_sess_op, work); 4130 scsi_qla_host_t *vha = op->vha; 4131 struct qla_hw_data *ha = vha->hw; 4132 struct fc_port *sess; 4133 struct qla_tgt_cmd *cmd; 4134 unsigned long flags; 4135 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 4136 4137 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4138 list_del(&op->cmd_list); 4139 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4140 4141 if (op->aborted) { 4142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083, 4143 "sess_op with tag %u is aborted\n", 4144 op->atio.u.isp24.exchange_addr); 4145 goto out_term; 4146 } 4147 4148 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 4149 "qla_target(%d): Unable to find wwn login" 4150 " (s_id %x:%x:%x), trying to create it manually\n", 4151 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 4152 4153 if (op->atio.u.raw.entry_count > 1) { 4154
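/*
 * Multi-IOCB (continuation) ATIOs cannot be reassembled on this
 * deferred session-creation path; drop the command and terminate
 * the exchange instead.
 */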
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 4155 "Dropping multi-entry ATIO %p\n", &op->atio); 4156 goto out_term; 4157 } 4158 4159 sess = qlt_make_local_sess(vha, s_id); 4160 /* sess has an extra creation ref. */ 4161 4162 if (!sess) 4163 goto out_term; 4164 /* 4165 * Now obtain a pre-allocated session tag using the original op->atio 4166 * packet header, and dispatch into __qlt_do_work() using the existing 4167 * process context. 4168 */ 4169 cmd = qlt_get_tag(vha, sess, &op->atio); 4170 if (!cmd) { 4171 spin_lock_irqsave(&ha->hardware_lock, flags); 4172 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); 4173 ha->tgt.tgt_ops->put_sess(sess); 4174 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4175 kfree(op); 4176 return; 4177 } 4178 4179 /* 4180 * __qlt_do_work() will call qlt_put_sess() to release 4181 * the extra reference taken above by qlt_make_local_sess() 4182 */ 4183 __qlt_do_work(cmd); 4184 kfree(op); 4185 return; 4186 out_term: 4187 spin_lock_irqsave(&ha->hardware_lock, flags); 4188 qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); 4189 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4190 kfree(op); 4191 } 4192 4193 /* ha->hardware_lock supposed to be held on entry */ 4194 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4195 struct atio_from_isp *atio) 4196 { 4197 struct qla_hw_data *ha = vha->hw; 4198 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4199 struct fc_port *sess; 4200 struct qla_tgt_cmd *cmd; 4201 unsigned long flags; 4202 4203 if (unlikely(tgt->tgt_stop)) { 4204 ql_dbg(ql_dbg_io, vha, 0x3061, 4205 "New command while device %p is shutting down\n", tgt); 4206 return -EFAULT; 4207 } 4208 4209 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4210 if (unlikely(!sess)) { 4211 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), 4212 GFP_ATOMIC); 4213 if (!op) 4214 return -ENOMEM; 4215 4216 memcpy(&op->atio, atio, sizeof(*atio)); 4217 op->vha = vha; 4218 4219 spin_lock(&vha->cmd_list_lock); 4220 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); 4221 spin_unlock(&vha->cmd_list_lock); 4222 4223 INIT_WORK(&op->work, qlt_create_sess_from_atio); 4224 queue_work(qla_tgt_wq, &op->work); 4225 return 0; 4226 } 4227 4228 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4229 * session deletion, but it's still in sess_del_work wq */ 4230 if (sess->deleted) { 4231 ql_dbg(ql_dbg_io, vha, 0x3061, 4232 "New command while old session %p is being deleted\n", 4233 sess); 4234 return -EFAULT; 4235 } 4236 4237 /* 4238 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 4239 */ 4240 if (!kref_get_unless_zero(&sess->sess_kref)) { 4241 ql_dbg(ql_dbg_tgt, vha, 0xffff, 4242 "%s: kref_get fail, %8phC oxid %x\n", 4243 __func__, sess->port_name, 4244 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 4245 return -EFAULT; 4246 } 4247 4248 cmd = qlt_get_tag(vha, sess, atio); 4249 if (!cmd) { 4250 ql_dbg(ql_dbg_io, vha, 0x3062, 4251 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4252 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4253 ha->tgt.tgt_ops->put_sess(sess); 4254 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4255 return -ENOMEM; 4256 } 4257 4258 cmd->cmd_in_wq = 1; 4259 cmd->trc_flags |= TRC_NEW_CMD; 4260 cmd->se_cmd.cpuid = ha->msix_count ?
4261 ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; 4262 4263 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4264 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4265 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4266 4267 INIT_WORK(&cmd->work, qlt_do_work); 4268 if (ha->msix_count) { 4269 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4270 queue_work_on(smp_processor_id(), qla_tgt_wq, 4271 &cmd->work); 4272 else 4273 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 4274 &cmd->work); 4275 } else { 4276 queue_work(qla_tgt_wq, &cmd->work); 4277 } 4278 return 0; 4279 4280 } 4281 4282 /* ha->hardware_lock supposed to be held on entry */ 4283 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 4284 int fn, void *iocb, int flags) 4285 { 4286 struct scsi_qla_host *vha = sess->vha; 4287 struct qla_hw_data *ha = vha->hw; 4288 struct qla_tgt_mgmt_cmd *mcmd; 4289 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4290 int res; 4291 4292 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4293 if (!mcmd) { 4294 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4295 "qla_target(%d): Allocation of management " 4296 "command failed; some commands and their data could " 4297 "leak\n", vha->vp_idx); 4298 return -ENOMEM; 4299 } 4300 memset(mcmd, 0, sizeof(*mcmd)); 4301 mcmd->sess = sess; 4302 4303 if (iocb) { 4304 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4305 sizeof(mcmd->orig_iocb.imm_ntfy)); 4306 } 4307 mcmd->tmr_func = fn; 4308 mcmd->flags = flags; 4309 mcmd->reset_count = vha->hw->chip_reset; 4310 4311 switch (fn) { 4312 case QLA_TGT_LUN_RESET: 4313 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4314 break; 4315 } 4316 4317 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); 4318 if (res != 0) { 4319 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 4320 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 4321 sess->vha->vp_idx, res); 4322 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4323 return -EFAULT; 4324 } 4325 4326 return 0; 4327 } 4328 4329 /* ha->hardware_lock supposed to be held on entry */ 4330 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4331 { 4332 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4333 struct qla_hw_data *ha = vha->hw; 4334 struct qla_tgt *tgt; 4335 struct fc_port *sess; 4336 uint32_t lun, unpacked_lun; 4337 int fn; 4338 unsigned long flags; 4339 4340 tgt = vha->vha_tgt.qla_tgt; 4341 4342 lun = a->u.isp24.fcp_cmnd.lun; 4343 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4344 4345 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4346 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4347 a->u.isp24.fcp_hdr.s_id); 4348 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4349 4350 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4351 4352 if (!sess) { 4353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 4354 "qla_target(%d): task mgmt fn 0x%x for " 4355 "non-existent session\n", vha->vp_idx, fn); 4356 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 4357 sizeof(struct atio_from_isp)); 4358 } 4359 4360 if (sess->deleted) 4361 return -EFAULT; 4362 4363 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4364 } 4365 4366 /* ha->hardware_lock supposed to be held on entry */ 4367 static int __qlt_abort_task(struct scsi_qla_host *vha, 4368 struct imm_ntfy_from_isp *iocb, struct fc_port *sess) 4369 { 4370 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4371 struct qla_hw_data *ha = vha->hw; 4372 struct qla_tgt_mgmt_cmd *mcmd; 4373 uint32_t lun, unpacked_lun; 4374 int rc; 4375 4376 mcmd =
mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4377 if (mcmd == NULL) { 4378 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4379 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4380 vha->vp_idx, __func__); 4381 return -ENOMEM; 4382 } 4383 memset(mcmd, 0, sizeof(*mcmd)); 4384 4385 mcmd->sess = sess; 4386 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4387 sizeof(mcmd->orig_iocb.imm_ntfy)); 4388 4389 lun = a->u.isp24.fcp_cmnd.lun; 4390 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4391 mcmd->reset_count = vha->hw->chip_reset; 4392 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; 4393 4394 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, 4395 le16_to_cpu(iocb->u.isp2x.seq_id)); 4396 if (rc != 0) { 4397 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4398 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4399 vha->vp_idx, rc); 4400 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4401 return -EFAULT; 4402 } 4403 4404 return 0; 4405 } 4406 4407 /* ha->hardware_lock supposed to be held on entry */ 4408 static int qlt_abort_task(struct scsi_qla_host *vha, 4409 struct imm_ntfy_from_isp *iocb) 4410 { 4411 struct qla_hw_data *ha = vha->hw; 4412 struct fc_port *sess; 4413 int loop_id; 4414 unsigned long flags; 4415 4416 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4417 4418 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4419 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4420 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4421 4422 if (sess == NULL) { 4423 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4424 "qla_target(%d): task abort for non-existent " 4425 "session\n", vha->vp_idx); 4426 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4427 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4428 } 4429 4430 return __qlt_abort_task(vha, iocb, sess); 4431 } 4432 4433 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4434 { 4435 if (rc != MBS_COMMAND_COMPLETE) { 4436 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4437 "%s: se_sess %p / sess %p from" 4438 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4439 " LOGO failed: %#x\n", 4440 __func__, 4441 fcport->se_sess, 4442 fcport, 4443 fcport->port_name, fcport->loop_id, 4444 fcport->d_id.b.domain, fcport->d_id.b.area, 4445 fcport->d_id.b.al_pa, rc); 4446 } 4447 4448 fcport->logout_completed = 1; 4449 } 4450 4451 /* 4452 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4453 * 4454 * Schedules sessions with matching port_id/loop_id but different wwn for 4455 * deletion. Returns existing session with matching wwn if present. 4456 * NULL otherwise.
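 * A login is identified by the (WWPN, port ID, N_Port handle) triple, so a
 * relogin from the same WWPN with a new port ID or handle must invalidate
 * any stale session still holding either of the old values.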
4457 */ 4458 struct fc_port * 4459 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, 4460 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) 4461 { 4462 struct fc_port *sess = NULL, *other_sess; 4463 uint64_t other_wwn; 4464 4465 *conflict_sess = NULL; 4466 4467 list_for_each_entry(other_sess, &vha->vp_fcports, list) { 4468 4469 other_wwn = wwn_to_u64(other_sess->port_name); 4470 4471 if (wwn == other_wwn) { 4472 WARN_ON(sess); 4473 sess = other_sess; 4474 continue; 4475 } 4476 4477 /* find other sess with nport_id collision */ 4478 if (port_id.b24 == other_sess->d_id.b24) { 4479 if (loop_id != other_sess->loop_id) { 4480 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, 4481 "Invalidating sess %p loop_id %d wwn %llx.\n", 4482 other_sess, other_sess->loop_id, other_wwn); 4483 4484 /* 4485 * logout_on_delete is set by default, but another 4486 * session that has the same s_id/loop_id combo 4487 * might have cleared it when it requested this session's 4488 * deletion, so don't touch it 4489 */ 4490 qlt_schedule_sess_for_deletion(other_sess, true); 4491 } else { 4492 /* 4493 * Another WWN used to have our s_id/loop_id; 4494 * kill the session, but don't free the loop_id 4495 */ 4496 ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, 4497 "Invalidating sess %p loop_id %d wwn %llx.\n", 4498 other_sess, other_sess->loop_id, other_wwn); 4499 4500 4501 other_sess->keep_nport_handle = 1; 4502 *conflict_sess = other_sess; 4503 qlt_schedule_sess_for_deletion(other_sess, 4504 true); 4505 } 4506 continue; 4507 } 4508 4509 /* find other sess with nport handle collision */ 4510 if ((loop_id == other_sess->loop_id) && 4511 (loop_id != FC_NO_LOOP_ID)) { 4512 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 4513 "Invalidating sess %p loop_id %d wwn %llx.\n", 4514 other_sess, other_sess->loop_id, other_wwn); 4515 4516 /* Same loop_id but different s_id. 4517 * OK to kill and log out */ 4518 qlt_schedule_sess_for_deletion(other_sess, true); 4519 } 4520 } 4521 4522 return sess; 4523 } 4524 4525 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4526 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4527 { 4528 struct qla_tgt_sess_op *op; 4529 struct qla_tgt_cmd *cmd; 4530 uint32_t key; 4531 int count = 0; 4532 4533 key = (((u32)s_id->b.domain << 16) | 4534 ((u32)s_id->b.area << 8) | 4535 ((u32)s_id->b.al_pa)); 4536 4537 spin_lock(&vha->cmd_list_lock); 4538 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4539 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4540 4541 if (op_key == key) { 4542 op->aborted = true; 4543 count++; 4544 } 4545 } 4546 4547 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 4548 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4549 if (op_key == key) { 4550 op->aborted = true; 4551 count++; 4552 } 4553 } 4554 4555 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4556 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4557 if (cmd_key == key) { 4558 cmd->aborted = 1; 4559 count++; 4560 } 4561 } 4562 spin_unlock(&vha->cmd_list_lock); 4563 4564 return count; 4565 } 4566 4567 /* 4568 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reacquire 4569 */ 4570 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4571 struct imm_ntfy_from_isp *iocb) 4572 { 4573 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4574 struct qla_hw_data *ha = vha->hw; 4575 struct fc_port *sess = NULL, *conflict_sess = NULL; 4576 uint64_t wwn; 4577 port_id_t port_id; 4578 uint16_t loop_id; 4579 uint16_t wd3_lo; 4580 int res = 0; 4581 struct qlt_plogi_ack_t *pla; 4582 unsigned long flags; 4583 4584 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4585 4586 port_id.b.domain = iocb->u.isp24.port_id[2]; 4587 port_id.b.area = iocb->u.isp24.port_id[1]; 4588 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4589 port_id.b.rsvd_1 = 0; 4590 4591 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4592 4593 ql_dbg(ql_dbg_disc, vha, 0xf026, 4594 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", 4595 vha->vp_idx, iocb->u.isp24.port_id[2], 4596 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 4597 iocb->u.isp24.status_subcode, loop_id, 4598 iocb->u.isp24.port_name); 4599 4600 /* res = 1 means ack at the end of thread 4601 * res = 0 means ack async/later. 4602 */ 4603 switch (iocb->u.isp24.status_subcode) { 4604 case ELS_PLOGI: 4605 4606 /* Mark all stale commands in qla_tgt_wq for deletion */ 4607 abort_cmds_for_s_id(vha, &port_id); 4608 4609 if (wwn) { 4610 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4611 sess = qlt_find_sess_invalidate_other(vha, wwn, 4612 port_id, loop_id, &conflict_sess); 4613 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4614 } 4615 4616 if (IS_SW_RESV_ADDR(port_id)) { 4617 res = 1; 4618 break; 4619 } 4620 4621 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4622 if (!pla) { 4623 qlt_send_term_imm_notif(vha, iocb, 1); 4624 break; 4625 } 4626 4627 res = 0; 4628 4629 if (conflict_sess) { 4630 conflict_sess->login_gen++; 4631 qlt_plogi_ack_link(vha, pla, conflict_sess, 4632 QLT_PLOGI_LINK_CONFLICT); 4633 } 4634 4635 if (!sess) { 4636 pla->ref_count++; 4637 qla24xx_post_newsess_work(vha, &port_id, 4638 iocb->u.isp24.port_name, pla); 4639 res = 0; 4640 break; 4641 } 4642 4643 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4644 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4645 sess->d_id = port_id; 4646 sess->login_gen++; 4647 4648 switch (sess->disc_state) { 4649 case DSC_DELETED: 4650 qlt_plogi_ack_unref(vha, pla); 4651 break; 4652 4653 default: 4654 /* 4655 * Under normal circumstances we want to release nport handle 4656 * during LOGO process to avoid nport handle leaks inside FW. 4657 * The exception is when LOGO is done while another PLOGI with 4658 * the same nport handle is waiting as might be the case here. 4659 * Note: there is always a possibility of a race where session 4660 * deletion has already started for other reasons (e.g. ACL 4661 * removal) and now PLOGI arrives: 4662 * 1. if PLOGI arrived in FW after nport handle has been freed, 4663 * FW must have assigned this PLOGI a new/same handle and we 4664 * can proceed ACK'ing it as usual when session deletion 4665 * completes. 4666 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT 4667 * bit reached it, the handle has now been released. We'll 4668 * get an error when we ACK this PLOGI. Nothing will be sent 4669 * back to initiator. Initiator should eventually retry 4670 * PLOGI and situation will correct itself.
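 * Hence keep_nport_handle below: the handle is kept only when this PLOGI
 * reuses both the loop_id and the port_id of the session being deleted.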
4671 */ 4672 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4673 (sess->d_id.b24 == port_id.b24)); 4674 4675 ql_dbg(ql_dbg_disc, vha, 0xffff, 4676 "%s %d %8phC post del sess\n", 4677 __func__, __LINE__, sess->port_name); 4678 4679 4680 qlt_schedule_sess_for_deletion_lock(sess); 4681 break; 4682 } 4683 4684 break; 4685 4686 case ELS_PRLI: 4687 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4688 4689 if (wwn) { 4690 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4691 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, 4692 loop_id, &conflict_sess); 4693 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4694 } 4695 4696 if (conflict_sess) { 4697 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4698 "PRLI with conflicting sess %p port %8phC\n", 4699 conflict_sess, conflict_sess->port_name); 4700 qlt_send_term_imm_notif(vha, iocb, 1); 4701 res = 0; 4702 break; 4703 } 4704 4705 if (sess != NULL) { 4706 if (sess->fw_login_state != DSC_LS_PLOGI_PEND && 4707 sess->fw_login_state != DSC_LS_PLOGI_COMP) { 4708 /* 4709 * Initiator impatiently sent PRLI before the last 4710 * PLOGI could finish. Force it to retry 4711 * while the last one finishes. 4712 */ 4713 ql_log(ql_log_warn, sess->vha, 0xf095, 4714 "sess %p PRLI received, before plogi ack.\n", 4715 sess); 4716 qlt_send_term_imm_notif(vha, iocb, 1); 4717 res = 0; 4718 break; 4719 } 4720 4721 /* 4722 * This shouldn't happen under normal circumstances, 4723 * since we have deleted the old session during PLOGI 4724 */ 4725 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 4726 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 4727 sess->loop_id, sess, iocb->u.isp24.nport_handle); 4728 4729 sess->local = 0; 4730 sess->loop_id = loop_id; 4731 sess->d_id = port_id; 4732 sess->fw_login_state = DSC_LS_PRLI_PEND; 4733 4734 if (wd3_lo & BIT_7) 4735 sess->conf_compl_supported = 1; 4736 4737 if ((wd3_lo & BIT_4) == 0) 4738 sess->port_type = FCT_INITIATOR; 4739 else 4740 sess->port_type = FCT_TARGET; 4741 } 4742 res = 1; /* send notify ack */ 4743 4744 /* Make session global (not used in fabric mode) */ 4745 if (ha->current_topology != ISP_CFG_F) { 4746 if (sess) { 4747 ql_dbg(ql_dbg_disc, vha, 0xffff, 4748 "%s %d %8phC post nack\n", 4749 __func__, __LINE__, sess->port_name); 4750 qla24xx_post_nack_work(vha, sess, iocb, 4751 SRB_NACK_PRLI); 4752 res = 0; 4753 } else { 4754 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4755 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4756 qla2xxx_wake_dpc(vha); 4757 } 4758 } else { 4759 if (sess) { 4760 ql_dbg(ql_dbg_disc, vha, 0xffff, 4761 "%s %d %8phC post nack\n", 4762 __func__, __LINE__, sess->port_name); 4763 qla24xx_post_nack_work(vha, sess, iocb, 4764 SRB_NACK_PRLI); 4765 res = 0; 4766 } 4767 } 4768 break; 4769 4770 case ELS_TPRLO: 4771 if (le16_to_cpu(iocb->u.isp24.flags) & 4772 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 4773 loop_id = 0xFFFF; 4774 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); 4775 res = 1; 4776 break; 4777 } 4778 /* fall through */ 4779 case ELS_LOGO: 4780 case ELS_PRLO: 4781 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4782 sess = qla2x00_find_fcport_by_loopid(vha, loop_id); 4783 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4784 4785 if (sess) { 4786 sess->login_gen++; 4787 sess->fw_login_state = DSC_LS_LOGO_PEND; 4788 sess->logo_ack_needed = 1; 4789 memcpy(sess->iocb, iocb, IOCB_SIZE); 4790 } 4791 4792 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4793 4794 ql_dbg(ql_dbg_disc, vha, 0xffff, 4795 "%s: logo %llx res %d sess %p ", 4796 __func__, wwn, res, sess); 4797 if (res == 0) { 4798 /*
4799 * cmd went to the upper layer; see qlt_xmit_tm_rsp() 4800 * for the LOGO ACK & session deletion 4801 */ 4802 BUG_ON(!sess); 4803 res = 0; 4804 } else { 4805 /* cmd did not go to upper layer. */ 4806 if (sess) { 4807 qlt_schedule_sess_for_deletion_lock(sess); 4808 res = 0; 4809 } 4810 /* else the LOGO will be acked */ 4811 } 4812 break; 4813 case ELS_PDISC: 4814 case ELS_ADISC: 4815 { 4816 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4817 if (tgt->link_reinit_iocb_pending) { 4818 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 4819 0, 0, 0, 0, 0, 0); 4820 tgt->link_reinit_iocb_pending = 0; 4821 } 4822 4823 sess = qla2x00_find_fcport_by_wwpn(vha, 4824 iocb->u.isp24.port_name, 1); 4825 if (sess) { 4826 ql_dbg(ql_dbg_disc, vha, 0xffff, 4827 "sess %p lid %d|%d DS %d LS %d\n", 4828 sess, sess->loop_id, loop_id, 4829 sess->disc_state, sess->fw_login_state); 4830 } 4831 4832 res = 1; /* send notify ack */ 4833 break; 4834 } 4835 4836 case ELS_FLOGI: /* should never happen */ 4837 default: 4838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4839 "qla_target(%d): Unsupported ELS command %x " 4840 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 4841 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4842 break; 4843 } 4844 4845 return res; 4846 } 4847 4848 /* 4849 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4850 */ 4851 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 4852 struct imm_ntfy_from_isp *iocb) 4853 { 4854 struct qla_hw_data *ha = vha->hw; 4855 uint32_t add_flags = 0; 4856 int send_notify_ack = 1; 4857 uint16_t status; 4858 4859 status = le16_to_cpu(iocb->u.isp2x.status); 4860 switch (status) { 4861 case IMM_NTFY_LIP_RESET: 4862 { 4863 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 4864 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 4865 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 4866 iocb->u.isp24.status_subcode); 4867 4868 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 4869 send_notify_ack = 0; 4870 break; 4871 } 4872 4873 case IMM_NTFY_LIP_LINK_REINIT: 4874 { 4875 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4876 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 4877 "qla_target(%d): LINK REINIT (loop %#x, " 4878 "subcode %x)\n", vha->vp_idx, 4879 le16_to_cpu(iocb->u.isp24.nport_handle), 4880 iocb->u.isp24.status_subcode); 4881 if (tgt->link_reinit_iocb_pending) { 4882 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 4883 0, 0, 0, 0, 0, 0); 4884 } 4885 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 4886 tgt->link_reinit_iocb_pending = 1; 4887 /* 4888 * QLogic requires waiting after LINK REINIT for possible 4889 * PDISC or ADISC ELS commands 4890 */ 4891 send_notify_ack = 0; 4892 break; 4893 } 4894 4895 case IMM_NTFY_PORT_LOGOUT: 4896 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 4897 "qla_target(%d): Port logout (loop " 4898 "%#x, subcode %x)\n", vha->vp_idx, 4899 le16_to_cpu(iocb->u.isp24.nport_handle), 4900 iocb->u.isp24.status_subcode); 4901 4902 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 4903 send_notify_ack = 0; 4904 /* The sessions will be cleared in the callback, if needed */ 4905 break; 4906 4907 case IMM_NTFY_GLBL_TPRLO: 4908 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 4909 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 4910 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 4911 send_notify_ack = 0; 4912 /* The sessions will be cleared in the callback, if needed */ 4913 break; 4914 4915 case IMM_NTFY_PORT_CONFIG: 4916 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 4917 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 4918 status);
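/* Port config changes get the same recovery as a LIP reset: abort
 * everything and let the initiator re-drive its commands. */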
4919 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 4920 send_notify_ack = 0; 4921 /* The sessions will be cleared in the callback, if needed */ 4922 break; 4923 4924 case IMM_NTFY_GLBL_LOGO: 4925 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 4926 "qla_target(%d): Link failure detected\n", 4927 vha->vp_idx); 4928 /* I_T nexus loss */ 4929 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 4930 send_notify_ack = 0; 4931 break; 4932 4933 case IMM_NTFY_IOCB_OVERFLOW: 4934 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 4935 "qla_target(%d): Cannot provide requested " 4936 "capability (IOCB overflowed the immediate notify " 4937 "resource count)\n", vha->vp_idx); 4938 break; 4939 4940 case IMM_NTFY_ABORT_TASK: 4941 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 4942 "qla_target(%d): Abort Task (S %08x I %#x -> " 4943 "L %#x)\n", vha->vp_idx, 4944 le16_to_cpu(iocb->u.isp2x.seq_id), 4945 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 4946 le16_to_cpu(iocb->u.isp2x.lun)); 4947 if (qlt_abort_task(vha, iocb) == 0) 4948 send_notify_ack = 0; 4949 break; 4950 4951 case IMM_NTFY_RESOURCE: 4952 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 4953 "qla_target(%d): Out of resources, host %ld\n", 4954 vha->vp_idx, vha->host_no); 4955 break; 4956 4957 case IMM_NTFY_MSG_RX: 4958 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 4959 "qla_target(%d): Immediate notify task %x\n", 4960 vha->vp_idx, iocb->u.isp2x.task_flags); 4961 if (qlt_handle_task_mgmt(vha, iocb) == 0) 4962 send_notify_ack = 0; 4963 break; 4964 4965 case IMM_NTFY_ELS: 4966 if (qlt_24xx_handle_els(vha, iocb) == 0) 4967 send_notify_ack = 0; 4968 break; 4969 default: 4970 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 4971 "qla_target(%d): Received unknown immediate " 4972 "notify status %x\n", vha->vp_idx, status); 4973 break; 4974 } 4975 4976 if (send_notify_ack) 4977 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); 4978 } 4979 4980 /* 4981 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4982 * This function sends busy to ISP 2xxx or 24xx.
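 * It builds a status-mode-1 CTIO that carries only the SCSI status; a
 * -ENOMEM return (no request-queue space) lets qlt_send_busy() fall back
 * to queueing a qfull command instead.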
4983 */ 4984 static int __qlt_send_busy(struct scsi_qla_host *vha, 4985 struct atio_from_isp *atio, uint16_t status) 4986 { 4987 struct ctio7_to_24xx *ctio24; 4988 struct qla_hw_data *ha = vha->hw; 4989 request_t *pkt; 4990 struct fc_port *sess = NULL; 4991 unsigned long flags; 4992 4993 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4994 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4995 atio->u.isp24.fcp_hdr.s_id); 4996 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4997 if (!sess) { 4998 qlt_send_term_exchange(vha, NULL, atio, 1, 0); 4999 return 0; 5000 } 5001 /* Sending marker isn't necessary, since we're called from ISR */ 5002 5003 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 5004 if (!pkt) { 5005 ql_dbg(ql_dbg_io, vha, 0x3063, 5006 "qla_target(%d): %s failed: unable to allocate " 5007 "request packet", vha->vp_idx, __func__); 5008 return -ENOMEM; 5009 } 5010 5011 vha->tgt_counters.num_q_full_sent++; 5012 pkt->entry_count = 1; 5013 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5014 5015 ctio24 = (struct ctio7_to_24xx *)pkt; 5016 ctio24->entry_type = CTIO_TYPE7; 5017 ctio24->nport_handle = sess->loop_id; 5018 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5019 ctio24->vp_index = vha->vp_idx; 5020 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5021 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 5022 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 5023 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5024 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 5025 cpu_to_le16( 5026 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5027 CTIO7_FLAGS_DONT_RET_CTIO); 5028 /* 5029 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it 5030 * if explicit confirmation is used. 5031 */ 5032 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5033 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5034 /* Memory Barrier */ 5035 wmb(); 5036 qla2x00_start_iocbs(vha, vha->req); 5037 return 0; 5038 } 5039 5040 /* 5041 * This routine is used to allocate a command for either a QFull condition 5042 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5043 * out previously.
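 * Such commands are parked on tgt.q_full_list and replayed later by
 * qlt_free_qfull_cmds() once request-queue space is available again.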
5044 */ 5045 static void 5046 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5047 struct atio_from_isp *atio, uint16_t status, int qfull) 5048 { 5049 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5050 struct qla_hw_data *ha = vha->hw; 5051 struct fc_port *sess; 5052 struct se_session *se_sess; 5053 struct qla_tgt_cmd *cmd; 5054 int tag; 5055 5056 if (unlikely(tgt->tgt_stop)) { 5057 ql_dbg(ql_dbg_io, vha, 0x300a, 5058 "New command while device %p is shutting down\n", tgt); 5059 return; 5060 } 5061 5062 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5063 vha->hw->tgt.num_qfull_cmds_dropped++; 5064 if (vha->hw->tgt.num_qfull_cmds_dropped > 5065 vha->qla_stats.stat_max_qfull_cmds_dropped) 5066 vha->qla_stats.stat_max_qfull_cmds_dropped = 5067 vha->hw->tgt.num_qfull_cmds_dropped; 5068 5069 ql_dbg(ql_dbg_io, vha, 0x3068, 5070 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5071 vha->vp_idx, __func__, 5072 vha->hw->tgt.num_qfull_cmds_dropped); 5073 5074 qlt_chk_exch_leak_thresh_hold(vha); 5075 return; 5076 } 5077 5078 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5079 (vha, atio->u.isp24.fcp_hdr.s_id); 5080 if (!sess) 5081 return; 5082 5083 se_sess = sess->se_sess; 5084 5085 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 5086 if (tag < 0) 5087 return; 5088 5089 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5090 if (!cmd) { 5091 ql_dbg(ql_dbg_io, vha, 0x3009, 5092 "qla_target(%d): %s: Allocation of cmd failed\n", 5093 vha->vp_idx, __func__); 5094 5095 vha->hw->tgt.num_qfull_cmds_dropped++; 5096 if (vha->hw->tgt.num_qfull_cmds_dropped > 5097 vha->qla_stats.stat_max_qfull_cmds_dropped) 5098 vha->qla_stats.stat_max_qfull_cmds_dropped = 5099 vha->hw->tgt.num_qfull_cmds_dropped; 5100 5101 qlt_chk_exch_leak_thresh_hold(vha); 5102 return; 5103 } 5104 5105 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5106 5107 qlt_incr_num_pend_cmds(vha); 5108 INIT_LIST_HEAD(&cmd->cmd_list); 5109 memcpy(&cmd->atio, atio, sizeof(*atio)); 5110 5111 cmd->tgt = vha->vha_tgt.qla_tgt; 5112 cmd->vha = vha; 5113 cmd->reset_count = vha->hw->chip_reset; 5115 5116 if (qfull) { 5117 cmd->q_full = 1; 5118 /* NOTE: borrowing the state field to carry the status */ 5119 cmd->state = status; 5120 } else 5121 cmd->term_exchg = 1; 5122 5123 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5124 5125 vha->hw->tgt.num_qfull_cmds_alloc++; 5126 if (vha->hw->tgt.num_qfull_cmds_alloc > 5127 vha->qla_stats.stat_max_qfull_cmds_alloc) 5128 vha->qla_stats.stat_max_qfull_cmds_alloc = 5129 vha->hw->tgt.num_qfull_cmds_alloc; 5130 } 5131 5132 int 5133 qlt_free_qfull_cmds(struct scsi_qla_host *vha) 5134 { 5135 struct qla_hw_data *ha = vha->hw; 5136 unsigned long flags; 5137 struct qla_tgt_cmd *cmd, *tcmd; 5138 struct list_head free_list; 5139 int rc = 0; 5140 5141 if (list_empty(&ha->tgt.q_full_list)) 5142 return 0; 5143 5144 INIT_LIST_HEAD(&free_list); 5145 5146 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 5147 5148 if (list_empty(&ha->tgt.q_full_list)) { 5149 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 5150 return 0; 5151 } 5152 5153 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { 5154 if (cmd->q_full) 5155 /* cmd->state is a borrowed field to hold status */ 5156 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state); 5157 else if (cmd->term_exchg) 5158 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio); 5159 5160 if (rc == -ENOMEM) 5161 break; 5162 5163 if (cmd->q_full) 5164 ql_dbg(ql_dbg_io, vha, 0x3006, 5165 "%s: busy sent for 
ox_id[%04x]\n", __func__, 5166 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5167 else if (cmd->term_exchg) 5168 ql_dbg(ql_dbg_io, vha, 0x3007, 5169 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5170 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5171 else 5172 ql_dbg(ql_dbg_io, vha, 0x3008, 5173 "%s: Unexpected cmd in QFull list %p\n", __func__, 5174 cmd); 5175 5176 list_del(&cmd->cmd_list); 5177 list_add_tail(&cmd->cmd_list, &free_list); 5178 5179 /* piggy back on hardware_lock for protection */ 5180 vha->hw->tgt.num_qfull_cmds_alloc--; 5181 } 5182 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 5183 5184 cmd = NULL; 5185 5186 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5187 list_del(&cmd->cmd_list); 5188 /* This cmd was never sent to TCM. There is no need 5189 * to schedule free or call free_cmd 5190 */ 5191 qlt_free_cmd(cmd); 5192 } 5193 return rc; 5194 } 5195 5196 static void 5197 qlt_send_busy(struct scsi_qla_host *vha, 5198 struct atio_from_isp *atio, uint16_t status) 5199 { 5200 int rc = 0; 5201 5202 rc = __qlt_send_busy(vha, atio, status); 5203 if (rc == -ENOMEM) 5204 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5205 } 5206 5207 static int 5208 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, 5209 struct atio_from_isp *atio, bool ha_locked) 5210 { 5211 struct qla_hw_data *ha = vha->hw; 5212 uint16_t status; 5213 unsigned long flags; 5214 5215 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5216 return 0; 5217 5218 if (!ha_locked) 5219 spin_lock_irqsave(&ha->hardware_lock, flags); 5220 status = temp_sam_status; 5221 qlt_send_busy(vha, atio, status); 5222 if (!ha_locked) 5223 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5224 5225 return 1; 5226 } 5227 5228 /* ha->hardware_lock supposed to be held on entry */ 5229 /* called via callback from qla2xxx */ 5230 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5231 struct atio_from_isp *atio, uint8_t ha_locked) 5232 { 5233 struct qla_hw_data *ha = vha->hw; 5234 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5235 int rc; 5236 unsigned long flags; 5237 5238 if (unlikely(tgt == NULL)) { 5239 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5240 "ATIO pkt, but no tgt (ha %p)", ha); 5241 return; 5242 } 5243 /* 5244 * In tgt_stop mode we also should allow all requests to pass. 5245 * Otherwise, some commands can stuck. 
5246 */ 5247 5248 tgt->atio_irq_cmd_count++; 5249 5250 switch (atio->u.raw.entry_type) { 5251 case ATIO_TYPE7: 5252 if (unlikely(atio->u.isp24.exchange_addr == 5253 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5254 ql_dbg(ql_dbg_io, vha, 0x3065, 5255 "qla_target(%d): ATIO_TYPE7 " 5256 "received with UNKNOWN exchange address, " 5257 "sending QUEUE_FULL\n", vha->vp_idx); 5258 if (!ha_locked) 5259 spin_lock_irqsave(&ha->hardware_lock, flags); 5260 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 5261 if (!ha_locked) 5262 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5263 break; 5264 } 5265 5266 5267 5268 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5269 rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); 5270 if (rc != 0) { 5271 tgt->atio_irq_cmd_count--; 5272 return; 5273 } 5274 rc = qlt_handle_cmd_for_atio(vha, atio); 5275 } else { 5276 rc = qlt_handle_task_mgmt(vha, atio); 5277 } 5278 if (unlikely(rc != 0)) { 5279 if (rc == -ESRCH) { 5280 if (!ha_locked) 5281 spin_lock_irqsave 5282 (&ha->hardware_lock, flags); 5283 5284 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5285 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5286 #else 5287 qlt_send_term_exchange(vha, NULL, atio, 1, 0); 5288 #endif 5289 5290 if (!ha_locked) 5291 spin_unlock_irqrestore 5292 (&ha->hardware_lock, flags); 5293 5294 } else { 5295 if (tgt->tgt_stop) { 5296 ql_dbg(ql_dbg_tgt, vha, 0xe059, 5297 "qla_target: Unable to send " 5298 "command to target for req, " 5299 "ignoring.\n"); 5300 } else { 5301 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 5302 "qla_target(%d): Unable to send " 5303 "command to target, sending BUSY " 5304 "status.\n", vha->vp_idx); 5305 if (!ha_locked) 5306 spin_lock_irqsave( 5307 &ha->hardware_lock, flags); 5308 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5309 if (!ha_locked) 5310 spin_unlock_irqrestore( 5311 &ha->hardware_lock, flags); 5312 } 5313 } 5314 } 5315 break; 5316 5317 case IMMED_NOTIFY_TYPE: 5318 { 5319 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5320 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5321 "qla_target(%d): Received ATIO packet %x " 5322 "with error status %x\n", vha->vp_idx, 5323 atio->u.raw.entry_type, 5324 atio->u.isp2x.entry_status); 5325 break; 5326 } 5327 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5328 5329 if (!ha_locked) 5330 spin_lock_irqsave(&ha->hardware_lock, flags); 5331 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5332 if (!ha_locked) 5333 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5334 break; 5335 } 5336 5337 default: 5338 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5339 "qla_target(%d): Received unknown ATIO packet " 5340 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5341 break; 5342 } 5343 5344 tgt->atio_irq_cmd_count--; 5345 } 5346 5347 /* ha->hardware_lock supposed to be held on entry */ 5348 /* called via callback from qla2xxx */ 5349 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 5350 { 5351 struct qla_hw_data *ha = vha->hw; 5352 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5353 5354 if (unlikely(tgt == NULL)) { 5355 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5356 "qla_target(%d): Response pkt %x received, but no " 5357 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); 5358 return; 5359 } 5360 5361 /* 5362 * In tgt_stop mode we also should allow all requests to pass. 5363 * Otherwise, some commands can get stuck.
5364 */ 5365 5366 tgt->irq_cmd_count++; 5367 5368 switch (pkt->entry_type) { 5369 case CTIO_CRC2: 5370 case CTIO_TYPE7: 5371 { 5372 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5373 qlt_do_ctio_completion(vha, entry->handle, 5374 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5375 entry); 5376 break; 5377 } 5378 5379 case ACCEPT_TGT_IO_TYPE: 5380 { 5381 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5382 int rc; 5383 if (atio->u.isp2x.status != 5384 cpu_to_le16(ATIO_CDB_VALID)) { 5385 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5386 "qla_target(%d): ATIO with error " 5387 "status %x received\n", vha->vp_idx, 5388 le16_to_cpu(atio->u.isp2x.status)); 5389 break; 5390 } 5391 5392 rc = qlt_chk_qfull_thresh_hold(vha, atio, true); 5393 if (rc != 0) { 5394 tgt->irq_cmd_count--; 5395 return; 5396 } 5397 5398 rc = qlt_handle_cmd_for_atio(vha, atio); 5399 if (unlikely(rc != 0)) { 5400 if (rc == -ESRCH) { 5401 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5402 qlt_send_busy(vha, atio, 0); 5403 #else 5404 qlt_send_term_exchange(vha, NULL, atio, 1, 0); 5405 #endif 5406 } else { 5407 if (tgt->tgt_stop) { 5408 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5409 "qla_target: Unable to send " 5410 "command to target, sending TERM " 5411 "EXCHANGE for rsp\n"); 5412 qlt_send_term_exchange(vha, NULL, 5413 atio, 1, 0); 5414 } else { 5415 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5416 "qla_target(%d): Unable to send " 5417 "command to target, sending BUSY " 5418 "status\n", vha->vp_idx); 5419 qlt_send_busy(vha, atio, 0); 5420 } 5421 } 5422 } 5423 } 5424 break; 5425 5426 case CONTINUE_TGT_IO_TYPE: 5427 { 5428 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5429 qlt_do_ctio_completion(vha, entry->handle, 5430 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5431 entry); 5432 break; 5433 } 5434 5435 case CTIO_A64_TYPE: 5436 { 5437 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5438 qlt_do_ctio_completion(vha, entry->handle, 5439 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5440 entry); 5441 break; 5442 } 5443 5444 case IMMED_NOTIFY_TYPE: 5445 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5446 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5447 break; 5448 5449 case NOTIFY_ACK_TYPE: 5450 if (tgt->notify_ack_expected > 0) { 5451 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5452 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5453 "NOTIFY_ACK seq %08x status %x\n", 5454 le16_to_cpu(entry->u.isp2x.seq_id), 5455 le16_to_cpu(entry->u.isp2x.status)); 5456 tgt->notify_ack_expected--; 5457 if (entry->u.isp2x.status != 5458 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5459 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5460 "qla_target(%d): NOTIFY_ACK " 5461 "failed %x\n", vha->vp_idx, 5462 le16_to_cpu(entry->u.isp2x.status)); 5463 } 5464 } else { 5465 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5466 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5467 vha->vp_idx); 5468 } 5469 break; 5470 5471 case ABTS_RECV_24XX: 5472 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5473 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5474 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5475 break; 5476 5477 case ABTS_RESP_24XX: 5478 if (tgt->abts_resp_expected > 0) { 5479 struct abts_resp_from_24xx_fw *entry = 5480 (struct abts_resp_from_24xx_fw *)pkt; 5481 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5482 "ABTS_RESP_24XX: compl_status %x\n", 5483 entry->compl_status); 5484 tgt->abts_resp_expected--; 5485 if (le16_to_cpu(entry->compl_status) != 5486 ABTS_RESP_COMPL_SUCCESS) { 5487 if ((entry->error_subcode1 == 0x1E) && 
5488 (entry->error_subcode2 == 0)) { 5489 /* 5490 * We've got a race here: aborted 5491 * exchange not terminated, i.e. 5492 * response for the aborted command was 5493 * sent between the time the abort request was 5494 * received and the time it was processed. 5495 * Unfortunately, the firmware has a 5496 * silly requirement that all aborted 5497 * exchanges must be explicitly 5498 * terminated, otherwise it refuses to 5499 * send responses for the abort 5500 * requests. So, we have to 5501 * (re)terminate the exchange and retry 5502 * the abort response. 5503 */ 5504 qlt_24xx_retry_term_exchange(vha, 5505 entry); 5506 } else 5507 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5508 "qla_target(%d): ABTS_RESP_24XX " 5509 "failed %x (subcode %x:%x)", 5510 vha->vp_idx, entry->compl_status, 5511 entry->error_subcode1, 5512 entry->error_subcode2); 5513 } 5514 } else { 5515 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5516 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5517 "received\n", vha->vp_idx); 5518 } 5519 break; 5520 5521 default: 5522 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5523 "qla_target(%d): Received unknown response pkt " 5524 "type %x\n", vha->vp_idx, pkt->entry_type); 5525 break; 5526 } 5527 5528 tgt->irq_cmd_count--; 5529 } 5530 5531 /* 5532 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 5533 */ 5534 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5535 uint16_t *mailbox) 5536 { 5537 struct qla_hw_data *ha = vha->hw; 5538 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5539 int login_code; 5540 5541 if (!ha->tgt.tgt_ops) 5542 return; 5543 5544 if (unlikely(tgt == NULL)) { 5545 ql_dbg(ql_dbg_tgt, vha, 0xe03a, 5546 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); 5547 return; 5548 } 5549 5550 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5551 IS_QLA2100(ha)) 5552 return; 5553 /* 5554 * In tgt_stop mode we also should allow all requests to pass. 5555 * Otherwise, some commands can get stuck. 5556 */ 5557 5558 tgt->irq_cmd_count++; 5559 5560 switch (code) { 5561 case MBA_RESET: /* Reset */ 5562 case MBA_SYSTEM_ERR: /* System Error */ 5563 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5564 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5565 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5566 "qla_target(%d): System error async event %#x " 5567 "occurred", vha->vp_idx, code); 5568 break; 5569 case MBA_WAKEUP_THRES: /* Request Queue Wake-up.
*/ 5570 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5571 break; 5572 5573 case MBA_LOOP_UP: 5574 { 5575 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5576 "qla_target(%d): Async LOOP_UP occurred " 5577 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5578 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5579 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5580 if (tgt->link_reinit_iocb_pending) { 5581 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, 5582 0, 0, 0, 0, 0, 0); 5583 tgt->link_reinit_iocb_pending = 0; 5584 } 5585 break; 5586 } 5587 5588 case MBA_LIP_OCCURRED: 5589 case MBA_LOOP_DOWN: 5590 case MBA_LIP_RESET: 5591 case MBA_RSCN_UPDATE: 5592 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5593 "qla_target(%d): Async event %#x occurred " 5594 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5595 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5596 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5597 break; 5598 5599 case MBA_REJECTED_FCP_CMD: 5600 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 5601 "qla_target(%d): Async event LS_REJECT occurred " 5602 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5603 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5604 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5605 5606 if (le16_to_cpu(mailbox[3]) == 1) { 5607 /* exchange starvation. */ 5608 vha->hw->exch_starvation++; 5609 if (vha->hw->exch_starvation > 5) { 5610 ql_log(ql_log_warn, vha, 0xffff, 5611 "Exchange starvation-. Resetting RISC\n"); 5612 5613 vha->hw->exch_starvation = 0; 5614 if (IS_P3P_TYPE(vha->hw)) 5615 set_bit(FCOE_CTX_RESET_NEEDED, 5616 &vha->dpc_flags); 5617 else 5618 set_bit(ISP_ABORT_NEEDED, 5619 &vha->dpc_flags); 5620 qla2xxx_wake_dpc(vha); 5621 } 5622 } 5623 break; 5624 5625 case MBA_PORT_UPDATE: 5626 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 5627 "qla_target(%d): Port update async event %#x " 5628 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 5629 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5630 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5631 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5632 5633 login_code = le16_to_cpu(mailbox[2]); 5634 if (login_code == 0x4) { 5635 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 5636 "Async MB 2: Got PLOGI Complete\n"); 5637 vha->hw->exch_starvation = 0; 5638 } else if (login_code == 0x7) 5639 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 5640 "Async MB 2: Port Logged Out\n"); 5641 break; 5642 default: 5643 break; 5644 } 5645 5646 tgt->irq_cmd_count--; 5647 } 5648 5649 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 5650 uint16_t loop_id) 5651 { 5652 fc_port_t *fcport, *tfcp, *del; 5653 int rc; 5654 unsigned long flags; 5655 u8 newfcport = 0; 5656 5657 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); 5658 if (!fcport) { 5659 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 5660 "qla_target(%d): Allocation of tmp FC port failed", 5661 vha->vp_idx); 5662 return NULL; 5663 } 5664 5665 fcport->loop_id = loop_id; 5666 5667 rc = qla24xx_gpdb_wait(vha, fcport, 0); 5668 if (rc != QLA_SUCCESS) { 5669 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 5670 "qla_target(%d): Failed to retrieve fcport " 5671 "information -- get_port_database() returned %x " 5672 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 5673 kfree(fcport); 5674 return NULL; 5675 } 5676 5677 del = NULL; 5678 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5679 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 5680 5681 if (tfcp) { 5682 tfcp->d_id = fcport->d_id; 5683 tfcp->port_type = fcport->port_type; 5684 tfcp->supported_classes = fcport->supported_classes; 
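/* An fcport with this WWPN already exists: refresh it in place and
 * free the temporary one (del) below. */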
5685 tfcp->flags |= fcport->flags; 5686 5687 del = fcport; 5688 fcport = tfcp; 5689 } else { 5690 if (vha->hw->current_topology == ISP_CFG_F) 5691 fcport->flags |= FCF_FABRIC_DEVICE; 5692 5693 list_add_tail(&fcport->list, &vha->vp_fcports); 5694 if (!IS_SW_RESV_ADDR(fcport->d_id)) 5695 vha->fcport_count++; 5696 fcport->login_gen++; 5697 fcport->disc_state = DSC_LOGIN_COMPLETE; 5698 fcport->login_succ = 1; 5699 newfcport = 1; 5700 } 5701 5702 fcport->deleted = 0; 5703 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5704 5705 switch (vha->host->active_mode) { 5706 case MODE_INITIATOR: 5707 case MODE_DUAL: 5708 if (newfcport) { 5709 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 5710 ql_dbg(ql_dbg_disc, vha, 0xffff, 5711 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 5712 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5713 qla24xx_post_upd_fcport_work(vha, fcport); 5714 } else { 5715 ql_dbg(ql_dbg_disc, vha, 0xffff, 5716 "%s %d %8phC post gpsc fcp_cnt %d\n", 5717 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5718 qla24xx_post_gpsc_work(vha, fcport); 5719 } 5720 } 5721 break; 5722 5723 case MODE_TARGET: 5724 default: 5725 break; 5726 } 5727 if (del) 5728 qla2x00_free_fcport(del); 5729 5730 return fcport; 5731 } 5732 5733 /* Must be called under tgt_mutex */ 5734 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 5735 uint8_t *s_id) 5736 { 5737 struct fc_port *sess = NULL; 5738 fc_port_t *fcport = NULL; 5739 int rc, global_resets; 5740 uint16_t loop_id = 0; 5741 5742 if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { 5743 /* 5744 * This is the Domain Controller, so it should be 5745 * OK to drop SCSI commands from it. 5746 */ 5747 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 5748 "Unable to find initiator with S_ID %x:%x:%x", 5749 s_id[0], s_id[1], s_id[2]); 5750 return NULL; 5751 } 5752 5753 mutex_lock(&vha->vha_tgt.tgt_mutex); 5754 5755 retry: 5756 global_resets = 5757 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 5758 5759 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 5760 if (rc != 0) { 5761 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5762 5763 ql_log(ql_log_info, vha, 0xf071, 5764 "qla_target(%d): Unable to find " 5765 "initiator with S_ID %x:%x:%x", 5766 vha->vp_idx, s_id[0], s_id[1], 5767 s_id[2]); 5768 5769 if (rc == -ENOENT) { 5770 qlt_port_logo_t logo; 5771 sid_to_portid(s_id, &logo.id); 5772 logo.cmd_count = 1; 5773 qlt_send_first_logo(vha, &logo); 5774 } 5775 5776 return NULL; 5777 } 5778 5779 fcport = qlt_get_port_database(vha, loop_id); 5780 if (!fcport) { 5781 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5782 return NULL; 5783 } 5784 5785 if (global_resets != 5786 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 5787 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 5788 "qla_target(%d): global reset during session discovery " 5789 "(counter was %d, new %d), retrying", vha->vp_idx, 5790 global_resets, 5791 atomic_read(&vha->vha_tgt.
5792 qla_tgt->tgt_global_resets_count)); 5793 goto retry; 5794 } 5795 5796 sess = qlt_create_sess(vha, fcport, true); 5797 5798 mutex_unlock(&vha->vha_tgt.tgt_mutex); 5799 5800 return sess; 5801 } 5802 5803 static void qlt_abort_work(struct qla_tgt *tgt, 5804 struct qla_tgt_sess_work_param *prm) 5805 { 5806 struct scsi_qla_host *vha = tgt->vha; 5807 struct qla_hw_data *ha = vha->hw; 5808 struct fc_port *sess = NULL; 5809 unsigned long flags = 0, flags2 = 0; 5811 uint8_t s_id[3]; 5812 int rc; 5813 5814 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 5815 5816 if (tgt->tgt_stop) 5817 goto out_term2; 5818 5819 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 5820 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 5821 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 5822 5823 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5824 s_id); 5825 if (!sess) { 5826 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5827 5828 sess = qlt_make_local_sess(vha, s_id); 5829 /* sess has got an extra creation ref */ 5830 5831 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 5832 if (!sess) 5833 goto out_term2; 5834 } else { 5835 if (sess->deleted) { 5836 sess = NULL; 5837 goto out_term2; 5838 } 5839 5840 if (!kref_get_unless_zero(&sess->sess_kref)) { 5841 ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, 5842 "%s: kref_get fail %8phC\n", 5843 __func__, sess->port_name); 5844 sess = NULL; 5845 goto out_term2; 5846 } 5847 } 5848 5849 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 5850 ha->tgt.tgt_ops->put_sess(sess); 5851 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5852 5853 if (rc != 0) 5854 goto out_term; 5855 return; 5856 5857 out_term2: 5858 if (sess) 5859 ha->tgt.tgt_ops->put_sess(sess); 5860 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 5861 5862 out_term: 5863 spin_lock_irqsave(&ha->hardware_lock, flags); 5864 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 5865 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5866 } 5867 5868 static void qlt_tmr_work(struct qla_tgt *tgt, 5869 struct qla_tgt_sess_work_param *prm) 5870 { 5871 struct atio_from_isp *a = &prm->tm_iocb2; 5872 struct scsi_qla_host *vha = tgt->vha; 5873 struct qla_hw_data *ha = vha->hw; 5874 struct fc_port *sess = NULL; 5875 unsigned long flags; 5876 uint8_t *s_id = NULL; /* to hide compiler warnings */ 5877 int rc; 5878 uint32_t lun, unpacked_lun; 5879 int fn; 5880 void *iocb; 5881 5882 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5883 5884 if (tgt->tgt_stop) 5885 goto out_term2; 5886 5887 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 5888 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 5889 if (!sess) { 5890 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5891 5892 sess = qlt_make_local_sess(vha, s_id); 5893 /* sess has got an extra creation ref */ 5894 5895 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5896 if (!sess) 5897 goto out_term2; 5898 } else { 5899 if (sess->deleted) { 5900 sess = NULL; 5901 goto out_term2; 5902 } 5903 5904 if (!kref_get_unless_zero(&sess->sess_kref)) { 5905 ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff, 5906 "%s: kref_get fail %8phC\n", 5907 __func__, sess->port_name); 5908 sess = NULL; 5909 goto out_term2; 5910 } 5911 } 5912 5913 iocb = a; 5914 lun = a->u.isp24.fcp_cmnd.lun; 5915 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 5916 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 5917 5918 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 5919 ha->tgt.tgt_ops->put_sess(sess); 5920 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5921 5922 if (rc != 0) 5923 goto
out_term; 5924 return; 5925 5926 out_term2: 5927 if (sess) 5928 ha->tgt.tgt_ops->put_sess(sess); 5929 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5930 out_term: 5931 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); 5932 } 5933 5934 static void qlt_sess_work_fn(struct work_struct *work) 5935 { 5936 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 5937 struct scsi_qla_host *vha = tgt->vha; 5938 unsigned long flags; 5939 5940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 5941 5942 spin_lock_irqsave(&tgt->sess_work_lock, flags); 5943 while (!list_empty(&tgt->sess_works_list)) { 5944 struct qla_tgt_sess_work_param *prm = list_entry( 5945 tgt->sess_works_list.next, typeof(*prm), 5946 sess_works_list_entry); 5947 5948 /* 5949 * This work can be scheduled on several CPUs at a time, so we 5950 * must delete the entry to eliminate double processing 5951 */ 5952 list_del(&prm->sess_works_list_entry); 5953 5954 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 5955 5956 switch (prm->type) { 5957 case QLA_TGT_SESS_WORK_ABORT: 5958 qlt_abort_work(tgt, prm); 5959 break; 5960 case QLA_TGT_SESS_WORK_TM: 5961 qlt_tmr_work(tgt, prm); 5962 break; 5963 default: 5964 BUG_ON(1); 5965 break; 5966 } 5967 5968 spin_lock_irqsave(&tgt->sess_work_lock, flags); 5969 5970 kfree(prm); 5971 } 5972 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 5973 } 5974 5975 /* Must be called under tgt_host_action_mutex */ 5976 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 5977 { 5978 struct qla_tgt *tgt; 5979 5980 if (!QLA_TGT_MODE_ENABLED()) 5981 return 0; 5982 5983 if (!IS_TGT_MODE_CAPABLE(ha)) { 5984 ql_log(ql_log_warn, base_vha, 0xe070, 5985 "This adapter does not support target mode.\n"); 5986 return 0; 5987 } 5988 5989 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 5990 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 5991 5992 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 5993 5994 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 5995 if (!tgt) { 5996 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 5997 "Unable to allocate struct qla_tgt\n"); 5998 return -ENOMEM; 5999 } 6000 6001 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6002 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6003 6004 tgt->ha = ha; 6005 tgt->vha = base_vha; 6006 init_waitqueue_head(&tgt->waitQ); 6007 INIT_LIST_HEAD(&tgt->del_sess_list); 6008 spin_lock_init(&tgt->sess_work_lock); 6009 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6010 INIT_LIST_HEAD(&tgt->sess_works_list); 6011 atomic_set(&tgt->tgt_global_resets_count, 0); 6012 6013 base_vha->vha_tgt.qla_tgt = tgt; 6014 6015 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6016 "qla_target(%d): using 64 Bit PCI addressing", 6017 base_vha->vp_idx); 6018 tgt->tgt_enable_64bit_addr = 1; 6019 /* 3 is reserved */ 6020 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6021 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 6022 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; 6023 6024 mutex_lock(&qla_tgt_mutex); 6025 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6026 mutex_unlock(&qla_tgt_mutex); 6027 6028 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6029 ha->tgt.tgt_ops->add_target(base_vha); 6030 6031 return 0; 6032 } 6033 6034 /* Must be called under tgt_host_action_mutex */ 6035 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6036 { 6037 if (!vha->vha_tgt.qla_tgt) 6038 return 0; 6039 6040 if (vha->fc_vport) { 6041 qlt_release(vha->vha_tgt.qla_tgt); 6042
return 0; 6043 } 6044 6045 /* free left over qfull cmds */ 6046 qlt_init_term_exchange(vha); 6047 6048 mutex_lock(&qla_tgt_mutex); 6049 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 6050 mutex_unlock(&qla_tgt_mutex); 6051 6052 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6053 vha->host_no, ha); 6054 qlt_release(vha->vha_tgt.qla_tgt); 6055 6056 return 0; 6057 } 6058 6059 void qlt_remove_target_resources(struct qla_hw_data *ha) 6060 { 6061 struct scsi_qla_host *node; 6062 u32 key = 0; 6063 6064 btree_for_each_safe32(&ha->tgt.host_map, key, node) 6065 btree_remove32(&ha->tgt.host_map, key); 6066 6067 btree_destroy32(&ha->tgt.host_map); 6068 } 6069 6070 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6071 unsigned char *b) 6072 { 6073 int i; 6074 6075 pr_debug("qla2xxx HW vha->node_name: "); 6076 for (i = 0; i < WWN_SIZE; i++) 6077 pr_debug("%02x ", vha->node_name[i]); 6078 pr_debug("\n"); 6079 pr_debug("qla2xxx HW vha->port_name: "); 6080 for (i = 0; i < WWN_SIZE; i++) 6081 pr_debug("%02x ", vha->port_name[i]); 6082 pr_debug("\n"); 6083 6084 pr_debug("qla2xxx passed configfs WWPN: "); 6085 put_unaligned_be64(wwpn, b); 6086 for (i = 0; i < WWN_SIZE; i++) 6087 pr_debug("%02x ", b[i]); 6088 pr_debug("\n"); 6089 } 6090 6091 /** 6092 * qlt_lport_register - register lport with external module 6093 * 6094 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6095 * @phys_wwpn: passed physical port WWPN of the FC target 6096 * @npiv_wwpn / @npiv_wwnn: passed NPIV WWPN/WWNN, zero for a physical lport 6097 * @callback: lport initialization callback for tcm_qla2xxx code 6098 */ 6099 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6100 u64 npiv_wwpn, u64 npiv_wwnn, 6101 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6102 { 6103 struct qla_tgt *tgt; 6104 struct scsi_qla_host *vha; 6105 struct qla_hw_data *ha; 6106 struct Scsi_Host *host; 6107 unsigned long flags; 6108 int rc; 6109 u8 b[WWN_SIZE]; 6110 6111 mutex_lock(&qla_tgt_mutex); 6112 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6113 vha = tgt->vha; 6114 ha = vha->hw; 6115 6116 host = vha->host; 6117 if (!host) 6118 continue; 6119 6120 if (!(host->hostt->supported_mode & MODE_TARGET)) 6121 continue; 6122 6123 spin_lock_irqsave(&ha->hardware_lock, flags); 6124 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6125 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6126 host->host_no); 6127 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6128 continue; 6129 } 6130 if (tgt->tgt_stop) { 6131 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 6132 host->host_no); 6133 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6134 continue; 6135 } 6136 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6137 6138 if (!scsi_host_get(host)) { 6139 ql_dbg(ql_dbg_tgt, vha, 0xe068, 6140 "Unable to scsi_host_get() for" 6141 " qla2xxx scsi_host\n"); 6142 continue; 6143 } 6144 qlt_lport_dump(vha, phys_wwpn, b); 6145 6146 if (memcmp(vha->port_name, b, WWN_SIZE)) { 6147 scsi_host_put(host); 6148 continue; 6149 } 6150 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 6151 if (rc != 0) 6152 scsi_host_put(host); 6153 6154 mutex_unlock(&qla_tgt_mutex); 6155 return rc; 6156 } 6157 mutex_unlock(&qla_tgt_mutex); 6158 6159 return -ENODEV; 6160 } 6161 EXPORT_SYMBOL(qlt_lport_register); 6162 6163 /** 6164 * qlt_lport_deregister - Deregister lport 6165 * 6166 * @vha: Registered scsi_qla_host pointer 6167 */ 6168 void qlt_lport_deregister(struct scsi_qla_host *vha) 6169 { 6170 struct
qla_hw_data *ha = vha->hw; 6171 struct Scsi_Host *sh = vha->host; 6172 /* 6173 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 6174 */ 6175 vha->vha_tgt.target_lport_ptr = NULL; 6176 ha->tgt.tgt_ops = NULL; 6177 /* 6178 * Release the Scsi_Host reference for the underlying qla2xxx host 6179 */ 6180 scsi_host_put(sh); 6181 } 6182 EXPORT_SYMBOL(qlt_lport_deregister); 6183 6184 /* Must be called under HW lock */ 6185 static void qlt_set_mode(struct scsi_qla_host *vha) 6186 { 6187 switch (ql2x_ini_mode) { 6188 case QLA2XXX_INI_MODE_DISABLED: 6189 case QLA2XXX_INI_MODE_EXCLUSIVE: 6190 vha->host->active_mode = MODE_TARGET; 6191 break; 6192 case QLA2XXX_INI_MODE_ENABLED: 6193 vha->host->active_mode = MODE_UNKNOWN; 6194 break; 6195 case QLA2XXX_INI_MODE_DUAL: 6196 vha->host->active_mode = MODE_DUAL; 6197 break; 6198 default: 6199 break; 6200 } 6201 } 6202 6203 /* Must be called under HW lock */ 6204 static void qlt_clear_mode(struct scsi_qla_host *vha) 6205 { 6206 switch (ql2x_ini_mode) { 6207 case QLA2XXX_INI_MODE_DISABLED: 6208 vha->host->active_mode = MODE_UNKNOWN; 6209 break; 6210 case QLA2XXX_INI_MODE_EXCLUSIVE: 6211 vha->host->active_mode = MODE_INITIATOR; 6212 break; 6213 case QLA2XXX_INI_MODE_ENABLED: 6214 case QLA2XXX_INI_MODE_DUAL: 6215 vha->host->active_mode = MODE_INITIATOR; 6216 break; 6217 default: 6218 break; 6219 } 6220 } 6221 6222 /* 6223 * qlt_enable_vha - NO LOCK HELD 6224 * 6225 * host_reset, bring up w/ Target Mode Enabled 6226 */ 6227 void 6228 qlt_enable_vha(struct scsi_qla_host *vha) 6229 { 6230 struct qla_hw_data *ha = vha->hw; 6231 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6232 unsigned long flags; 6233 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6234 int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER; 6235 6236 if (!tgt) { 6237 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6238 "Unable to locate qla_tgt pointer from" 6239 " struct qla_hw_data\n"); 6240 dump_stack(); 6241 return; 6242 } 6243 6244 spin_lock_irqsave(&ha->hardware_lock, flags); 6245 tgt->tgt_stopped = 0; 6246 qlt_set_mode(vha); 6247 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6248 6249 if (vha->vp_idx) { 6250 qla24xx_disable_vp(vha); 6251 qla24xx_enable_vp(vha); 6252 } else { 6253 if (ha->msix_entries) { 6254 ql_dbg(ql_dbg_tgt, vha, 0xffff, 6255 "%s: host%ld : vector %d cpu %d\n", 6256 __func__, vha->host_no, 6257 ha->msix_entries[rspq_ent].vector, 6258 ha->msix_entries[rspq_ent].cpuid); 6259 6260 ha->tgt.rspq_vector_cpuid = 6261 ha->msix_entries[rspq_ent].cpuid; 6262 } 6263 6264 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6265 qla2xxx_wake_dpc(base_vha); 6266 qla2x00_wait_for_hba_online(base_vha); 6267 } 6268 } 6269 EXPORT_SYMBOL(qlt_enable_vha); 6270 6271 /* 6272 * qlt_disable_vha - NO LOCK HELD 6273 * 6274 * Disable Target Mode and reset the adapter 6275 */ 6276 static void qlt_disable_vha(struct scsi_qla_host *vha) 6277 { 6278 struct qla_hw_data *ha = vha->hw; 6279 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6280 unsigned long flags; 6281 6282 if (!tgt) { 6283 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6284 "Unable to locate qla_tgt pointer from" 6285 " struct qla_hw_data\n"); 6286 dump_stack(); 6287 return; 6288 } 6289 6290 spin_lock_irqsave(&ha->hardware_lock, flags); 6291 qlt_clear_mode(vha); 6292 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6293 6294 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6295 qla2xxx_wake_dpc(vha); 6296 qla2x00_wait_for_hba_online(vha); 6297 } 6298 6299 /* 6300 * Called from qla_init.c:qla24xx_vport_create() context to set up 6301 * the target mode
specific struct scsi_qla_host and struct qla_hw_data 6302 * members. 6303 */ 6304 void 6305 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6306 { 6307 vha->vha_tgt.qla_tgt = NULL; 6308 6309 mutex_init(&vha->vha_tgt.tgt_mutex); 6310 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6311 6312 qlt_clear_mode(vha); 6313 6314 /* 6315 * NOTE: Currently the value is kept the same for <24xx and 6316 * >=24xx ISPs. If it is necessary to change it, 6317 * the check should be added for specific ISPs, 6318 * assigning the value appropriately. 6319 */ 6320 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6321 6322 qlt_add_target(ha, vha); 6323 } 6324 6325 void 6326 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) 6327 { 6328 /* 6329 * FC-4 Feature bit 0 indicates target functionality to the name server. 6330 */ 6331 if (qla_tgt_mode_enabled(vha)) { 6332 ct_req->req.rff_id.fc4_feature = BIT_0; 6333 } else if (qla_ini_mode_enabled(vha)) { 6334 ct_req->req.rff_id.fc4_feature = BIT_1; 6335 } else if (qla_dual_mode_enabled(vha)) 6336 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; 6337 } 6338 6339 /* 6340 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6341 * @vha: SCSI host context 6342 * 6343 * The beginning of the ATIO ring has the initialization control block 6344 * already built by the NVRAM config routine. 6345 */ 6348 void 6349 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6350 { 6351 struct qla_hw_data *ha = vha->hw; 6352 uint16_t cnt; 6353 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6354 6355 if (qla_ini_mode_enabled(vha)) 6356 return; 6357 6358 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6359 pkt->u.raw.signature = ATIO_PROCESSED; 6360 pkt++; 6361 } 6362 6363 } 6364 6365 /* 6366 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6367 * @vha: SCSI driver host context 6368 * @ha_locked: non-zero if ha->hardware_lock is already held by the caller 6369 */ 6370 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6371 { 6372 struct qla_hw_data *ha = vha->hw; 6373 struct atio_from_isp *pkt; 6374 int cnt, i; 6375 6376 if (!ha->flags.fw_started) 6377 return; 6378 6379 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6380 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6381 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6382 cnt = pkt->u.raw.entry_count; 6383 6384 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6385 /* 6386 * This packet is corrupted. The header + payload 6387 * cannot be trusted. There is no point in passing 6388 * it further up.
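* Instead, adjust_corrupted_atio() below patches the header just enough for a terminate exchange to be sent for the frame before stepping past it in the ring.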
6389 */ 6390 ql_log(ql_log_warn, vha, 0xffff, 6391 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6392 pkt->u.isp24.fcp_hdr.s_id, 6393 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6394 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6395 6396 adjust_corrupted_atio(pkt); 6397 qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); 6398 } else { 6399 qlt_24xx_atio_pkt_all_vps(vha, 6400 (struct atio_from_isp *)pkt, ha_locked); 6401 } 6402 6403 for (i = 0; i < cnt; i++) { 6404 ha->tgt.atio_ring_index++; 6405 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6406 ha->tgt.atio_ring_index = 0; 6407 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6408 } else 6409 ha->tgt.atio_ring_ptr++; 6410 6411 pkt->u.raw.signature = ATIO_PROCESSED; 6412 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6413 } 6414 wmb(); 6415 } 6416 6417 /* Adjust ring index */ 6418 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6419 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha)); 6420 } 6421 6422 void 6423 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6424 { 6425 struct qla_hw_data *ha = vha->hw; 6426 if (!QLA_TGT_MODE_ENABLED()) 6427 return; 6428 6429 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 6430 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6431 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6432 6433 if (IS_ATIO_MSIX_CAPABLE(ha)) { 6434 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6435 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6436 6437 icb->msix_atio = cpu_to_le16(msix->entry); 6438 ql_dbg(ql_dbg_init, vha, 0xf072, 6439 "Registering ICB vector 0x%x for ATIO queue.\n", 6440 msix->entry); 6441 } 6442 } 6443 6444 void 6445 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6446 { 6447 struct qla_hw_data *ha = vha->hw; 6448 u32 tmp; 6449 u16 t; 6450 6451 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6452 if (!ha->tgt.saved_set) { 6453 /* We save only once */ 6454 ha->tgt.saved_exchange_count = nv->exchange_count; 6455 ha->tgt.saved_firmware_options_1 = 6456 nv->firmware_options_1; 6457 ha->tgt.saved_firmware_options_2 = 6458 nv->firmware_options_2; 6459 ha->tgt.saved_firmware_options_3 = 6460 nv->firmware_options_3; 6461 ha->tgt.saved_set = 1; 6462 } 6463 6464 if (qla_tgt_mode_enabled(vha)) { 6465 nv->exchange_count = cpu_to_le16(0xFFFF); 6466 } else { /* dual */ 6467 if (ql_dm_tgt_ex_pct > 100) { 6468 ql_dm_tgt_ex_pct = 50; 6469 } else if (ql_dm_tgt_ex_pct == 100) { 6470 /* leave some for FW */ 6471 ql_dm_tgt_ex_pct = 95; 6472 } 6473 6474 tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; 6475 tmp = tmp/100; 6476 if (tmp > 0xffff) 6477 tmp = 0xffff; 6478 6479 t = tmp & 0xffff; 6480 nv->exchange_count = cpu_to_le16(t); 6481 } 6482 6483 /* Enable target mode */ 6484 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6485 6486 /* Disable ini mode, if requested */ 6487 if (qla_tgt_mode_enabled(vha)) 6488 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6489 6490 /* Disable Full Login after LIP */ 6491 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6492 /* Enable initial LIP */ 6493 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6494 if (ql2xtgt_tape_enable) 6495 /* Enable FC Tape support */ 6496 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6497 else 6498 /* Disable FC Tape support */ 6499 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6500 6501 /* Disable Full Login after LIP */ 6502 nv->host_p &= cpu_to_le32(~BIT_10); 6503 6504 /* 6505 * clear BIT 15 explicitly as we have seen at least 6506 * a couple of instances where this was set and this 6507 * was causing
the firmware to not be initialized. 6508 */ 6509 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6510 /* Enable target PRLI control */ 6511 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6512 } else { 6513 if (ha->tgt.saved_set) { 6514 nv->exchange_count = ha->tgt.saved_exchange_count; 6515 nv->firmware_options_1 = 6516 ha->tgt.saved_firmware_options_1; 6517 nv->firmware_options_2 = 6518 ha->tgt.saved_firmware_options_2; 6519 nv->firmware_options_3 = 6520 ha->tgt.saved_firmware_options_3; 6521 } 6522 return; 6523 } 6524 6525 if (ha->tgt.enable_class_2) { 6526 if (vha->flags.init_done) 6527 fc_host_supported_classes(vha->host) = 6528 FC_COS_CLASS2 | FC_COS_CLASS3; 6529 6530 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6531 } else { 6532 if (vha->flags.init_done) 6533 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6534 6535 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6536 } 6537 } 6538 6539 void 6540 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6541 struct init_cb_24xx *icb) 6542 { 6543 struct qla_hw_data *ha = vha->hw; 6544 6545 if (!QLA_TGT_MODE_ENABLED()) 6546 return; 6547 6548 if (ha->tgt.node_name_set) { 6549 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6550 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6551 } 6552 6553 /* disable ZIO at start time. */ 6554 if (!vha->flags.init_done) { 6555 uint32_t tmp; 6556 tmp = le32_to_cpu(icb->firmware_options_2); 6557 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6558 icb->firmware_options_2 = cpu_to_le32(tmp); 6559 } 6560 } 6561 6562 void 6563 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6564 { 6565 struct qla_hw_data *ha = vha->hw; 6566 u32 tmp; 6567 u16 t; 6568 6569 if (!QLA_TGT_MODE_ENABLED()) 6570 return; 6571 6572 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6573 if (!ha->tgt.saved_set) { 6574 /* We save only once */ 6575 ha->tgt.saved_exchange_count = nv->exchange_count; 6576 ha->tgt.saved_firmware_options_1 = 6577 nv->firmware_options_1; 6578 ha->tgt.saved_firmware_options_2 = 6579 nv->firmware_options_2; 6580 ha->tgt.saved_firmware_options_3 = 6581 nv->firmware_options_3; 6582 ha->tgt.saved_set = 1; 6583 } 6584 6585 if (qla_tgt_mode_enabled(vha)) { 6586 nv->exchange_count = cpu_to_le16(0xFFFF); 6587 } else { /* dual */ 6588 if (ql_dm_tgt_ex_pct > 100) { 6589 ql_dm_tgt_ex_pct = 50; 6590 } else if (ql_dm_tgt_ex_pct == 100) { 6591 /* leave some for FW */ 6592 ql_dm_tgt_ex_pct = 95; 6593 } 6594 6595 tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct; 6596 tmp = tmp/100; 6597 if (tmp > 0xffff) 6598 tmp = 0xffff; 6599 t = tmp & 0xffff; 6600 nv->exchange_count = cpu_to_le16(t); 6601 } 6602 6603 /* Enable target mode */ 6604 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6605 6606 /* Disable ini mode, if requested */ 6607 if (qla_tgt_mode_enabled(vha)) 6608 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6609 /* Disable Full Login after LIP */ 6610 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6611 /* Enable initial LIP */ 6612 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6613 /* 6614 * clear BIT 15 explicitly as we have seen at 6615 * least a couple of instances where this was set 6616 * and this was causing the firmware to not be 6617 * initialized. 
6618 */ 6619 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6620 if (ql2xtgt_tape_enable) 6621 /* Enable FC tape support */ 6622 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6623 else 6624 /* Disable FC tape support */ 6625 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6626 6627 /* Disable Full Login after LIP */ 6628 nv->host_p &= cpu_to_le32(~BIT_10); 6629 /* Enable target PRLI control */ 6630 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6631 } else { 6632 if (ha->tgt.saved_set) { 6633 nv->exchange_count = ha->tgt.saved_exchange_count; 6634 nv->firmware_options_1 = 6635 ha->tgt.saved_firmware_options_1; 6636 nv->firmware_options_2 = 6637 ha->tgt.saved_firmware_options_2; 6638 nv->firmware_options_3 = 6639 ha->tgt.saved_firmware_options_3; 6640 } 6641 return; 6642 } 6643 6644 if (ha->tgt.enable_class_2) { 6645 if (vha->flags.init_done) 6646 fc_host_supported_classes(vha->host) = 6647 FC_COS_CLASS2 | FC_COS_CLASS3; 6648 6649 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6650 } else { 6651 if (vha->flags.init_done) 6652 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6653 6654 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6655 } 6656 } 6657 6658 void 6659 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 6660 struct init_cb_81xx *icb) 6661 { 6662 struct qla_hw_data *ha = vha->hw; 6663 6664 if (!QLA_TGT_MODE_ENABLED()) 6665 return; 6666 6667 if (ha->tgt.node_name_set) { 6668 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6669 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6670 } 6671 6672 /* disable ZIO at start time. */ 6673 if (!vha->flags.init_done) { 6674 uint32_t tmp; 6675 tmp = le32_to_cpu(icb->firmware_options_2); 6676 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6677 icb->firmware_options_2 = cpu_to_le32(tmp); 6678 } 6679 6680 } 6681 6682 void 6683 qlt_83xx_iospace_config(struct qla_hw_data *ha) 6684 { 6685 if (!QLA_TGT_MODE_ENABLED()) 6686 return; 6687 6688 ha->msix_count += 1; /* For ATIO Q */ 6689 } 6690 6691 int 6692 qlt_24xx_process_response_error(struct scsi_qla_host *vha, 6693 struct sts_entry_24xx *pkt) 6694 { 6695 switch (pkt->entry_type) { 6696 case ABTS_RECV_24XX: 6697 case ABTS_RESP_24XX: 6698 case CTIO_TYPE7: 6699 case NOTIFY_ACK_TYPE: 6700 case CTIO_CRC2: 6701 return 1; 6702 default: 6703 return 0; 6704 } 6705 } 6706 6707 void 6708 qlt_modify_vp_config(struct scsi_qla_host *vha, 6709 struct vp_config_entry_24xx *vpmod) 6710 { 6711 /* enable target mode. Bit5 = 1 => disable */ 6712 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 6713 vpmod->options_idx1 &= ~BIT_5; 6714 6715 /* Disable ini mode, if requested. 
bit4 = 1 => disable */ 6716 if (qla_tgt_mode_enabled(vha)) 6717 vpmod->options_idx1 &= ~BIT_4; 6718 } 6719 6720 void 6721 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 6722 { 6723 int rc; 6724 6725 if (!QLA_TGT_MODE_ENABLED()) 6726 return; 6727 6728 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6729 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6730 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6731 } else { 6732 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 6733 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 6734 } 6735 6736 mutex_init(&base_vha->vha_tgt.tgt_mutex); 6737 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 6738 6739 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 6740 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 6741 qlt_unknown_atio_work_fn); 6742 6743 qlt_clear_mode(base_vha); 6744 6745 rc = btree_init32(&ha->tgt.host_map); 6746 if (rc) 6747 ql_log(ql_log_info, base_vha, 0xffff, 6748 "Unable to initialize ha->tgt.host_map btree\n"); 6749 6750 qlt_update_vp_map(base_vha, SET_VP_IDX); 6751 } 6752 6753 irqreturn_t 6754 qla83xx_msix_atio_q(int irq, void *dev_id) 6755 { 6756 struct rsp_que *rsp; 6757 scsi_qla_host_t *vha; 6758 struct qla_hw_data *ha; 6759 unsigned long flags; 6760 6761 rsp = (struct rsp_que *) dev_id; 6762 ha = rsp->hw; 6763 vha = pci_get_drvdata(ha->pdev); 6764 6765 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 6766 6767 qlt_24xx_process_atio_queue(vha, 0); 6768 6769 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 6770 6771 return IRQ_HANDLED; 6772 } 6773 6774 static void 6775 qlt_handle_abts_recv_work(struct work_struct *work) 6776 { 6777 struct qla_tgt_sess_op *op = container_of(work, 6778 struct qla_tgt_sess_op, work); 6779 scsi_qla_host_t *vha = op->vha; 6780 struct qla_hw_data *ha = vha->hw; 6781 unsigned long flags; 6782 6783 if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset)) 6784 return; 6785 6786 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 6787 qlt_24xx_process_atio_queue(vha, 0); 6788 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 6789 6790 spin_lock_irqsave(&ha->hardware_lock, flags); 6791 qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); 6792 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6793 6794 kfree(op); 6795 } 6796 6797 void 6798 qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt) 6799 { 6800 struct qla_tgt_sess_op *op; 6801 6802 op = kzalloc(sizeof(*op), GFP_ATOMIC); 6803 6804 if (!op) { 6805 /* Do not reach for the ATIO queue here. This is best-effort 6806 * error recovery at this point.
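* On allocation failure, fall back to handling the ABTS inline in this context via qlt_response_pkt_all_vps() rather than deferring it to the workqueue.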
6807 */ 6808 qlt_response_pkt_all_vps(vha, pkt); 6809 return; 6810 } 6811 6812 memcpy(&op->atio, pkt, sizeof(*pkt)); 6813 op->vha = vha; 6814 op->chip_reset = vha->hw->chip_reset; 6815 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 6816 queue_work(qla_tgt_wq, &op->work); 6817 return; 6818 } 6819 6820 int 6821 qlt_mem_alloc(struct qla_hw_data *ha) 6822 { 6823 if (!QLA_TGT_MODE_ENABLED()) 6824 return 0; 6825 6826 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * 6827 MAX_MULTI_ID_FABRIC, GFP_KERNEL); 6828 if (!ha->tgt.tgt_vp_map) 6829 return -ENOMEM; 6830 6831 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 6832 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 6833 &ha->tgt.atio_dma, GFP_KERNEL); 6834 if (!ha->tgt.atio_ring) { 6835 kfree(ha->tgt.tgt_vp_map); 6836 return -ENOMEM; 6837 } 6838 return 0; 6839 } 6840 6841 void 6842 qlt_mem_free(struct qla_hw_data *ha) 6843 { 6844 if (!QLA_TGT_MODE_ENABLED()) 6845 return; 6846 6847 if (ha->tgt.atio_ring) { 6848 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 6849 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 6850 ha->tgt.atio_dma); 6851 } 6852 kfree(ha->tgt.tgt_vp_map); 6853 } 6854 6855 /* vport_slock to be held by the caller */ 6856 void 6857 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 6858 { 6859 void *slot; 6860 u32 key; 6861 int rc; 6862 6863 if (!QLA_TGT_MODE_ENABLED()) 6864 return; 6865 6866 key = vha->d_id.b24; 6867 6868 switch (cmd) { 6869 case SET_VP_IDX: 6870 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 6871 break; 6872 case SET_AL_PA: 6873 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 6874 if (!slot) { 6875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6876 "Save vha in host_map %p %06x\n", vha, key); 6877 rc = btree_insert32(&vha->hw->tgt.host_map, 6878 key, vha, GFP_ATOMIC); 6879 if (rc) 6880 ql_log(ql_log_info, vha, 0xffff, 6881 "Unable to insert s_id into host_map: %06x\n", 6882 key); 6883 return; 6884 } 6885 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6886 "replace existing vha in host_map %p %06x\n", vha, key); 6887 btree_update32(&vha->hw->tgt.host_map, key, vha); 6888 break; 6889 case RESET_VP_IDX: 6890 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 6891 break; 6892 case RESET_AL_PA: 6893 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, 6894 "clear vha in host_map %p %06x\n", vha, key); 6895 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 6896 if (slot) 6897 btree_remove32(&vha->hw->tgt.host_map, key); 6898 vha->d_id.b24 = 0; 6899 break; 6900 } 6901 } 6902 6903 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 6904 { 6905 unsigned long flags; 6906 struct qla_hw_data *ha = vha->hw; 6907 6908 if (!vha->d_id.b24) { 6909 spin_lock_irqsave(&ha->vport_slock, flags); 6910 vha->d_id = id; 6911 qlt_update_vp_map(vha, SET_AL_PA); 6912 spin_unlock_irqrestore(&ha->vport_slock, flags); 6913 } else if (vha->d_id.b24 != id.b24) { 6914 spin_lock_irqsave(&ha->vport_slock, flags); 6915 qlt_update_vp_map(vha, RESET_AL_PA); 6916 vha->d_id = id; 6917 qlt_update_vp_map(vha, SET_AL_PA); 6918 spin_unlock_irqrestore(&ha->vport_slock, flags); 6919 } 6920 } 6921 6922 static int __init qlt_parse_ini_mode(void) 6923 { 6924 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 6925 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 6926 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 6927 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 6928 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 6929 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 6930 else if 
(strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 6931 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 6932 else 6933 return false; 6934 6935 return true; 6936 } 6937 6938 int __init qlt_init(void) 6939 { 6940 int ret; 6941 6942 if (!qlt_parse_ini_mode()) { 6943 ql_log(ql_log_fatal, NULL, 0xe06b, 6944 "qlt_parse_ini_mode() failed\n"); 6945 return -EINVAL; 6946 } 6947 6948 if (!QLA_TGT_MODE_ENABLED()) 6949 return 0; 6950 6951 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 6952 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 6953 qla_tgt_mgmt_cmd), 0, NULL); 6954 if (!qla_tgt_mgmt_cmd_cachep) { 6955 ql_log(ql_log_fatal, NULL, 0xe06d, 6956 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 6957 return -ENOMEM; 6958 } 6959 6960 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 6961 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 6962 0, NULL); 6963 6964 if (!qla_tgt_plogi_cachep) { 6965 ql_log(ql_log_fatal, NULL, 0xe06d, 6966 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 6967 ret = -ENOMEM; 6968 goto out_mgmt_cmd_cachep; 6969 } 6970 6971 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 6972 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 6973 if (!qla_tgt_mgmt_cmd_mempool) { 6974 ql_log(ql_log_fatal, NULL, 0xe06e, 6975 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 6976 ret = -ENOMEM; 6977 goto out_plogi_cachep; 6978 } 6979 6980 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 6981 if (!qla_tgt_wq) { 6982 ql_log(ql_log_fatal, NULL, 0xe06f, 6983 "alloc_workqueue for qla_tgt_wq failed\n"); 6984 ret = -ENOMEM; 6985 goto out_cmd_mempool; 6986 } 6987 /* 6988 * Return 1 to signal that initiator-mode is being disabled 6989 */ 6990 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 6991 6992 out_cmd_mempool: 6993 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 6994 out_plogi_cachep: 6995 kmem_cache_destroy(qla_tgt_plogi_cachep); 6996 out_mgmt_cmd_cachep: 6997 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 6998 return ret; 6999 } 7000 7001 void qlt_exit(void) 7002 { 7003 if (!QLA_TGT_MODE_ENABLED()) 7004 return; 7005 7006 destroy_workqueue(qla_tgt_wq); 7007 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7008 kmem_cache_destroy(qla_tgt_plogi_cachep); 7009 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7010 } 7011
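/*
 * Worked example (illustrative sketch only, not part of the driver): the
 * dual-mode exchange split that qlt_24xx_config_nvram_stage1() and
 * qlt_81xx_config_nvram_stage1() apply to nv->exchange_count when
 * qlini_mode=dual. The helper name below is hypothetical; it simply restates
 * the clamping and scaling rules above, e.g. orig_fw_xcb_count = 4096 with
 * ql_dm_tgt_ex_pct = 50 asks the firmware to reserve 2048 exchanges for
 * target mode.
 */
#if 0	/* reference sketch, never compiled */
static u16 example_dual_mode_exchange_count(u32 orig_fw_xcb_count, int pct)
{
	u32 tmp;

	if (pct > 100)		/* out of range: fall back to an even split */
		pct = 50;
	else if (pct == 100)	/* leave some exchanges for the FW itself */
		pct = 95;

	tmp = orig_fw_xcb_count * pct / 100;
	return (tmp > 0xffff) ? 0xffff : (u16)tmp;	/* cap at 16-bit field */
}
#endif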