/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the
 * store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
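
/*
 * The generation tick pairs with the max_gen check in qlt_fc_port_deleted()
 * below: a deletion request carries the generation sampled when it was
 * issued, and it is ignored as stale if the session has been re-created
 * since, i.e. if max_gen - sess->generation < 0.
 */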

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
		vha->hw->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
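
/*
 * ATIOs and responses arrive on the physical port and must be routed to
 * the vport they belong to: ATIO_TYPE7 entries are matched by d_id via
 * ha->tgt.tgt_vp_map in qlt_find_host_by_d_id(), everything else by the
 * vp_index the firmware filled in. Entries that cannot be routed are
 * dropped with a debug message.
 */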

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of *iocb is undefined.
 */
static qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	qlt_plogi_ack_t *pla;

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			pla->iocb = *iocb;
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	pla->iocb = *iocb;
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
{
	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
	    pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
	    pla->iocb.u.isp24.port_id[0],
	    le16_to_cpu(pla->iocb.u.isp24.nport_handle),
	    pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
	qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

static void
qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
    struct qla_tgt_sess *sess, qlt_plogi_link_t link)
{
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
	    pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
	    pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
	    pla->ref_count);

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for the initiator
	 * to ack the LOGO. Initialize to 1 if the LOGO is triggered by a
	 * command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by the callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	fc_port_t fcport;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	BUG_ON(!tgt);

	if (sess->send_els_logo) {
		qlt_port_logo_t logo;
		logo.id = sess->s_id;
		logo.cmd_count = 0;
		qlt_send_first_logo(vha, &logo);
	}

	if (sess->logout_on_delete) {
		int rc;

		memset(&fcport, 0, sizeof(fcport));
		fcport.loop_id = sess->loop_id;
		fcport.d_id = sess->s_id;
		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
		fcport.vha = vha;
		fcport.tgt_session = sess;

		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
		if (rc != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0xf085,
			    "Schedule logo failed sess %p rc %d\n",
			    sess, rc);
		else
			logout_started = true;
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
			"%s: sess %p logout completed\n",
			__func__, sess);
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	{
		qlt_plogi_ack_t *own =
		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
		qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];

		if (con) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    con->iocb.u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own)
			qlt_plogi_ack_unref(vha, own);
	}

	list_del(&sess->sess_list_entry);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
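
/*
 * qlt_unreg_sess() below is the only place that queues sess->free_work,
 * i.e. that kicks off the teardown implemented in qlt_free_session_done()
 * above; it marks the session QLA_SESS_DELETION_IN_PROGRESS first, so no
 * further deletion can be scheduled against it.
 */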

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	if (!list_empty(&sess->del_list_entry))
		list_del_init(&sess->del_list_entry);
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
#if 0	/* FIXME: do we need to choose a session here? */
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
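
/*
 * Session deletion is a two-stage state machine: QLA_SESS_DELETION_PENDING
 * sessions sit on tgt->del_sess_list until dev_loss_tmo expires and can
 * still be rescued by qlt_undelete_sess(), while
 * QLA_SESS_DELETION_IN_PROGRESS is the point of no return after which the
 * session can only be torn down.
 */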

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted) {
		/* Upgrade to unconditional deletion in case it was temporary */
		if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
			list_del(&sess->del_list_entry);
		else
			return;
	}

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);

	if (immediate) {
		dev_loss_tmo = 0;
		sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
		list_add(&sess->del_list_entry, &tgt->del_sess_list);
	} else {
		sess->deleted = QLA_SESS_DELETION_PENDING;
		list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	}

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
	    " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id,
	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
	    dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
	    sess->generation);

	if (immediate)
		mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);

	list_del_init(&sess->del_list_entry);
	sess->deleted = 0;
}
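
/*
 * The delayed work below walks tgt->del_sess_list in expiry order
 * (immediate deletions are queued at the head with an already-expired
 * timestamp): expired sessions are shut down and their references
 * dropped, and the work is re-armed for the first entry that has not
 * timed out yet.
 */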

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			/* No turning back */
			list_del_init(&sess->del_list_entry);
			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.area, sess->s_id.b.al_pa,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id);

			/* Cannot undelete at this point */
			if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
				spin_unlock_irqrestore(&ha->tgt.sess_lock,
				    flags);
				return NULL;
			}

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
			    fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;

			qlt_do_generation_tick(vha, &sess->generation);

			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;
	INIT_LIST_HEAD(&sess->del_list_entry);

	/* Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary. */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->tgt.sess_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	qlt_do_generation_tick(vha, &sess->generation);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		/* Point of no return */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
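
/*
 * Deferred session work: when a packet such as an ABTS arrives for a
 * session that cannot be resolved in IRQ context, the IOCB is copied into
 * a qla_tgt_sess_work_param and queued on tgt->sess_works_list so the
 * lookup can be retried from process context.
 */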

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
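
/*
 * The wmb() before qla2x00_start_iocbs() above (the same pattern is used
 * by the other IOCB builders below) makes sure the IOCB contents written
 * into the request ring are visible before the ring pointer update that
 * hands them to the firmware.
 */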

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
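
/*
 * The ABTS response built above is a BA_ACC (basic accept) when status is
 * FCP_TMF_CMPL and a BA_RJT with reason "unable to perform command
 * request" otherwise; ids_reversed says whether the source header already
 * carries reversed IDs (as in a firmware-generated response), in which
 * case they are copied through instead of being swapped.
 */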

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response we
	 * generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;

	spin_lock(&vha->cmd_list_lock);

	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->aborted = 1;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	spin_unlock(&vha->cmd_list_lock);
	return 0;
}

/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which the lun reset was received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
	uint32_t lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;

	key = sid_to_key(s_id);
	spin_lock(&vha->cmd_list_lock);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		uint32_t op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		uint32_t cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock(&vha->cmd_list_lock);
}
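
/*
 * ABTS resolution order: first try to find the exchange tag among the
 * commands LIO already knows about on this session (to learn the LUN);
 * if the command is still only on the qla lists, abort_cmd_for_tag()
 * marks it aborted and a BA_ACC is sent right away; otherwise the abort
 * is handed to the fabric module as TMR_ABORT_TASK.
 */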

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (se_cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	/* cmd not in LIO lists, look in qla list */
	if (!found_lun) {
		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
			/* send TASK_ABORT response immediately */
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
			return 0;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
			    "unable to find cmd in driver or LIO for tag 0x%x\n",
			    abts->exchange_addr_to_abort);
			return -ENOENT;
		}
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
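
/*
 * The outer handler below additionally rejects ABORT SEQUENCE requests
 * (only full exchange aborts are supported) and unknown exchange
 * addresses, and defers the ABTS via qlt_sched_sess_work() when the
 * session is not known yet instead of rejecting it outright.
 */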

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
		cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
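
/*
 * mcmd->reset_count is sampled from ha->chip_reset when the request is
 * accepted (see __qlt_24xx_handle_abts() above); qlt_xmit_tm_rsp() below
 * compares it with the current value so that responses from a previous
 * chip-reset generation are dropped instead of being sent on a
 * re-initialized ring.
 */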

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
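
/*
 * Request ring bookkeeping for the CTIO builders below: vha->req->cnt
 * caches the number of free ring entries and is refreshed from the
 * hardware out-pointer only when it looks too small, while
 * qlt_make_handle() hands out 1-based slots in ha->tgt.cmds[] whose
 * CTIO handles are tagged with CTIO_COMPLETION_HANDLE_MARK so that a
 * completion can be mapped back to its qla_tgt_cmd.
 */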
Req-Length=%d\n", 1880 vha->vp_idx, vha->req->ring_index, 1881 vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length); 1882 return -EAGAIN; 1883 } 1884 vha->req->cnt -= req_cnt; 1885 1886 return 0; 1887 } 1888 1889 /* 1890 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1891 */ 1892 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha) 1893 { 1894 /* Adjust ring index. */ 1895 vha->req->ring_index++; 1896 if (vha->req->ring_index == vha->req->length) { 1897 vha->req->ring_index = 0; 1898 vha->req->ring_ptr = vha->req->ring; 1899 } else { 1900 vha->req->ring_ptr++; 1901 } 1902 return (cont_entry_t *)vha->req->ring_ptr; 1903 } 1904 1905 /* ha->hardware_lock supposed to be held on entry */ 1906 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha) 1907 { 1908 struct qla_hw_data *ha = vha->hw; 1909 uint32_t h; 1910 1911 h = ha->tgt.current_handle; 1912 /* always increment cmd handle */ 1913 do { 1914 ++h; 1915 if (h > DEFAULT_OUTSTANDING_COMMANDS) 1916 h = 1; /* 0 is QLA_TGT_NULL_HANDLE */ 1917 if (h == ha->tgt.current_handle) { 1918 ql_dbg(ql_dbg_io, vha, 0x305b, 1919 "qla_target(%d): Ran out of " 1920 "empty cmd slots in ha %p\n", vha->vp_idx, ha); 1921 h = QLA_TGT_NULL_HANDLE; 1922 break; 1923 } 1924 } while ((h == QLA_TGT_NULL_HANDLE) || 1925 (h == QLA_TGT_SKIP_HANDLE) || 1926 (ha->tgt.cmds[h-1] != NULL)); 1927 1928 if (h != QLA_TGT_NULL_HANDLE) 1929 ha->tgt.current_handle = h; 1930 1931 return h; 1932 } 1933 1934 /* ha->hardware_lock supposed to be held on entry */ 1935 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, 1936 struct scsi_qla_host *vha) 1937 { 1938 uint32_t h; 1939 struct ctio7_to_24xx *pkt; 1940 struct qla_hw_data *ha = vha->hw; 1941 struct atio_from_isp *atio = &prm->cmd->atio; 1942 uint16_t temp; 1943 1944 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; 1945 prm->pkt = pkt; 1946 memset(pkt, 0, sizeof(*pkt)); 1947 1948 pkt->entry_type = CTIO_TYPE7; 1949 pkt->entry_count = (uint8_t)prm->req_cnt; 1950 pkt->vp_index = vha->vp_idx; 1951 1952 h = qlt_make_handle(vha); 1953 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 1954 /* 1955 * CTIO type 7 from the firmware doesn't provide a way to 1956 * know the initiator's LOOP ID, hence we can't find 1957 * the session and, so, the command. 1958 */ 1959 return -EAGAIN; 1960 } else 1961 ha->tgt.cmds[h-1] = prm->cmd; 1962 1963 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 1964 pkt->nport_handle = prm->cmd->loop_id; 1965 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 1966 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 1967 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 1968 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 1969 pkt->exchange_addr = atio->u.isp24.exchange_addr; 1970 pkt->u.status0.flags |= (atio->u.isp24.attr << 9); 1971 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 1972 pkt->u.status0.ox_id = cpu_to_le16(temp); 1973 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 1974 1975 return 0; 1976 } 1977 1978 /* 1979 * ha->hardware_lock supposed to be held on entry. We have already made sure 1980 * that there is sufficient amount of request entries to not drop it. 
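 *
 * As a worked example of the reservation math done earlier in
 * qlt_pci_map_calc_cnt() (assuming the 24xx values of 1 data segment
 * in the CTIO itself and 5 per continuation entry): a command mapped
 * to 13 segments needs
 *
 *	req_cnt = 1 + DIV_ROUND_UP(13 - 1, 5) = 1 + 3 = 4
 *
 * request ring entries, which qlt_check_reserve_free_req() reserved
 * before this function was reached.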
1981 */ 1982 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm, 1983 struct scsi_qla_host *vha) 1984 { 1985 int cnt; 1986 uint32_t *dword_ptr; 1987 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; 1988 1989 /* Build continuation packets */ 1990 while (prm->seg_cnt > 0) { 1991 cont_a64_entry_t *cont_pkt64 = 1992 (cont_a64_entry_t *)qlt_get_req_pkt(vha); 1993 1994 /* 1995 * Make sure that from cont_pkt64 none of 1996 * 64-bit specific fields used for 32-bit 1997 * addressing. Cast to (cont_entry_t *) for 1998 * that. 1999 */ 2000 2001 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2002 2003 cont_pkt64->entry_count = 1; 2004 cont_pkt64->sys_define = 0; 2005 2006 if (enable_64bit_addressing) { 2007 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2008 dword_ptr = 2009 (uint32_t *)&cont_pkt64->dseg_0_address; 2010 } else { 2011 cont_pkt64->entry_type = CONTINUE_TYPE; 2012 dword_ptr = 2013 (uint32_t *)&((cont_entry_t *) 2014 cont_pkt64)->dseg_0_address; 2015 } 2016 2017 /* Load continuation entry data segments */ 2018 for (cnt = 0; 2019 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt; 2020 cnt++, prm->seg_cnt--) { 2021 *dword_ptr++ = 2022 cpu_to_le32(pci_dma_lo32 2023 (sg_dma_address(prm->sg))); 2024 if (enable_64bit_addressing) { 2025 *dword_ptr++ = 2026 cpu_to_le32(pci_dma_hi32 2027 (sg_dma_address 2028 (prm->sg))); 2029 } 2030 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2031 2032 prm->sg = sg_next(prm->sg); 2033 } 2034 } 2035 } 2036 2037 /* 2038 * ha->hardware_lock supposed to be held on entry. We have already made sure 2039 * that there is sufficient amount of request entries to not drop it. 2040 */ 2041 static void qlt_load_data_segments(struct qla_tgt_prm *prm, 2042 struct scsi_qla_host *vha) 2043 { 2044 int cnt; 2045 uint32_t *dword_ptr; 2046 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr; 2047 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2048 2049 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2050 2051 /* Setup packet address segment pointer */ 2052 dword_ptr = pkt24->u.status0.dseg_0_address; 2053 2054 /* Set total data segment count */ 2055 if (prm->seg_cnt) 2056 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2057 2058 if (prm->seg_cnt == 0) { 2059 /* No data transfer */ 2060 *dword_ptr++ = 0; 2061 *dword_ptr = 0; 2062 return; 2063 } 2064 2065 /* If scatter gather */ 2066 2067 /* Load command entry data segments */ 2068 for (cnt = 0; 2069 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt; 2070 cnt++, prm->seg_cnt--) { 2071 *dword_ptr++ = 2072 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); 2073 if (enable_64bit_addressing) { 2074 *dword_ptr++ = 2075 cpu_to_le32(pci_dma_hi32( 2076 sg_dma_address(prm->sg))); 2077 } 2078 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2079 2080 prm->sg = sg_next(prm->sg); 2081 } 2082 2083 qlt_load_cont_data_segments(prm, vha); 2084 } 2085 2086 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2087 { 2088 return cmd->bufflen > 0; 2089 } 2090 2091 /* 2092 * Called without ha->hardware_lock held 2093 */ 2094 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2095 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2096 uint32_t *full_req_cnt) 2097 { 2098 struct qla_tgt *tgt = cmd->tgt; 2099 struct scsi_qla_host *vha = tgt->vha; 2100 struct qla_hw_data *ha = vha->hw; 2101 struct se_cmd *se_cmd = &cmd->se_cmd; 2102 2103 prm->cmd = cmd; 2104 prm->tgt = tgt; 2105 prm->rq_result = scsi_status; 2106 prm->sense_buffer = &cmd->sense_buffer[0]; 2107 
prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2108 prm->sg = NULL; 2109 prm->seg_cnt = -1; 2110 prm->req_cnt = 1; 2111 prm->add_status_pkt = 0; 2112 2113 /* Send marker if required */ 2114 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2115 return -EFAULT; 2116 2117 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2118 if (qlt_pci_map_calc_cnt(prm) != 0) 2119 return -EAGAIN; 2120 } 2121 2122 *full_req_cnt = prm->req_cnt; 2123 2124 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2125 prm->residual = se_cmd->residual_count; 2126 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, 2127 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2128 prm->residual, se_cmd->tag, 2129 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2130 cmd->bufflen, prm->rq_result); 2131 prm->rq_result |= SS_RESIDUAL_UNDER; 2132 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2133 prm->residual = se_cmd->residual_count; 2134 ql_dbg(ql_dbg_io, vha, 0x305d, 2135 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2136 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2137 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2138 prm->rq_result |= SS_RESIDUAL_OVER; 2139 } 2140 2141 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2142 /* 2143 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2144 * ignored in *xmit_response() below 2145 */ 2146 if (qlt_has_data(cmd)) { 2147 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2148 (IS_FWI2_CAPABLE(ha) && 2149 (prm->rq_result != 0))) { 2150 prm->add_status_pkt = 1; 2151 (*full_req_cnt)++; 2152 } 2153 } 2154 } 2155 2156 return 0; 2157 } 2158 2159 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, 2160 struct qla_tgt_cmd *cmd, int sending_sense) 2161 { 2162 if (ha->tgt.enable_class_2) 2163 return 0; 2164 2165 if (sending_sense) 2166 return cmd->conf_compl_supported; 2167 else 2168 return ha->tgt.enable_explicit_conf && 2169 cmd->conf_compl_supported; 2170 } 2171 2172 #ifdef CONFIG_QLA_TGT_DEBUG_SRR 2173 /* 2174 * Original taken from the XFS code 2175 */ 2176 static unsigned long qlt_srr_random(void) 2177 { 2178 static int Inited; 2179 static unsigned long RandomValue; 2180 static DEFINE_SPINLOCK(lock); 2181 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ 2182 register long rv; 2183 register long lo; 2184 register long hi; 2185 unsigned long flags; 2186 2187 spin_lock_irqsave(&lock, flags); 2188 if (!Inited) { 2189 RandomValue = jiffies; 2190 Inited = 1; 2191 } 2192 rv = RandomValue; 2193 hi = rv / 127773; 2194 lo = rv % 127773; 2195 rv = 16807 * lo - 2836 * hi; 2196 if (rv <= 0) 2197 rv += 2147483647; 2198 RandomValue = rv; 2199 spin_unlock_irqrestore(&lock, flags); 2200 return rv; 2201 } 2202 2203 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) 2204 { 2205 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */ 2206 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) 2207 == 50) { 2208 *xmit_type &= ~QLA_TGT_XMIT_STATUS; 2209 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, 2210 "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); 2211 } 2212 #endif 2213 /* 2214 * It's currently not possible to simulate SRRs for FCP_WRITE without 2215 * a physical link layer failure, so don't even try here.. 
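 *
 * (For reference, qlt_srr_random() above is the Park-Miller "minimal
 * standard" Lehmer generator, rv = 16807 * rv mod (2^31 - 1), computed
 * with Schrage's decomposition: 127773 = (2^31 - 1) / 16807 and
 * 2836 = (2^31 - 1) mod 16807, which keeps every intermediate value
 * within signed 32-bit range.)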
2216 */ 2217 if (cmd->dma_data_direction != DMA_FROM_DEVICE) 2218 return; 2219 2220 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && 2221 ((qlt_srr_random() % 100) == 20)) { 2222 int i, leave = 0; 2223 unsigned int tot_len = 0; 2224 2225 while (leave == 0) 2226 leave = qlt_srr_random() % cmd->sg_cnt; 2227 2228 for (i = 0; i < leave; i++) 2229 tot_len += cmd->sg[i].length; 2230 2231 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, 2232 "Cutting cmd %p (tag %lld) buffer" 2233 " tail to len %d, sg_cnt %d (cmd->bufflen %d," 2234 " cmd->sg_cnt %d)", cmd, cmd->se_cmd.tag, tot_len, leave, 2235 cmd->bufflen, cmd->sg_cnt); 2236 2237 cmd->bufflen = tot_len; 2238 cmd->sg_cnt = leave; 2239 } 2240 2241 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { 2242 unsigned int offset = qlt_srr_random() % cmd->bufflen; 2243 2244 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, 2245 "Cutting cmd %p (tag %lld) buffer head " 2246 "to offset %d (cmd->bufflen %d)", cmd, cmd->se_cmd.tag, offset, 2247 cmd->bufflen); 2248 if (offset == 0) 2249 *xmit_type &= ~QLA_TGT_XMIT_DATA; 2250 else if (qlt_set_data_offset(cmd, offset)) { 2251 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, 2252 "qlt_set_data_offset() failed (tag %lld)", cmd->se_cmd.tag); 2253 } 2254 } 2255 } 2256 #else 2257 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) 2258 {} 2259 #endif 2260 2261 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2262 struct qla_tgt_prm *prm) 2263 { 2264 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2265 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2266 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2267 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { 2268 ctio->u.status0.flags |= cpu_to_le16( 2269 CTIO7_FLAGS_EXPLICIT_CONFORM | 2270 CTIO7_FLAGS_CONFORM_REQ); 2271 } 2272 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2273 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2274 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2275 int i; 2276 2277 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { 2278 if (prm->cmd->se_cmd.scsi_status != 0) { 2279 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, 2280 "Skipping EXPLICIT_CONFORM and " 2281 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2282 "non GOOD status\n"); 2283 goto skip_explicit_conf; 2284 } 2285 ctio->u.status1.flags |= cpu_to_le16( 2286 CTIO7_FLAGS_EXPLICIT_CONFORM | 2287 CTIO7_FLAGS_CONFORM_REQ); 2288 } 2289 skip_explicit_conf: 2290 ctio->u.status1.flags &= 2291 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2292 ctio->u.status1.flags |= 2293 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2294 ctio->u.status1.scsi_status |= 2295 cpu_to_le16(SS_SENSE_LEN_VALID); 2296 ctio->u.status1.sense_length = 2297 cpu_to_le16(prm->sense_buffer_len); 2298 for (i = 0; i < prm->sense_buffer_len/4; i++) 2299 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2300 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2301 #if 0 2302 if (unlikely((prm->sense_buffer_len % 4) != 0)) { 2303 static int q; 2304 if (q < 10) { 2305 ql_dbg(ql_dbg_tgt, vha, 0xe04f, 2306 "qla_target(%d): %d bytes of sense " 2307 "lost", prm->tgt->ha->vp_idx, 2308 prm->sense_buffer_len % 4); 2309 q++; 2310 } 2311 } 2312 #endif 2313 } else { 2314 ctio->u.status1.flags &= 2315 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2316 ctio->u.status1.flags |= 2317 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2318 ctio->u.status1.sense_length = 0; 2319 memset(ctio->u.status1.sense_data, 0, 2320 sizeof(ctio->u.status1.sense_data)); 2321 } 2322 2323 /* Sense with len > 24, is it possible ???
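It is: SPC allows up to 252 bytes of sense data,
 * but a status mode 1 CTIO7 carries at most
 * sizeof(ctio->u.status1.sense_data) bytes (24), which is why
 * prm->sense_buffer_len is clamped with min_t() at the top of this
 * function; anything longer is silently truncated.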
*/ 2324 } 2325 2326 2327 2328 /* T10-DIF (protection information) support */ 2329 static inline int 2330 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2331 { 2332 /* 2333 * Uncomment when corresponding SCSI changes are done. 2334 * 2335 if (!sp->cmd->prot_chk) 2336 return 0; 2337 * 2338 */ 2339 switch (se_cmd->prot_op) { 2340 case TARGET_PROT_DOUT_INSERT: 2341 case TARGET_PROT_DIN_STRIP: 2342 if (ql2xenablehba_err_chk >= 1) 2343 return 1; 2344 break; 2345 case TARGET_PROT_DOUT_PASS: 2346 case TARGET_PROT_DIN_PASS: 2347 if (ql2xenablehba_err_chk >= 2) 2348 return 1; 2349 break; 2350 case TARGET_PROT_DIN_INSERT: 2351 case TARGET_PROT_DOUT_STRIP: 2352 return 1; 2353 default: 2354 break; 2355 } 2356 return 0; 2357 } 2358 2359 /* 2360 * qlt_set_t10dif_tags - Extract Ref and App tags from SCSI command 2361 * 2362 */ 2363 static inline void 2364 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) 2365 { 2366 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2367 2368 /* Wait until the Mode Sense/Select commands (mode page 0Ah, subpage 2) 2369 * have been implemented by TCM before the AppTag is available. 2370 * Look for modesense_handlers[] 2371 */ 2372 ctx->app_tag = 0; 2373 ctx->app_tag_mask[0] = 0x0; 2374 ctx->app_tag_mask[1] = 0x0; 2375 2376 switch (se_cmd->prot_type) { 2377 case TARGET_DIF_TYPE0_PROT: 2378 /* 2379 * No check for ql2xenablehba_err_chk, as it would be an 2380 * I/O error if hba tag generation is not done. 2381 */ 2382 ctx->ref_tag = cpu_to_le32(lba); 2383 2384 if (!qlt_hba_err_chk_enabled(se_cmd)) 2385 break; 2386 2387 /* enable ALL bytes of the ref tag */ 2388 ctx->ref_tag_mask[0] = 0xff; 2389 ctx->ref_tag_mask[1] = 0xff; 2390 ctx->ref_tag_mask[2] = 0xff; 2391 ctx->ref_tag_mask[3] = 0xff; 2392 break; 2393 /* 2394 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and 2395 * 16 bit app tag.
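 *
 * Illustration (values assumed): for a Type 1 protected WRITE at
 * LBA 0x12345678 the seed programmed below is the low 32 bits of
 * the LBA,
 *
 *	ctx->ref_tag = cpu_to_le32(0x12345678);
 *
 * and the reference tag is then expected to increment by one for
 * each successive protected block of the transfer.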
2396 */ 2397 case TARGET_DIF_TYPE1_PROT: 2398 ctx->ref_tag = cpu_to_le32(lba); 2399 2400 if (!qlt_hba_err_chk_enabled(se_cmd)) 2401 break; 2402 2403 /* enable ALL bytes of the ref tag */ 2404 ctx->ref_tag_mask[0] = 0xff; 2405 ctx->ref_tag_mask[1] = 0xff; 2406 ctx->ref_tag_mask[2] = 0xff; 2407 ctx->ref_tag_mask[3] = 0xff; 2408 break; 2409 /* 2410 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to 2411 * match LBA in CDB + N 2412 */ 2413 case TARGET_DIF_TYPE2_PROT: 2414 ctx->ref_tag = cpu_to_le32(lba); 2415 2416 if (!qlt_hba_err_chk_enabled(se_cmd)) 2417 break; 2418 2419 /* enable ALL bytes of the ref tag */ 2420 ctx->ref_tag_mask[0] = 0xff; 2421 ctx->ref_tag_mask[1] = 0xff; 2422 ctx->ref_tag_mask[2] = 0xff; 2423 ctx->ref_tag_mask[3] = 0xff; 2424 break; 2425 2426 /* For Type 3 protection: 16 bit GUARD only */ 2427 case TARGET_DIF_TYPE3_PROT: 2428 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2429 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2430 break; 2431 } 2432 } 2433 2434 2435 static inline int 2436 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) 2437 { 2438 uint32_t *cur_dsd; 2439 uint32_t transfer_length = 0; 2440 uint32_t data_bytes; 2441 uint32_t dif_bytes; 2442 uint8_t bundling = 1; 2443 uint8_t *clr_ptr; 2444 struct crc_context *crc_ctx_pkt = NULL; 2445 struct qla_hw_data *ha; 2446 struct ctio_crc2_to_fw *pkt; 2447 dma_addr_t crc_ctx_dma; 2448 uint16_t fw_prot_opts = 0; 2449 struct qla_tgt_cmd *cmd = prm->cmd; 2450 struct se_cmd *se_cmd = &cmd->se_cmd; 2451 uint32_t h; 2452 struct atio_from_isp *atio = &prm->cmd->atio; 2453 uint16_t t16; 2454 2455 ha = vha->hw; 2456 2457 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; 2458 prm->pkt = pkt; 2459 memset(pkt, 0, sizeof(*pkt)); 2460 2461 ql_dbg(ql_dbg_tgt, vha, 0xe071, 2462 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 2463 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, 2464 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 2465 2466 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 2467 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 2468 bundling = 0; 2469 2470 /* Compute dif len and adjust data len to incude protection */ 2471 data_bytes = cmd->bufflen; 2472 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 2473 2474 switch (se_cmd->prot_op) { 2475 case TARGET_PROT_DIN_INSERT: 2476 case TARGET_PROT_DOUT_STRIP: 2477 transfer_length = data_bytes; 2478 data_bytes += dif_bytes; 2479 break; 2480 2481 case TARGET_PROT_DIN_STRIP: 2482 case TARGET_PROT_DOUT_INSERT: 2483 case TARGET_PROT_DIN_PASS: 2484 case TARGET_PROT_DOUT_PASS: 2485 transfer_length = data_bytes + dif_bytes; 2486 break; 2487 2488 default: 2489 BUG(); 2490 break; 2491 } 2492 2493 if (!qlt_hba_err_chk_enabled(se_cmd)) 2494 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 2495 /* HBA error checking enabled */ 2496 else if (IS_PI_UNINIT_CAPABLE(ha)) { 2497 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2498 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2499 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 2500 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2501 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2502 } 2503 2504 switch (se_cmd->prot_op) { 2505 case TARGET_PROT_DIN_INSERT: 2506 case TARGET_PROT_DOUT_INSERT: 2507 fw_prot_opts |= PO_MODE_DIF_INSERT; 2508 break; 2509 case TARGET_PROT_DIN_STRIP: 2510 case TARGET_PROT_DOUT_STRIP: 2511 fw_prot_opts |= PO_MODE_DIF_REMOVE; 2512 break; 2513 case TARGET_PROT_DIN_PASS: 2514 case TARGET_PROT_DOUT_PASS: 2515 fw_prot_opts |= PO_MODE_DIF_PASS; 2516 /* 
FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 2517 break; 2518 default:/* Normal Request */ 2519 fw_prot_opts |= PO_MODE_DIF_PASS; 2520 break; 2521 } 2522 2523 2524 /* ---- PKT ---- */ 2525 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2526 pkt->entry_type = CTIO_CRC2; 2527 pkt->entry_count = 1; 2528 pkt->vp_index = vha->vp_idx; 2529 2530 h = qlt_make_handle(vha); 2531 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2532 /* 2533 * CTIO type 7 from the firmware doesn't provide a way to 2534 * know the initiator's LOOP ID, hence we can't find 2535 * the session and, so, the command. 2536 */ 2537 return -EAGAIN; 2538 } else 2539 ha->tgt.cmds[h-1] = prm->cmd; 2540 2541 2542 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2543 pkt->nport_handle = prm->cmd->loop_id; 2544 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2545 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2546 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2547 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2548 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2549 2550 /* silence compile warning */ 2551 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2552 pkt->ox_id = cpu_to_le16(t16); 2553 2554 t16 = (atio->u.isp24.attr << 9); 2555 pkt->flags |= cpu_to_le16(t16); 2556 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2557 2558 /* Set transfer direction */ 2559 if (cmd->dma_data_direction == DMA_TO_DEVICE) 2560 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 2561 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2562 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2563 2564 2565 pkt->dseg_count = prm->tot_dsds; 2566 /* Fibre channel byte count */ 2567 pkt->transfer_length = cpu_to_le32(transfer_length); 2568 2569 2570 /* ----- CRC context -------- */ 2571 2572 /* Allocate CRC context from global pool */ 2573 crc_ctx_pkt = cmd->ctx = 2574 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 2575 2576 if (!crc_ctx_pkt) 2577 goto crc_queuing_error; 2578 2579 /* Zero out CTX area. 
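The context was just taken from
 * ha->dl_dma_pool with GFP_ATOMIC (hardware_lock is held here) and
 * is handed back through dma_pool_free() in qlt_unmap_sg() when the
 * command is torn down.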
*/ 2580 clr_ptr = (uint8_t *)crc_ctx_pkt; 2581 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); 2582 2583 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 2584 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 2585 2586 /* Set handle */ 2587 crc_ctx_pkt->handle = pkt->handle; 2588 2589 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); 2590 2591 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 2592 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 2593 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 2594 2595 2596 if (!bundling) { 2597 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 2598 } else { 2599 /* 2600 * Configure Bundling if we need to fetch interlaving 2601 * protection PCI accesses 2602 */ 2603 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 2604 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 2605 crc_ctx_pkt->u.bundling.dseg_count = 2606 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 2607 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 2608 } 2609 2610 /* Finish the common fields of CRC pkt */ 2611 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 2612 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 2613 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 2614 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 2615 2616 2617 /* Walks data segments */ 2618 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 2619 2620 if (!bundling && prm->prot_seg_cnt) { 2621 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 2622 prm->tot_dsds, cmd)) 2623 goto crc_queuing_error; 2624 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 2625 (prm->tot_dsds - prm->prot_seg_cnt), cmd)) 2626 goto crc_queuing_error; 2627 2628 if (bundling && prm->prot_seg_cnt) { 2629 /* Walks dif segments */ 2630 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 2631 2632 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 2633 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 2634 prm->prot_seg_cnt, cmd)) 2635 goto crc_queuing_error; 2636 } 2637 return QLA_SUCCESS; 2638 2639 crc_queuing_error: 2640 /* Cleanup will be performed by the caller */ 2641 2642 return QLA_FUNCTION_FAILED; 2643 } 2644 2645 2646 /* 2647 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 2648 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 2649 */ 2650 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 2651 uint8_t scsi_status) 2652 { 2653 struct scsi_qla_host *vha = cmd->vha; 2654 struct qla_hw_data *ha = vha->hw; 2655 struct ctio7_to_24xx *pkt; 2656 struct qla_tgt_prm prm; 2657 uint32_t full_req_cnt = 0; 2658 unsigned long flags = 0; 2659 int res; 2660 2661 spin_lock_irqsave(&ha->hardware_lock, flags); 2662 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 2663 cmd->state = QLA_TGT_STATE_PROCESSED; 2664 if (cmd->sess->logout_completed) 2665 /* no need to terminate. FW already freed exchange. */ 2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2667 else 2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2669 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2670 return 0; 2671 } 2672 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2673 2674 memset(&prm, 0, sizeof(prm)); 2675 qlt_check_srr_debug(cmd, &xmit_type); 2676 2677 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, 2678 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", 2679 (xmit_type & QLA_TGT_XMIT_STATUS) ? 
2680 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 2681 &cmd->se_cmd); 2682 2683 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2684 &full_req_cnt); 2685 if (unlikely(res != 0)) { 2686 return res; 2687 } 2688 2689 spin_lock_irqsave(&ha->hardware_lock, flags); 2690 2691 if (xmit_type == QLA_TGT_XMIT_STATUS) 2692 vha->tgt_counters.core_qla_snd_status++; 2693 else 2694 vha->tgt_counters.core_qla_que_buf++; 2695 2696 if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { 2697 /* 2698 * Either the port is not online or this request was from 2699 * previous life, just abort the processing. 2700 */ 2701 cmd->state = QLA_TGT_STATE_PROCESSED; 2702 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2703 ql_dbg(ql_dbg_async, vha, 0xe101, 2704 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 2705 vha->flags.online, qla2x00_reset_active(vha), 2706 cmd->reset_count, ha->chip_reset); 2707 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2708 return 0; 2709 } 2710 2711 /* Does F/W have an IOCBs for this request */ 2712 res = qlt_check_reserve_free_req(vha, full_req_cnt); 2713 if (unlikely(res)) 2714 goto out_unmap_unlock; 2715 2716 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 2717 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2718 else 2719 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2720 if (unlikely(res != 0)) { 2721 vha->req->cnt += full_req_cnt; 2722 goto out_unmap_unlock; 2723 } 2724 2725 pkt = (struct ctio7_to_24xx *)prm.pkt; 2726 2727 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 2728 pkt->u.status0.flags |= 2729 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 2730 CTIO7_FLAGS_STATUS_MODE_0); 2731 2732 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 2733 qlt_load_data_segments(&prm, vha); 2734 2735 if (prm.add_status_pkt == 0) { 2736 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2737 pkt->u.status0.scsi_status = 2738 cpu_to_le16(prm.rq_result); 2739 pkt->u.status0.residual = 2740 cpu_to_le32(prm.residual); 2741 pkt->u.status0.flags |= cpu_to_le16( 2742 CTIO7_FLAGS_SEND_STATUS); 2743 if (qlt_need_explicit_conf(ha, cmd, 0)) { 2744 pkt->u.status0.flags |= 2745 cpu_to_le16( 2746 CTIO7_FLAGS_EXPLICIT_CONFORM | 2747 CTIO7_FLAGS_CONFORM_REQ); 2748 } 2749 } 2750 2751 } else { 2752 /* 2753 * We have already made sure that there is sufficient 2754 * amount of request entries to not drop HW lock in 2755 * req_pkt(). 2756 */ 2757 struct ctio7_to_24xx *ctio = 2758 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 2759 2760 ql_dbg(ql_dbg_io, vha, 0x305e, 2761 "Building additional status packet 0x%p.\n", 2762 ctio); 2763 2764 /* 2765 * T10Dif: ctio_crc2_to_fw overlay ontop of 2766 * ctio7_to_24xx 2767 */ 2768 memcpy(ctio, pkt, sizeof(*ctio)); 2769 /* reset back to CTIO7 */ 2770 ctio->entry_count = 1; 2771 ctio->entry_type = CTIO_TYPE7; 2772 ctio->dseg_count = 0; 2773 ctio->u.status1.flags &= ~cpu_to_le16( 2774 CTIO7_FLAGS_DATA_IN); 2775 2776 /* Real finish is ctio_m1's finish */ 2777 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 2778 pkt->u.status0.flags |= cpu_to_le16( 2779 CTIO7_FLAGS_DONT_RET_CTIO); 2780 2781 /* qlt_24xx_init_ctio_to_isp will correct 2782 * all neccessary fields that's part of CTIO7. 2783 * There should be no residual of CTIO-CRC2 data. 
2784 */ 2785 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 2786 &prm); 2787 pr_debug("Status CTIO7: %p\n", ctio); 2788 } 2789 } else 2790 qlt_24xx_init_ctio_to_isp(pkt, &prm); 2791 2792 2793 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2794 cmd->cmd_sent_to_fw = 1; 2795 2796 /* Memory Barrier */ 2797 wmb(); 2798 qla2x00_start_iocbs(vha, vha->req); 2799 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2800 2801 return 0; 2802 2803 out_unmap_unlock: 2804 qlt_unmap_sg(vha, cmd); 2805 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2806 2807 return res; 2808 } 2809 EXPORT_SYMBOL(qlt_xmit_response); 2810 2811 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2812 { 2813 struct ctio7_to_24xx *pkt; 2814 struct scsi_qla_host *vha = cmd->vha; 2815 struct qla_hw_data *ha = vha->hw; 2816 struct qla_tgt *tgt = cmd->tgt; 2817 struct qla_tgt_prm prm; 2818 unsigned long flags; 2819 int res = 0; 2820 2821 memset(&prm, 0, sizeof(prm)); 2822 prm.cmd = cmd; 2823 prm.tgt = tgt; 2824 prm.sg = NULL; 2825 prm.req_cnt = 1; 2826 2827 /* Send marker if required */ 2828 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2829 return -EIO; 2830 2831 /* Calculate number of entries and segments required */ 2832 if (qlt_pci_map_calc_cnt(&prm) != 0) 2833 return -EAGAIN; 2834 2835 spin_lock_irqsave(&ha->hardware_lock, flags); 2836 2837 if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || 2838 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { 2839 /* 2840 * Either the port is not online or this request was from 2841 * previous life, just abort the processing. 2842 */ 2843 cmd->state = QLA_TGT_STATE_NEED_DATA; 2844 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2845 ql_dbg(ql_dbg_async, vha, 0xe102, 2846 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2847 vha->flags.online, qla2x00_reset_active(vha), 2848 cmd->reset_count, ha->chip_reset); 2849 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2850 return 0; 2851 } 2852 2853 /* Does F/W have an IOCBs for this request */ 2854 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2855 if (res != 0) 2856 goto out_unlock_free_unmap; 2857 if (cmd->se_cmd.prot_op) 2858 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2859 else 2860 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2861 2862 if (unlikely(res != 0)) { 2863 vha->req->cnt += prm.req_cnt; 2864 goto out_unlock_free_unmap; 2865 } 2866 2867 pkt = (struct ctio7_to_24xx *)prm.pkt; 2868 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2869 CTIO7_FLAGS_STATUS_MODE_0); 2870 2871 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 2872 qlt_load_data_segments(&prm, vha); 2873 2874 cmd->state = QLA_TGT_STATE_NEED_DATA; 2875 cmd->cmd_sent_to_fw = 1; 2876 2877 /* Memory Barrier */ 2878 wmb(); 2879 qla2x00_start_iocbs(vha, vha->req); 2880 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2881 2882 return res; 2883 2884 out_unlock_free_unmap: 2885 qlt_unmap_sg(vha, cmd); 2886 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2887 2888 return res; 2889 } 2890 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2891 2892 2893 /* 2894 * Checks the guard or meta-data for the type of error 2895 * detected by the HBA. 
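 *
 * The decode below assumes the standard 8-byte T10 PI tuple layout
 * (big endian on the wire) in sts->actual_dif[] and
 * sts->expected_dif[]:
 *
 *	bytes 0-1  guard tag (CRC16 of the data block)
 *	bytes 2-3  application tag
 *	bytes 4-7  reference tag (low 32 bits of the LBA for Type 1)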
2896 */ 2897 static inline int 2898 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 2899 struct ctio_crc_from_fw *sts) 2900 { 2901 uint8_t *ap = &sts->actual_dif[0]; 2902 uint8_t *ep = &sts->expected_dif[0]; 2903 uint32_t e_ref_tag, a_ref_tag; 2904 uint16_t e_app_tag, a_app_tag; 2905 uint16_t e_guard, a_guard; 2906 uint64_t lba = cmd->se_cmd.t_task_lba; 2907 2908 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 2909 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 2910 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 2911 2912 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 2913 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 2914 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 2915 2916 ql_dbg(ql_dbg_tgt, vha, 0xe075, 2917 "iocb(s) %p Returned STATUS.\n", sts); 2918 2919 ql_dbg(ql_dbg_tgt, vha, 0xf075, 2920 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", 2921 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2922 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); 2923 2924 /* 2925 * Ignore sector if: 2926 * For type 3: ref & app tag is all 'f's 2927 * For type 0,1,2: app tag is all 'f's 2928 */ 2929 if ((a_app_tag == 0xffff) && 2930 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || 2931 (a_ref_tag == 0xffffffff))) { 2932 uint32_t blocks_done; 2933 2934 /* 2TB boundary case covered automatically with this */ 2935 blocks_done = e_ref_tag - (uint32_t)lba + 1; 2936 cmd->se_cmd.bad_sector = e_ref_tag; 2937 cmd->se_cmd.pi_err = 0; 2938 ql_dbg(ql_dbg_tgt, vha, 0xf074, 2939 "need to return scsi good\n"); 2940 2941 /* Update protection tag */ 2942 if (cmd->prot_sg_cnt) { 2943 uint32_t i, k = 0, num_ent; 2944 struct scatterlist *sg, *sgl; 2945 2946 2947 sgl = cmd->prot_sg; 2948 2949 /* Patch the corresponding protection tags */ 2950 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { 2951 num_ent = sg_dma_len(sg) / 8; 2952 if (k + num_ent < blocks_done) { 2953 k += num_ent; 2954 continue; 2955 } 2956 k = blocks_done; 2957 break; 2958 } 2959 2960 if (k != blocks_done) { 2961 ql_log(ql_log_warn, vha, 0xf076, 2962 "unexpected tag values tag:lba=%u:%llu)\n", 2963 e_ref_tag, (unsigned long long)lba); 2964 goto out; 2965 } 2966 2967 #if 0 2968 struct sd_dif_tuple *spt; 2969 /* TODO: 2970 * This section came from initiator. Is it valid here? 2971 * should ulp be override with actual val??? 
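 *
 * (For context: on the initiator side this code patched the tuples of
 * sectors being ignored to the escape values, app_tag 0xffff and, for
 * Type 3, ref_tag 0xffffffff, so that later verification would pass;
 * whether a target should rewrite initiator-supplied PI the same way
 * is exactly the open question above.)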
2972 */ 2973 spt = page_address(sg_page(sg)) + sg->offset; 2974 spt += j; 2975 2976 spt->app_tag = 0xffff; 2977 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) 2978 spt->ref_tag = 0xffffffff; 2979 #endif 2980 } 2981 2982 return 0; 2983 } 2984 2985 /* check guard */ 2986 if (e_guard != a_guard) { 2987 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 2988 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 2989 2990 ql_log(ql_log_warn, vha, 0xe076, 2991 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2992 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2993 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2994 a_guard, e_guard, cmd); 2995 goto out; 2996 } 2997 2998 /* check ref tag */ 2999 if (e_ref_tag != a_ref_tag) { 3000 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 3001 cmd->se_cmd.bad_sector = e_ref_tag; 3002 3003 ql_log(ql_log_warn, vha, 0xe077, 3004 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3005 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3006 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3007 a_guard, e_guard, cmd); 3008 goto out; 3009 } 3010 3011 /* check appl tag */ 3012 if (e_app_tag != a_app_tag) { 3013 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 3014 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 3015 3016 ql_log(ql_log_warn, vha, 0xe078, 3017 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 3018 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 3019 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 3020 a_guard, e_guard, cmd); 3021 goto out; 3022 } 3023 out: 3024 return 1; 3025 } 3026 3027 3028 /* If hardware_lock held on entry, might drop it, then reaquire */ 3029 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3030 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3031 struct imm_ntfy_from_isp *ntfy) 3032 { 3033 struct nack_to_isp *nack; 3034 struct qla_hw_data *ha = vha->hw; 3035 request_t *pkt; 3036 int ret = 0; 3037 3038 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3039 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3040 3041 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); 3042 if (pkt == NULL) { 3043 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3044 "qla_target(%d): %s failed: unable to allocate " 3045 "request packet\n", vha->vp_idx, __func__); 3046 return -ENOMEM; 3047 } 3048 3049 pkt->entry_type = NOTIFY_ACK_TYPE; 3050 pkt->entry_count = 1; 3051 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3052 3053 nack = (struct nack_to_isp *)pkt; 3054 nack->ox_id = ntfy->ox_id; 3055 3056 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3057 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3058 nack->u.isp24.flags = ntfy->u.isp24.flags & 3059 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3060 } 3061 3062 /* terminate */ 3063 nack->u.isp24.flags |= 3064 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3065 3066 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3067 nack->u.isp24.status = ntfy->u.isp24.status; 3068 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3069 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3070 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3071 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3072 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3073 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3074 3075 qla2x00_start_iocbs(vha, 
vha->req); 3076 return ret; 3077 } 3078 3079 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3080 struct imm_ntfy_from_isp *imm, int ha_locked) 3081 { 3082 unsigned long flags = 0; 3083 int rc; 3084 3085 if (qlt_issue_marker(vha, ha_locked) < 0) 3086 return; 3087 3088 if (ha_locked) { 3089 rc = __qlt_send_term_imm_notif(vha, imm); 3090 3091 #if 0 /* Todo */ 3092 if (rc == -ENOMEM) 3093 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3094 #endif 3095 goto done; 3096 } 3097 3098 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 3099 rc = __qlt_send_term_imm_notif(vha, imm); 3100 3101 #if 0 /* Todo */ 3102 if (rc == -ENOMEM) 3103 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3104 #endif 3105 3106 done: 3107 if (!ha_locked) 3108 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3109 } 3110 3111 /* If hardware_lock held on entry, might drop it, then reaquire */ 3112 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3113 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 3114 struct qla_tgt_cmd *cmd, 3115 struct atio_from_isp *atio) 3116 { 3117 struct ctio7_to_24xx *ctio24; 3118 struct qla_hw_data *ha = vha->hw; 3119 request_t *pkt; 3120 int ret = 0; 3121 uint16_t temp; 3122 3123 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3124 3125 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); 3126 if (pkt == NULL) { 3127 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3128 "qla_target(%d): %s failed: unable to allocate " 3129 "request packet\n", vha->vp_idx, __func__); 3130 return -ENOMEM; 3131 } 3132 3133 if (cmd != NULL) { 3134 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3135 ql_dbg(ql_dbg_tgt, vha, 0xe051, 3136 "qla_target(%d): Terminating cmd %p with " 3137 "incorrect state %d\n", vha->vp_idx, cmd, 3138 cmd->state); 3139 } else 3140 ret = 1; 3141 } 3142 3143 vha->tgt_counters.num_term_xchg_sent++; 3144 pkt->entry_count = 1; 3145 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3146 3147 ctio24 = (struct ctio7_to_24xx *)pkt; 3148 ctio24->entry_type = CTIO_TYPE7; 3149 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3150 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3151 ctio24->vp_index = vha->vp_idx; 3152 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3153 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3154 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3155 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3156 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 3157 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 3158 CTIO7_FLAGS_TERMINATE); 3159 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3160 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3161 3162 /* Most likely, it isn't needed */ 3163 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3164 &atio->u.isp24.fcp_cmnd.add_cdb[ 3165 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3166 if (ctio24->u.status1.residual != 0) 3167 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3168 3169 /* Memory Barrier */ 3170 wmb(); 3171 qla2x00_start_iocbs(vha, vha->req); 3172 return ret; 3173 } 3174 3175 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 3177 { 3178 unsigned long flags = 0; 3179 int rc; 3180 3181 if (qlt_issue_marker(vha, ha_locked) < 0) 3182 return; 3183 3184 if (ha_locked) { 3185 rc = __qlt_send_term_exchange(vha, cmd, atio); 3186 if (rc == -ENOMEM) 3187 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3188 goto done; 3189 } 3190 spin_lock_irqsave(&vha->hw->hardware_lock, 
flags); 3191 rc = __qlt_send_term_exchange(vha, cmd, atio); 3192 if (rc == -ENOMEM) 3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3194 3195 done: 3196 if (cmd && (!cmd->aborted || 3197 !cmd->cmd_sent_to_fw)) { 3198 if (cmd->sg_mapped) 3199 qlt_unmap_sg(vha, cmd); 3200 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3201 } 3202 3203 if (!ha_locked) 3204 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3205 3206 return; 3207 } 3208 3209 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3210 { 3211 struct list_head free_list; 3212 struct qla_tgt_cmd *cmd, *tcmd; 3213 3214 vha->hw->tgt.leak_exchg_thresh_hold = 3215 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3216 3217 cmd = tcmd = NULL; 3218 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3219 INIT_LIST_HEAD(&free_list); 3220 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3221 3222 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3223 list_del(&cmd->cmd_list); 3224 /* This cmd was never sent to TCM. There is no need 3225 * to schedule free or call free_cmd 3226 */ 3227 qlt_free_cmd(cmd); 3228 vha->hw->tgt.num_qfull_cmds_alloc--; 3229 } 3230 } 3231 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3232 } 3233 3234 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3235 { 3236 uint32_t total_leaked; 3237 3238 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3239 3240 if (vha->hw->tgt.leak_exchg_thresh_hold && 3241 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3242 3243 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3244 "Chip reset due to exchange starvation: %d/%d.\n", 3245 total_leaked, vha->hw->cur_fw_xcb_count); 3246 3247 if (IS_P3P_TYPE(vha->hw)) 3248 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3249 else 3250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3251 qla2xxx_wake_dpc(vha); 3252 } 3253 3254 } 3255 3256 void qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3257 { 3258 struct qla_tgt *tgt = cmd->tgt; 3259 struct scsi_qla_host *vha = tgt->vha; 3260 struct se_cmd *se_cmd = &cmd->se_cmd; 3261 3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3263 "qla_target(%d): terminating exchange for aborted cmd=%p " 3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3265 se_cmd->tag); 3266 3267 cmd->aborted = 1; 3268 cmd->cmd_flags |= BIT_6; 3269 3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); 3271 } 3272 EXPORT_SYMBOL(qlt_abort_cmd); 3273 3274 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3275 { 3276 struct qla_tgt_sess *sess = cmd->sess; 3277 3278 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3279 "%s: se_cmd[%p] ox_id %04x\n", 3280 __func__, &cmd->se_cmd, 3281 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3282 3283 BUG_ON(cmd->cmd_in_wq); 3284 3285 if (!cmd->q_full) 3286 qlt_decr_num_pend_cmds(cmd->vha); 3287 3288 BUG_ON(cmd->sg_mapped); 3289 cmd->jiffies_at_free = get_jiffies_64(); 3290 if (unlikely(cmd->free_sg)) 3291 kfree(cmd->sg); 3292 3293 if (!sess || !sess->se_sess) { 3294 WARN_ON(1); 3295 return; 3296 } 3297 cmd->jiffies_at_free = get_jiffies_64(); 3298 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3299 } 3300 EXPORT_SYMBOL(qlt_free_cmd); 3301 3302 /* ha->hardware_lock supposed to be held on entry */ 3303 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 3304 struct qla_tgt_cmd *cmd, void *ctio) 3305 { 3306 struct qla_tgt_srr_ctio *sc; 3307 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3308 struct qla_tgt_srr_imm *imm; 3309 3310 tgt->ctio_srr_id++; 3311 cmd->cmd_flags |= BIT_15; 3312 3313 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 3314 "qla_target(%d): CTIO with SRR status 
received\n", vha->vp_idx); 3315 3316 if (!ctio) { 3317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 3318 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 3319 vha->vp_idx); 3320 return -EINVAL; 3321 } 3322 3323 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 3324 if (sc != NULL) { 3325 sc->cmd = cmd; 3326 /* IRQ is already OFF */ 3327 spin_lock(&tgt->srr_lock); 3328 sc->srr_id = tgt->ctio_srr_id; 3329 list_add_tail(&sc->srr_list_entry, 3330 &tgt->srr_ctio_list); 3331 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 3332 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 3333 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3334 int found = 0; 3335 list_for_each_entry(imm, &tgt->srr_imm_list, 3336 srr_list_entry) { 3337 if (imm->srr_id == sc->srr_id) { 3338 found = 1; 3339 break; 3340 } 3341 } 3342 if (found) { 3343 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 3344 "Scheduling srr work\n"); 3345 schedule_work(&tgt->srr_work); 3346 } else { 3347 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 3348 "qla_target(%d): imm_srr_id " 3349 "== ctio_srr_id (%d), but there is no " 3350 "corresponding SRR IMM, deleting CTIO " 3351 "SRR %p\n", vha->vp_idx, 3352 tgt->ctio_srr_id, sc); 3353 list_del(&sc->srr_list_entry); 3354 spin_unlock(&tgt->srr_lock); 3355 3356 kfree(sc); 3357 return -EINVAL; 3358 } 3359 } 3360 spin_unlock(&tgt->srr_lock); 3361 } else { 3362 struct qla_tgt_srr_imm *ti; 3363 3364 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 3365 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 3366 vha->vp_idx); 3367 spin_lock(&tgt->srr_lock); 3368 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 3369 srr_list_entry) { 3370 if (imm->srr_id == tgt->ctio_srr_id) { 3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 3372 "IMM SRR %p deleted (id %d)\n", 3373 imm, imm->srr_id); 3374 list_del(&imm->srr_list_entry); 3375 qlt_reject_free_srr_imm(vha, imm, 1); 3376 } 3377 } 3378 spin_unlock(&tgt->srr_lock); 3379 3380 return -ENOMEM; 3381 } 3382 3383 return 0; 3384 } 3385 3386 /* 3387 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 3388 */ 3389 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 3390 struct qla_tgt_cmd *cmd, uint32_t status) 3391 { 3392 int term = 0; 3393 3394 if (ctio != NULL) { 3395 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3396 term = !(c->flags & 3397 cpu_to_le16(OF_TERM_EXCH)); 3398 } else 3399 term = 1; 3400 3401 if (term) 3402 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3403 3404 return term; 3405 } 3406 3407 /* ha->hardware_lock supposed to be held on entry */ 3408 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 3409 uint32_t handle) 3410 { 3411 struct qla_hw_data *ha = vha->hw; 3412 3413 handle--; 3414 if (ha->tgt.cmds[handle] != NULL) { 3415 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 3416 ha->tgt.cmds[handle] = NULL; 3417 return cmd; 3418 } else 3419 return NULL; 3420 } 3421 3422 /* ha->hardware_lock supposed to be held on entry */ 3423 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3424 uint32_t handle, void *ctio) 3425 { 3426 struct qla_tgt_cmd *cmd = NULL; 3427 3428 /* Clear out internal marks */ 3429 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 3430 CTIO_INTERMEDIATE_HANDLE_MARK); 3431 3432 if (handle != QLA_TGT_NULL_HANDLE) { 3433 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) 3434 return NULL; 3435 3436 /* handle-1 is actually used */ 3437 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 3438 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3439 "qla_target(%d): Wrong handle %x received\n", 3440 vha->vp_idx, handle); 3441 return NULL; 3442 } 3443 cmd = qlt_get_cmd(vha, handle); 3444 if (unlikely(cmd == NULL)) { 3445 ql_dbg(ql_dbg_tgt, vha, 0xe053, 3446 "qla_target(%d): Suspicious: unable to " 3447 "find the command with handle %x\n", vha->vp_idx, 3448 handle); 3449 return NULL; 3450 } 3451 } else if (ctio != NULL) { 3452 /* We can't get loop ID from CTIO7 */ 3453 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3454 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3455 "support NULL handles\n", vha->vp_idx); 3456 return NULL; 3457 } 3458 3459 return cmd; 3460 } 3461 3462 /* hardware_lock should be held by caller. */ 3463 static void 3464 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 3465 { 3466 struct qla_hw_data *ha = vha->hw; 3467 uint32_t handle; 3468 3469 if (cmd->sg_mapped) 3470 qlt_unmap_sg(vha, cmd); 3471 3472 handle = qlt_make_handle(vha); 3473 3474 /* TODO: fix debug message type and ids. 
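 *
 * Disposition below, in short: PROCESSED commands are freed directly;
 * NEED_DATA commands are flipped to DATA_IN with
 * write_data_transferred left at 0 so ->handle_data() can fail the
 * write through the normal completion path; any other state is
 * unexpected and gets a dump_stack().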
*/ 3475 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3476 ql_dbg(ql_dbg_io, vha, 0xff00, 3477 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle); 3478 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3479 cmd->write_data_transferred = 0; 3480 cmd->state = QLA_TGT_STATE_DATA_IN; 3481 3482 ql_dbg(ql_dbg_io, vha, 0xff01, 3483 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle); 3484 3485 ha->tgt.tgt_ops->handle_data(cmd); 3486 return; 3487 } else { 3488 ql_dbg(ql_dbg_io, vha, 0xff03, 3489 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, 3490 cmd->state); 3491 dump_stack(); 3492 } 3493 3494 cmd->cmd_flags |= BIT_17; 3495 ha->tgt.tgt_ops->free_cmd(cmd); 3496 } 3497 3498 void 3499 qlt_host_reset_handler(struct qla_hw_data *ha) 3500 { 3501 struct qla_tgt_cmd *cmd; 3502 unsigned long flags; 3503 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3504 scsi_qla_host_t *vha = NULL; 3505 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt; 3506 uint32_t i; 3507 3508 if (!base_vha->hw->tgt.tgt_ops) 3509 return; 3510 3511 if (!tgt || qla_ini_mode_enabled(base_vha)) { 3512 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, 3513 "Target mode disabled\n"); 3514 return; 3515 } 3516 3517 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10, 3518 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n", 3519 base_vha->dpc_flags); 3520 3521 spin_lock_irqsave(&ha->hardware_lock, flags); 3522 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) { 3523 cmd = qlt_get_cmd(base_vha, i); 3524 if (!cmd) 3525 continue; 3526 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */ 3527 vha = cmd->vha; 3528 qlt_abort_cmd_on_host_reset(vha, cmd); 3529 } 3530 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3531 } 3532 3533 3534 /* 3535 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3536 */ 3537 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 3538 uint32_t status, void *ctio) 3539 { 3540 struct qla_hw_data *ha = vha->hw; 3541 struct se_cmd *se_cmd; 3542 struct qla_tgt_cmd *cmd; 3543 3544 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3545 /* That could happen only in case of an error/reset/abort */ 3546 if (status != CTIO_SUCCESS) { 3547 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3548 "Intermediate CTIO received" 3549 " (status %x)\n", status); 3550 } 3551 return; 3552 } 3553 3554 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 3555 if (cmd == NULL) 3556 return; 3557 3558 se_cmd = &cmd->se_cmd; 3559 cmd->cmd_sent_to_fw = 0; 3560 3561 qlt_unmap_sg(vha, cmd); 3562 3563 if (unlikely(status != CTIO_SUCCESS)) { 3564 switch (status & 0xFFFF) { 3565 case CTIO_LIP_RESET: 3566 case CTIO_TARGET_RESET: 3567 case CTIO_ABORTED: 3568 /* driver request abort via Terminate exchange */ 3569 case CTIO_TIMEOUT: 3570 case CTIO_INVALID_RX_ID: 3571 /* They are OK */ 3572 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3573 "qla_target(%d): CTIO with " 3574 "status %#x received, state %x, se_cmd %p, " 3575 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3576 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3577 status, cmd->state, se_cmd); 3578 break; 3579 3580 case CTIO_PORT_LOGGED_OUT: 3581 case CTIO_PORT_UNAVAILABLE: 3582 { 3583 int logged_out = (status & 0xFFFF); 3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3585 "qla_target(%d): CTIO with %s status %x " 3586 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3587 (logged_out == CTIO_PORT_LOGGED_OUT) ? 
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3589 status, cmd->state, se_cmd); 3590 3591 if (logged_out && cmd->sess) { 3592 /* 3593 * Session is already logged out, but we need 3594 * to notify initiator, who's not aware of this 3595 */ 3596 cmd->sess->logout_on_delete = 0; 3597 cmd->sess->send_els_logo = 1; 3598 qlt_schedule_sess_for_deletion(cmd->sess, true); 3599 } 3600 break; 3601 } 3602 case CTIO_SRR_RECEIVED: 3603 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 3604 "qla_target(%d): CTIO with SRR_RECEIVED" 3605 " status %x received (state %x, se_cmd %p)\n", 3606 vha->vp_idx, status, cmd->state, se_cmd); 3607 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 3608 break; 3609 else 3610 return; 3611 3612 case CTIO_DIF_ERROR: { 3613 struct ctio_crc_from_fw *crc = 3614 (struct ctio_crc_from_fw *)ctio; 3615 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3616 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", 3617 vha->vp_idx, status, cmd->state, se_cmd, 3618 *((u64 *)&crc->actual_dif[0]), 3619 *((u64 *)&crc->expected_dif[0])); 3620 3621 if (qlt_handle_dif_error(vha, cmd, ctio)) { 3622 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3623 /* scsi Write/xfer rdy complete */ 3624 goto skip_term; 3625 } else { 3626 /* scsi read/xmit respond complete 3627 * call handle dif to send scsi status 3628 * rather than terminate exchange. 3629 */ 3630 cmd->state = QLA_TGT_STATE_PROCESSED; 3631 ha->tgt.tgt_ops->handle_dif_err(cmd); 3632 return; 3633 } 3634 } else { 3635 /* Need to generate a SCSI good completion. 3636 * because FW did not send scsi status. 3637 */ 3638 status = 0; 3639 goto skip_term; 3640 } 3641 break; 3642 } 3643 default: 3644 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3645 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 3646 vha->vp_idx, status, cmd->state, se_cmd); 3647 break; 3648 } 3649 3650 3651 /* "cmd->aborted" means 3652 * cmd is already aborted/terminated, we don't 3653 * need to terminate again. The exchange is already 3654 * cleaned up/freed at FW level. Just cleanup at driver 3655 * level. 
3656 */ 3657 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3658 (!cmd->aborted)) { 3659 cmd->cmd_flags |= BIT_13; 3660 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3661 return; 3662 } 3663 } 3664 skip_term: 3665 3666 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3667 cmd->cmd_flags |= BIT_12; 3668 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3669 cmd->state = QLA_TGT_STATE_DATA_IN; 3670 3671 if (status == CTIO_SUCCESS) 3672 cmd->write_data_transferred = 1; 3673 3674 ha->tgt.tgt_ops->handle_data(cmd); 3675 return; 3676 } else if (cmd->aborted) { 3677 cmd->cmd_flags |= BIT_18; 3678 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3679 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3680 } else { 3681 cmd->cmd_flags |= BIT_19; 3682 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3683 "qla_target(%d): A command in state (%d) should " 3684 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3685 } 3686 3687 if (unlikely(status != CTIO_SUCCESS) && 3688 !cmd->aborted) { 3689 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3690 dump_stack(); 3691 } 3692 3693 ha->tgt.tgt_ops->free_cmd(cmd); 3694 } 3695 3696 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 3697 uint8_t task_codes) 3698 { 3699 int fcp_task_attr; 3700 3701 switch (task_codes) { 3702 case ATIO_SIMPLE_QUEUE: 3703 fcp_task_attr = TCM_SIMPLE_TAG; 3704 break; 3705 case ATIO_HEAD_OF_QUEUE: 3706 fcp_task_attr = TCM_HEAD_TAG; 3707 break; 3708 case ATIO_ORDERED_QUEUE: 3709 fcp_task_attr = TCM_ORDERED_TAG; 3710 break; 3711 case ATIO_ACA_QUEUE: 3712 fcp_task_attr = TCM_ACA_TAG; 3713 break; 3714 case ATIO_UNTAGGED: 3715 fcp_task_attr = TCM_SIMPLE_TAG; 3716 break; 3717 default: 3718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3719 "qla_target: unknown task code %x, use ORDERED instead\n", 3720 task_codes); 3721 fcp_task_attr = TCM_ORDERED_TAG; 3722 break; 3723 } 3724 3725 return fcp_task_attr; 3726 } 3727 3728 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, 3729 uint8_t *); 3730 /* 3731 * Process context for I/O path into tcm_qla2xxx code 3732 */ 3733 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 3734 { 3735 scsi_qla_host_t *vha = cmd->vha; 3736 struct qla_hw_data *ha = vha->hw; 3737 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3738 struct qla_tgt_sess *sess = cmd->sess; 3739 struct atio_from_isp *atio = &cmd->atio; 3740 unsigned char *cdb; 3741 unsigned long flags; 3742 uint32_t data_length; 3743 int ret, fcp_task_attr, data_dir, bidi = 0; 3744 3745 cmd->cmd_in_wq = 0; 3746 cmd->cmd_flags |= BIT_1; 3747 if (tgt->tgt_stop) 3748 goto out_term; 3749 3750 if (cmd->aborted) { 3751 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 3752 "cmd with tag %u is aborted\n", 3753 cmd->atio.u.isp24.exchange_addr); 3754 goto out_term; 3755 } 3756 3757 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3758 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3759 cmd->unpacked_lun = scsilun_to_int( 3760 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 3761 3762 if (atio->u.isp24.fcp_cmnd.rddata && 3763 atio->u.isp24.fcp_cmnd.wrdata) { 3764 bidi = 1; 3765 data_dir = DMA_TO_DEVICE; 3766 } else if (atio->u.isp24.fcp_cmnd.rddata) 3767 data_dir = DMA_FROM_DEVICE; 3768 else if (atio->u.isp24.fcp_cmnd.wrdata) 3769 data_dir = DMA_TO_DEVICE; 3770 else 3771 data_dir = DMA_NONE; 3772 3773 fcp_task_attr = qlt_get_fcp_task_attr(vha, 3774 atio->u.isp24.fcp_cmnd.task_attr); 3775 data_length = be32_to_cpu(get_unaligned((uint32_t *) 3776 &atio->u.isp24.fcp_cmnd.add_cdb[ 3777 atio->u.isp24.fcp_cmnd.add_cdb_len])); 3778 3779 ret = 
ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 3780 fcp_task_attr, data_dir, bidi); 3781 if (ret != 0) 3782 goto out_term; 3783 /* 3784 * Drop the extra session reference taken in qlt_handle_cmd_for_atio(). 3785 */ 3786 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 3787 ha->tgt.tgt_ops->put_sess(sess); 3788 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 3789 return; 3790 3791 out_term: 3792 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 3793 /* 3794 * cmd has not been sent to the target yet, so pass NULL as the second 3795 * argument to qlt_send_term_exchange() and free the memory here. 3796 */ 3797 cmd->cmd_flags |= BIT_2; 3798 spin_lock_irqsave(&ha->hardware_lock, flags); 3799 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 3800 3801 qlt_decr_num_pend_cmds(vha); 3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3804 3805 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 3806 ha->tgt.tgt_ops->put_sess(sess); 3807 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 3808 } 3809 3810 static void qlt_do_work(struct work_struct *work) 3811 { 3812 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3813 scsi_qla_host_t *vha = cmd->vha; 3814 unsigned long flags; 3815 3816 spin_lock_irqsave(&vha->cmd_list_lock, flags); 3817 list_del(&cmd->cmd_list); 3818 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 3819 3820 __qlt_do_work(cmd); 3821 } 3822 3823 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 3824 struct qla_tgt_sess *sess, 3825 struct atio_from_isp *atio) 3826 { 3827 struct se_session *se_sess = sess->se_sess; 3828 struct qla_tgt_cmd *cmd; 3829 int tag; 3830 3831 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 3832 if (tag < 0) 3833 return NULL; 3834 3835 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 3836 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 3837 3838 memcpy(&cmd->atio, atio, sizeof(*atio)); 3839 cmd->state = QLA_TGT_STATE_NEW; 3840 cmd->tgt = vha->vha_tgt.qla_tgt; 3841 qlt_incr_num_pend_cmds(vha); 3842 cmd->vha = vha; 3843 cmd->se_cmd.map_tag = tag; 3844 cmd->sess = sess; 3845 cmd->loop_id = sess->loop_id; 3846 cmd->conf_compl_supported = sess->conf_compl_supported; 3847 3848 cmd->cmd_flags = 0; 3849 cmd->jiffies_at_alloc = get_jiffies_64(); 3850 3851 cmd->reset_count = vha->hw->chip_reset; 3852 3853 return cmd; 3854 } 3855 3856 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, 3857 uint16_t); 3858 3859 static void qlt_create_sess_from_atio(struct work_struct *work) 3860 { 3861 struct qla_tgt_sess_op *op = container_of(work, 3862 struct qla_tgt_sess_op, work); 3863 scsi_qla_host_t *vha = op->vha; 3864 struct qla_hw_data *ha = vha->hw; 3865 struct qla_tgt_sess *sess; 3866 struct qla_tgt_cmd *cmd; 3867 unsigned long flags; 3868 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3869 3870 spin_lock_irqsave(&vha->cmd_list_lock, flags); 3871 list_del(&op->cmd_list); 3872 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 3873 3874 if (op->aborted) { 3875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083, 3876 "sess_op with tag %u is aborted\n", 3877 op->atio.u.isp24.exchange_addr); 3878 goto out_term; 3879 } 3880 3881 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3882 "qla_target(%d): Unable to find wwn login" 3883 " (s_id %x:%x:%x), trying to create it manually\n", 3884 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3885 3886 if (op->atio.u.raw.entry_count > 1) { 3887 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3888 "Dropping multi entry atio %p\n",
&op->atio); 3889 goto out_term; 3890 } 3891 3892 sess = qlt_make_local_sess(vha, s_id); 3893 /* sess has an extra creation ref. */ 3894 3895 if (!sess) 3896 goto out_term; 3897 /* 3898 * Now obtain a pre-allocated session tag using the original op->atio 3899 * packet header, and dispatch into __qlt_do_work() using the existing 3900 * process context. 3901 */ 3902 cmd = qlt_get_tag(vha, sess, &op->atio); 3903 if (!cmd) { 3904 spin_lock_irqsave(&ha->hardware_lock, flags); 3905 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); 3906 ha->tgt.tgt_ops->put_sess(sess); 3907 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3908 kfree(op); 3909 return; 3910 } 3911 /* 3912 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release 3913 * the extra reference taken above by qlt_make_local_sess() 3914 */ 3915 __qlt_do_work(cmd); 3916 kfree(op); 3917 return; 3918 3919 out_term: 3920 spin_lock_irqsave(&ha->hardware_lock, flags); 3921 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3922 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3923 kfree(op); 3924 3925 } 3926 3927 /* ha->hardware_lock supposed to be held on entry */ 3928 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 3929 struct atio_from_isp *atio) 3930 { 3931 struct qla_hw_data *ha = vha->hw; 3932 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3933 struct qla_tgt_sess *sess; 3934 struct qla_tgt_cmd *cmd; 3935 3936 if (unlikely(tgt->tgt_stop)) { 3937 ql_dbg(ql_dbg_io, vha, 0x3061, 3938 "New command while device %p is shutting down\n", tgt); 3939 return -EFAULT; 3940 } 3941 3942 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 3943 if (unlikely(!sess)) { 3944 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), 3945 GFP_ATOMIC); 3946 if (!op) 3947 return -ENOMEM; 3948 3949 memcpy(&op->atio, atio, sizeof(*atio)); 3950 op->vha = vha; 3951 3952 spin_lock(&vha->cmd_list_lock); 3953 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); 3954 spin_unlock(&vha->cmd_list_lock); 3955 3956 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3957 queue_work(qla_tgt_wq, &op->work); 3958 return 0; 3959 } 3960 3961 /* Another WWN used to have our s_id. Our PLOGI scheduled its 3962 * session deletion, but it's still in sess_del_work wq */ 3963 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 3964 ql_dbg(ql_dbg_io, vha, 0x3061, 3965 "New command while old session %p is being deleted\n", 3966 sess); 3967 return -EFAULT; 3968 } 3969 3970 /* 3971 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
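 * The matching put_sess() is done either in __qlt_do_work(), or in the
 * error path right below if qlt_get_tag() fails.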
3972 */ 3973 kref_get(&sess->se_sess->sess_kref); 3974 3975 cmd = qlt_get_tag(vha, sess, atio); 3976 if (!cmd) { 3977 ql_dbg(ql_dbg_io, vha, 0x3062, 3978 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 3979 ha->tgt.tgt_ops->put_sess(sess); 3980 return -ENOMEM; 3981 } 3982 3983 cmd->cmd_in_wq = 1; 3984 cmd->cmd_flags |= BIT_0; 3985 cmd->se_cmd.cpuid = -1; 3986 3987 spin_lock(&vha->cmd_list_lock); 3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 3989 spin_unlock(&vha->cmd_list_lock); 3990 3991 INIT_WORK(&cmd->work, qlt_do_work); 3992 if (ha->msix_count) { 3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid; 3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 3995 queue_work_on(smp_processor_id(), qla_tgt_wq, 3996 &cmd->work); 3997 else 3998 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 3999 &cmd->work); 4000 } else { 4001 queue_work(qla_tgt_wq, &cmd->work); 4002 } 4003 return 0; 4004 4005 } 4006 4007 /* ha->hardware_lock supposed to be held on entry */ 4008 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 4009 int fn, void *iocb, int flags) 4010 { 4011 struct scsi_qla_host *vha = sess->vha; 4012 struct qla_hw_data *ha = vha->hw; 4013 struct qla_tgt_mgmt_cmd *mcmd; 4014 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4015 int res; 4016 uint8_t tmr_func; 4017 4018 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4019 if (!mcmd) { 4020 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4021 "qla_target(%d): Allocation of management " 4022 "command failed, some commands and their data could " 4023 "leak\n", vha->vp_idx); 4024 return -ENOMEM; 4025 } 4026 memset(mcmd, 0, sizeof(*mcmd)); 4027 mcmd->sess = sess; 4028 4029 if (iocb) { 4030 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4031 sizeof(mcmd->orig_iocb.imm_ntfy)); 4032 } 4033 mcmd->tmr_func = fn; 4034 mcmd->flags = flags; 4035 mcmd->reset_count = vha->hw->chip_reset; 4036 4037 switch (fn) { 4038 case QLA_TGT_CLEAR_ACA: 4039 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 4040 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 4041 tmr_func = TMR_CLEAR_ACA; 4042 break; 4043 4044 case QLA_TGT_TARGET_RESET: 4045 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 4046 "qla_target(%d): TARGET_RESET received\n", 4047 sess->vha->vp_idx); 4048 tmr_func = TMR_TARGET_WARM_RESET; 4049 break; 4050 4051 case QLA_TGT_LUN_RESET: 4052 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 4053 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 4054 tmr_func = TMR_LUN_RESET; 4055 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4056 break; 4057 4058 case QLA_TGT_CLEAR_TS: 4059 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, 4060 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); 4061 tmr_func = TMR_CLEAR_TASK_SET; 4062 break; 4063 4064 case QLA_TGT_ABORT_TS: 4065 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, 4066 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); 4067 tmr_func = TMR_ABORT_TASK_SET; 4068 break; 4069 #if 0 4070 case QLA_TGT_ABORT_ALL: 4071 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, 4072 "qla_target(%d): Doing ABORT_ALL_TASKS\n", 4073 sess->vha->vp_idx); 4074 tmr_func = 0; 4075 break; 4076 4077 case QLA_TGT_ABORT_ALL_SESS: 4078 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, 4079 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", 4080 sess->vha->vp_idx); 4081 tmr_func = 0; 4082 break; 4083 4084 case QLA_TGT_NEXUS_LOSS_SESS: 4085 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, 4086 "qla_target(%d): Doing NEXUS_LOSS_SESS\n", 4087 sess->vha->vp_idx); 4088 tmr_func = 0; 4089 break; 4090 4091 case QLA_TGT_NEXUS_LOSS: 4092 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, 4093 
"qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); 4094 tmr_func = 0; 4095 break; 4096 #endif 4097 default: 4098 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, 4099 "qla_target(%d): Unknown task mgmt fn 0x%x\n", 4100 sess->vha->vp_idx, fn); 4101 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4102 return -ENOSYS; 4103 } 4104 4105 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); 4106 if (res != 0) { 4107 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 4108 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 4109 sess->vha->vp_idx, res); 4110 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4111 return -EFAULT; 4112 } 4113 4114 return 0; 4115 } 4116 4117 /* ha->hardware_lock supposed to be held on entry */ 4118 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4119 { 4120 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4121 struct qla_hw_data *ha = vha->hw; 4122 struct qla_tgt *tgt; 4123 struct qla_tgt_sess *sess; 4124 uint32_t lun, unpacked_lun; 4125 int fn; 4126 unsigned long flags; 4127 4128 tgt = vha->vha_tgt.qla_tgt; 4129 4130 lun = a->u.isp24.fcp_cmnd.lun; 4131 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4132 4133 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4134 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4135 a->u.isp24.fcp_hdr.s_id); 4136 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4137 4138 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4139 4140 if (!sess) { 4141 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 4142 "qla_target(%d): task mgmt fn 0x%x for " 4143 "non-existant session\n", vha->vp_idx, fn); 4144 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 4145 sizeof(struct atio_from_isp)); 4146 } 4147 4148 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) 4149 return -EFAULT; 4150 4151 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4152 } 4153 4154 /* ha->hardware_lock supposed to be held on entry */ 4155 static int __qlt_abort_task(struct scsi_qla_host *vha, 4156 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) 4157 { 4158 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4159 struct qla_hw_data *ha = vha->hw; 4160 struct qla_tgt_mgmt_cmd *mcmd; 4161 uint32_t lun, unpacked_lun; 4162 int rc; 4163 4164 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4165 if (mcmd == NULL) { 4166 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4167 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4168 vha->vp_idx, __func__); 4169 return -ENOMEM; 4170 } 4171 memset(mcmd, 0, sizeof(*mcmd)); 4172 4173 mcmd->sess = sess; 4174 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4175 sizeof(mcmd->orig_iocb.imm_ntfy)); 4176 4177 lun = a->u.isp24.fcp_cmnd.lun; 4178 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4179 mcmd->reset_count = vha->hw->chip_reset; 4180 4181 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, 4182 le16_to_cpu(iocb->u.isp2x.seq_id)); 4183 if (rc != 0) { 4184 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4185 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4186 vha->vp_idx, rc); 4187 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4188 return -EFAULT; 4189 } 4190 4191 return 0; 4192 } 4193 4194 /* ha->hardware_lock supposed to be held on entry */ 4195 static int qlt_abort_task(struct scsi_qla_host *vha, 4196 struct imm_ntfy_from_isp *iocb) 4197 { 4198 struct qla_hw_data *ha = vha->hw; 4199 struct qla_tgt_sess *sess; 4200 int loop_id; 4201 unsigned long flags; 4202 4203 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4204 4205 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4206 sess = 
ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4207 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4208 4209 if (sess == NULL) { 4210 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4211 "qla_target(%d): task abort for non-existent " 4212 "session\n", vha->vp_idx); 4213 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4214 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4215 } 4216 4217 return __qlt_abort_task(vha, iocb, sess); 4218 } 4219 4220 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4221 { 4222 if (fcport->tgt_session) { 4223 if (rc != MBS_COMMAND_COMPLETE) { 4224 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4225 "%s: se_sess %p / sess %p from" 4226 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4227 " LOGO failed: %#x\n", 4228 __func__, 4229 fcport->tgt_session->se_sess, 4230 fcport->tgt_session, 4231 fcport->port_name, fcport->loop_id, 4232 fcport->d_id.b.domain, fcport->d_id.b.area, 4233 fcport->d_id.b.al_pa, rc); 4234 } 4235 4236 fcport->tgt_session->logout_completed = 1; 4237 } 4238 } 4239 4240 /* 4241 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4242 * 4243 * Schedules sessions with matching port_id/loop_id but different wwn for 4244 * deletion. Returns the existing session with matching wwn if present, 4245 * NULL otherwise. 4246 */ 4247 static struct qla_tgt_sess * 4248 qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, 4249 port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess) 4250 { 4251 struct qla_tgt_sess *sess = NULL, *other_sess; 4252 uint64_t other_wwn; 4253 4254 *conflict_sess = NULL; 4255 4256 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { 4257 4258 other_wwn = wwn_to_u64(other_sess->port_name); 4259 4260 if (wwn == other_wwn) { 4261 WARN_ON(sess); 4262 sess = other_sess; 4263 continue; 4264 } 4265 4266 /* find other sess with nport_id collision */ 4267 if (port_id.b24 == other_sess->s_id.b24) { 4268 if (loop_id != other_sess->loop_id) { 4269 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, 4270 "Invalidating sess %p loop_id %d wwn %llx.\n", 4271 other_sess, other_sess->loop_id, other_wwn); 4272 4273 /* 4274 * logout_on_delete is set by default, but another 4275 * session that has the same s_id/loop_id combo 4276 * might have cleared it when it requested this 4277 * session's deletion, so don't touch it 4278 */ 4279 qlt_schedule_sess_for_deletion(other_sess, true); 4280 } else { 4281 /* 4282 * Another wwn used to have our s_id/loop_id: 4283 * kill the session, but don't free the loop_id 4284 */ 4285 other_sess->keep_nport_handle = 1; 4286 *conflict_sess = other_sess; 4287 qlt_schedule_sess_for_deletion(other_sess, 4288 true); 4289 } 4290 continue; 4291 } 4292 4293 /* find other sess with nport handle collision */ 4294 if (loop_id == other_sess->loop_id) { 4295 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, 4296 "Invalidating sess %p loop_id %d wwn %llx.\n", 4297 other_sess, other_sess->loop_id, other_wwn); 4298 4299 /* Same loop_id but different s_id: 4300 * OK to kill and log out */ 4301 qlt_schedule_sess_for_deletion(other_sess, true); 4302 } 4303 } 4304 4305 return sess; 4306 } 4307 4308 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4309 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4310 { 4311 struct qla_tgt_sess_op *op; 4312 struct qla_tgt_cmd *cmd; 4313 uint32_t key; 4314 int count = 0; 4315 4316 key = (((u32)s_id->b.domain << 16) | 4317 ((u32)s_id->b.area << 8) | 4318 ((u32)s_id->b.al_pa)); 4319 4320 spin_lock(&vha->cmd_list_lock);
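	/*
	 * Both pending lists are scanned: sess_op entries whose session is
	 * still being created, and regular commands already queued to
	 * qla_tgt_wq. Entries are only flagged here; their work handlers
	 * check the aborted flag and terminate the exchange themselves.
	 */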
4321 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4322 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4323 if (op_key == key) { 4324 op->aborted = true; 4325 count++; 4326 } 4327 } 4328 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4329 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4330 if (cmd_key == key) { 4331 cmd->aborted = 1; 4332 count++; 4333 } 4334 } 4335 spin_unlock(&vha->cmd_list_lock); 4336 4337 return count; 4338 } 4339 4340 /* 4341 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4342 */ 4343 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4344 struct imm_ntfy_from_isp *iocb) 4345 { 4346 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4347 struct qla_hw_data *ha = vha->hw; 4348 struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL; 4349 uint64_t wwn; 4350 port_id_t port_id; 4351 uint16_t loop_id; 4352 uint16_t wd3_lo; 4353 int res = 0; 4354 qlt_plogi_ack_t *pla; 4355 unsigned long flags; 4356 4357 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4358 4359 port_id.b.domain = iocb->u.isp24.port_id[2]; 4360 port_id.b.area = iocb->u.isp24.port_id[1]; 4361 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4362 port_id.b.rsvd_1 = 0; 4363 4364 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4365 4366 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4367 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4368 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4369 4370 /* res = 1 means ack at the end of thread 4371 * res = 0 means ack async/later. 4372 */ 4373 switch (iocb->u.isp24.status_subcode) { 4374 case ELS_PLOGI: 4375 4376 /* Mark all stale commands in qla_tgt_wq for deletion */ 4377 abort_cmds_for_s_id(vha, &port_id); 4378 4379 if (wwn) { 4380 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4381 sess = qlt_find_sess_invalidate_other(tgt, wwn, 4382 port_id, loop_id, &conflict_sess); 4383 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4384 } 4385 4386 if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) { 4387 res = 1; 4388 break; 4389 } 4390 4391 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4392 if (!pla) { 4393 qlt_send_term_imm_notif(vha, iocb, 1); 4394 4395 res = 0; 4396 break; 4397 } 4398 4399 res = 0; 4400 4401 if (conflict_sess) 4402 qlt_plogi_ack_link(vha, pla, conflict_sess, 4403 QLT_PLOGI_LINK_CONFLICT); 4404 4405 if (!sess) 4406 break; 4407 4408 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4409 /* 4410 * Under normal circumstances we want to release the nport handle 4411 * during the LOGO process to avoid nport handle leaks inside the FW. 4412 * The exception is when LOGO is done while another PLOGI with 4413 * the same nport handle is waiting, as might be the case here. 4414 * Note: there is always a possibility of a race where session 4415 * deletion has already started for other reasons (e.g. ACL 4416 * removal) and now PLOGI arrives: 4417 * 1. if PLOGI arrived in the FW after the nport handle was freed, 4418 * the FW must have assigned this PLOGI a new/same handle and we 4419 * can proceed ACK'ing it as usual when session deletion 4420 * completes. 4421 * 2. if PLOGI arrived in the FW before the LOGO with the LCF_FREE_NPORT 4422 * bit reached it, the handle has now been released. We'll 4423 * get an error when we ACK this PLOGI. Nothing will be sent 4424 * back to the initiator. The initiator should eventually retry 4425 * PLOGI and the situation will correct itself.
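 * In either case, keep_nport_handle (computed right below) tells the
 * deletion path whether the nport handle may be freed.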
4426 */ 4427 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4428 (sess->s_id.b24 == port_id.b24)); 4429 qlt_schedule_sess_for_deletion(sess, true); 4430 break; 4431 4432 case ELS_PRLI: 4433 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4434 4435 if (wwn) { 4436 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4437 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, 4438 loop_id, &conflict_sess); 4439 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4440 } 4441 4442 if (conflict_sess) { 4443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4444 "PRLI with conflicting sess %p port %8phC\n", 4445 conflict_sess, conflict_sess->port_name); 4446 qlt_send_term_imm_notif(vha, iocb, 1); 4447 res = 0; 4448 break; 4449 } 4450 4451 if (sess != NULL) { 4452 if (sess->deleted) { 4453 /* 4454 * Impatient initiator sent PRLI before last 4455 * PLOGI could finish. Will force him to re-try, 4456 * while last one finishes. 4457 */ 4458 ql_log(ql_log_warn, sess->vha, 0xf095, 4459 "sess %p PRLI received, before plogi ack.\n", 4460 sess); 4461 qlt_send_term_imm_notif(vha, iocb, 1); 4462 res = 0; 4463 break; 4464 } 4465 4466 /* 4467 * This shouldn't happen under normal circumstances, 4468 * since we have deleted the old session during PLOGI 4469 */ 4470 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 4471 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 4472 sess->loop_id, sess, iocb->u.isp24.nport_handle); 4473 4474 sess->local = 0; 4475 sess->loop_id = loop_id; 4476 sess->s_id = port_id; 4477 4478 if (wd3_lo & BIT_7) 4479 sess->conf_compl_supported = 1; 4480 4481 } 4482 res = 1; /* send notify ack */ 4483 4484 /* Make session global (not used in fabric mode) */ 4485 if (ha->current_topology != ISP_CFG_F) { 4486 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4487 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4488 qla2xxx_wake_dpc(vha); 4489 } else { 4490 /* todo: else - create sess here. */ 4491 res = 1; /* send notify ack */ 4492 } 4493 4494 break; 4495 4496 case ELS_LOGO: 4497 case ELS_PRLO: 4498 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4499 break; 4500 case ELS_PDISC: 4501 case ELS_ADISC: 4502 { 4503 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4504 if (tgt->link_reinit_iocb_pending) { 4505 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 4506 0, 0, 0, 0, 0, 0); 4507 tgt->link_reinit_iocb_pending = 0; 4508 } 4509 res = 1; /* send notify ack */ 4510 break; 4511 } 4512 4513 case ELS_FLOGI: /* should never happen */ 4514 default: 4515 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4516 "qla_target(%d): Unsupported ELS command %x " 4517 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 4518 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4519 break; 4520 } 4521 4522 return res; 4523 } 4524 4525 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) 4526 { 4527 #if 1 4528 /* 4529 * FIXME: Reject non zero SRR relative offset until we can test 4530 * this code properly. 
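 * Returning -1 makes qlt_srr_adjust_data() fail, which causes the
 * caller to reject the SRR.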
4531 */ 4532 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 4533 return -1; 4534 #else 4535 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; 4536 size_t first_offset = 0, rem_offset = offset, tmp = 0; 4537 int i, sg_srr_cnt, bufflen = 0; 4538 4539 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, 4540 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " 4541 "cmd->sg_cnt: %u, direction: %d\n", 4542 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 4543 4544 if (!cmd->sg || !cmd->sg_cnt) { 4545 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 4546 "Missing cmd->sg or zero cmd->sg_cnt in" 4547 " qla_tgt_set_data_offset\n"); 4548 return -EINVAL; 4549 } 4550 /* 4551 * Walk the current cmd->sg list until we locate the new sg_srr_start 4552 */ 4553 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 4554 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 4555 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 4556 i, sg, sg_page(sg), sg->length, sg->offset); 4557 4558 if ((sg->length + tmp) > offset) { 4559 first_offset = rem_offset; 4560 sg_srr_start = sg; 4561 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 4562 "Found matching sg[%d], using %p as sg_srr_start, " 4563 "and using first_offset: %zu\n", i, sg, 4564 first_offset); 4565 break; 4566 } 4567 tmp += sg->length; 4568 rem_offset -= sg->length; 4569 } 4570 4571 if (!sg_srr_start) { 4572 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 4573 "Unable to locate sg_srr_start for offset: %u\n", offset); 4574 return -EINVAL; 4575 } 4576 sg_srr_cnt = (cmd->sg_cnt - i); 4577 4578 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 4579 if (!sg_srr) { 4580 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 4581 "Unable to allocate sgp\n"); 4582 return -ENOMEM; 4583 } 4584 sg_init_table(sg_srr, sg_srr_cnt); 4585 sgp = &sg_srr[0]; 4586 /* 4587 * Walk the remaining list for sg_srr_start, mapping to the newly 4588 * allocated sg_srr taking first_offset into account. 
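 * Each new entry references the same pages as the original one, so
 * no payload data is copied.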
4589 */ 4590 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 4591 if (first_offset) { 4592 sg_set_page(sgp, sg_page(sg), 4593 (sg->length - first_offset), first_offset); 4594 first_offset = 0; 4595 } else { 4596 sg_set_page(sgp, sg_page(sg), sg->length, 0); 4597 } 4598 bufflen += sgp->length; 4599 4600 sgp = sg_next(sgp); 4601 if (!sgp) 4602 break; 4603 } 4604 4605 cmd->sg = sg_srr; 4606 cmd->sg_cnt = sg_srr_cnt; 4607 cmd->bufflen = bufflen; 4608 cmd->offset += offset; 4609 cmd->free_sg = 1; 4610 4611 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 4612 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 4613 cmd->sg_cnt); 4614 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 4615 cmd->bufflen); 4616 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 4617 cmd->offset); 4618 4619 if (cmd->sg_cnt < 0) 4620 BUG(); 4621 4622 if (cmd->bufflen < 0) 4623 BUG(); 4624 4625 return 0; 4626 #endif 4627 } 4628 4629 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 4630 uint32_t srr_rel_offs, int *xmit_type) 4631 { 4632 int res = 0, rel_offs; 4633 4634 rel_offs = srr_rel_offs - cmd->offset; 4635 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 4636 srr_rel_offs, rel_offs); 4637 4638 *xmit_type = QLA_TGT_XMIT_ALL; 4639 4640 if (rel_offs < 0) { 4641 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 4642 "qla_target(%d): SRR rel_offs (%d) < 0", 4643 cmd->vha->vp_idx, rel_offs); 4644 res = -1; 4645 } else if (rel_offs == cmd->bufflen) 4646 *xmit_type = QLA_TGT_XMIT_STATUS; 4647 else if (rel_offs > 0) 4648 res = qlt_set_data_offset(cmd, rel_offs); 4649 4650 return res; 4651 } 4652 4653 /* No locks, thread context */ 4654 static void qlt_handle_srr(struct scsi_qla_host *vha, 4655 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 4656 { 4657 struct imm_ntfy_from_isp *ntfy = 4658 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 4659 struct qla_hw_data *ha = vha->hw; 4660 struct qla_tgt_cmd *cmd = sctio->cmd; 4661 struct se_cmd *se_cmd = &cmd->se_cmd; 4662 unsigned long flags; 4663 int xmit_type = 0, resp = 0; 4664 uint32_t offset; 4665 uint16_t srr_ui; 4666 4667 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 4668 srr_ui = ntfy->u.isp24.srr_ui; 4669 4670 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 4671 cmd, srr_ui); 4672 4673 switch (srr_ui) { 4674 case SRR_IU_STATUS: 4675 spin_lock_irqsave(&ha->hardware_lock, flags); 4676 qlt_send_notify_ack(vha, ntfy, 4677 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 4678 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4679 xmit_type = QLA_TGT_XMIT_STATUS; 4680 resp = 1; 4681 break; 4682 case SRR_IU_DATA_IN: 4683 if (!cmd->sg || !cmd->sg_cnt) { 4684 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 4685 "Unable to process SRR_IU_DATA_IN due to" 4686 " missing cmd->sg, state: %d\n", cmd->state); 4687 dump_stack(); 4688 goto out_reject; 4689 } 4690 if (se_cmd->scsi_status != 0) { 4691 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 4692 "Rejecting SRR_IU_DATA_IN with non GOOD " 4693 "scsi_status\n"); 4694 goto out_reject; 4695 } 4696 cmd->bufflen = se_cmd->data_length; 4697 4698 if (qlt_has_data(cmd)) { 4699 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 4700 goto out_reject; 4701 spin_lock_irqsave(&ha->hardware_lock, flags); 4702 qlt_send_notify_ack(vha, ntfy, 4703 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 4704 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4705 resp = 1; 4706 } else { 4707 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 4708 "qla_target(%d): SRR for in data for cmd without 
them (tag %lld, SCSI status %d), reject", 4709 vha->vp_idx, se_cmd->tag, 4710 cmd->se_cmd.scsi_status); 4711 goto out_reject; 4712 } 4713 break; 4714 case SRR_IU_DATA_OUT: 4715 if (!cmd->sg || !cmd->sg_cnt) { 4716 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 4717 "Unable to process SRR_IU_DATA_OUT due to" 4718 " missing cmd->sg\n"); 4719 dump_stack(); 4720 goto out_reject; 4721 } 4722 if (se_cmd->scsi_status != 0) { 4723 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 4724 "Rejecting SRR_IU_DATA_OUT" 4725 " with non GOOD scsi_status\n"); 4726 goto out_reject; 4727 } 4728 cmd->bufflen = se_cmd->data_length; 4729 4730 if (qlt_has_data(cmd)) { 4731 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 4732 goto out_reject; 4733 spin_lock_irqsave(&ha->hardware_lock, flags); 4734 qlt_send_notify_ack(vha, ntfy, 4735 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 4736 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4737 if (xmit_type & QLA_TGT_XMIT_DATA) { 4738 cmd->cmd_flags |= BIT_8; 4739 qlt_rdy_to_xfer(cmd); 4740 } 4741 } else { 4742 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 4743 "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", 4744 vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); 4745 goto out_reject; 4746 } 4747 break; 4748 default: 4749 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 4750 "qla_target(%d): Unknown srr_ui value %x", 4751 vha->vp_idx, srr_ui); 4752 goto out_reject; 4753 } 4754 4755 /* Transmit response in case of status and data-in cases */ 4756 if (resp) { 4757 cmd->cmd_flags |= BIT_7; 4758 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 4759 } 4760 4761 return; 4762 4763 out_reject: 4764 spin_lock_irqsave(&ha->hardware_lock, flags); 4765 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 4766 NOTIFY_ACK_SRR_FLAGS_REJECT, 4767 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 4768 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 4769 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 4770 cmd->state = QLA_TGT_STATE_DATA_IN; 4771 dump_stack(); 4772 } else { 4773 cmd->cmd_flags |= BIT_9; 4774 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 4775 } 4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4777 } 4778 4779 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 4780 struct qla_tgt_srr_imm *imm, int ha_locked) 4781 { 4782 struct qla_hw_data *ha = vha->hw; 4783 unsigned long flags = 0; 4784 4785 #ifndef __CHECKER__ 4786 if (!ha_locked) 4787 spin_lock_irqsave(&ha->hardware_lock, flags); 4788 #endif 4789 4790 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 4791 NOTIFY_ACK_SRR_FLAGS_REJECT, 4792 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 4793 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 4794 4795 #ifndef __CHECKER__ 4796 if (!ha_locked) 4797 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4798 #endif 4799 4800 kfree(imm); 4801 } 4802 4803 static void qlt_handle_srr_work(struct work_struct *work) 4804 { 4805 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 4806 struct scsi_qla_host *vha = tgt->vha; 4807 struct qla_tgt_srr_ctio *sctio; 4808 unsigned long flags; 4809 4810 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 4811 tgt); 4812 4813 restart: 4814 spin_lock_irqsave(&tgt->srr_lock, flags); 4815 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 4816 struct qla_tgt_srr_imm *imm, *i, *ti; 4817 struct qla_tgt_cmd *cmd; 4818 struct se_cmd *se_cmd; 4819 4820 imm = NULL; 4821 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 4822 srr_list_entry) { 4823 if (i->srr_id == sctio->srr_id) { 4824 
list_del(&i->srr_list_entry); 4825 if (imm) { 4826 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 4827 "qla_target(%d): There must be " 4828 "only one IMM SRR per CTIO SRR " 4829 "(IMM SRR %p, id %d, CTIO %p\n", 4830 vha->vp_idx, i, i->srr_id, sctio); 4831 qlt_reject_free_srr_imm(tgt->vha, i, 0); 4832 } else 4833 imm = i; 4834 } 4835 } 4836 4837 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 4838 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 4839 sctio->srr_id); 4840 4841 if (imm == NULL) { 4842 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 4843 "Not found matching IMM for SRR CTIO (id %d)\n", 4844 sctio->srr_id); 4845 continue; 4846 } else 4847 list_del(&sctio->srr_list_entry); 4848 4849 spin_unlock_irqrestore(&tgt->srr_lock, flags); 4850 4851 cmd = sctio->cmd; 4852 /* 4853 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 4854 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 4855 * logic.. 4856 */ 4857 cmd->offset = 0; 4858 if (cmd->free_sg) { 4859 kfree(cmd->sg); 4860 cmd->sg = NULL; 4861 cmd->free_sg = 0; 4862 } 4863 se_cmd = &cmd->se_cmd; 4864 4865 cmd->sg_cnt = se_cmd->t_data_nents; 4866 cmd->sg = se_cmd->t_data_sg; 4867 4868 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 4869 "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", 4870 cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? 4871 se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); 4872 4873 qlt_handle_srr(vha, sctio, imm); 4874 4875 kfree(imm); 4876 kfree(sctio); 4877 goto restart; 4878 } 4879 spin_unlock_irqrestore(&tgt->srr_lock, flags); 4880 } 4881 4882 /* ha->hardware_lock supposed to be held on entry */ 4883 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 4884 struct imm_ntfy_from_isp *iocb) 4885 { 4886 struct qla_tgt_srr_imm *imm; 4887 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4888 struct qla_tgt_srr_ctio *sctio; 4889 4890 tgt->imm_srr_id++; 4891 4892 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n", 4893 vha->vp_idx); 4894 4895 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 4896 if (imm != NULL) { 4897 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 4898 4899 /* IRQ is already OFF */ 4900 spin_lock(&tgt->srr_lock); 4901 imm->srr_id = tgt->imm_srr_id; 4902 list_add_tail(&imm->srr_list_entry, 4903 &tgt->srr_imm_list); 4904 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 4905 "IMM NTFY SRR %p added (id %d, ui %x)\n", 4906 imm, imm->srr_id, iocb->u.isp24.srr_ui); 4907 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 4908 int found = 0; 4909 list_for_each_entry(sctio, &tgt->srr_ctio_list, 4910 srr_list_entry) { 4911 if (sctio->srr_id == imm->srr_id) { 4912 found = 1; 4913 break; 4914 } 4915 } 4916 if (found) { 4917 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 4918 "Scheduling srr work\n"); 4919 schedule_work(&tgt->srr_work); 4920 } else { 4921 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 4922 "qla_target(%d): imm_srr_id " 4923 "== ctio_srr_id (%d), but there is no " 4924 "corresponding SRR CTIO, deleting IMM " 4925 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 4926 imm); 4927 list_del(&imm->srr_list_entry); 4928 4929 kfree(imm); 4930 4931 spin_unlock(&tgt->srr_lock); 4932 goto out_reject; 4933 } 4934 } 4935 spin_unlock(&tgt->srr_lock); 4936 } else { 4937 struct qla_tgt_srr_ctio *ts; 4938 4939 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 4940 "qla_target(%d): Unable to allocate SRR IMM " 4941 "entry, SRR request will be rejected\n", vha->vp_idx); 4942 4943 /* IRQ is already OFF */ 4944 spin_lock(&tgt->srr_lock); 4945 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 4946 srr_list_entry) { 4947 if (sctio->srr_id == 
tgt->imm_srr_id) { 4948 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 4949 "CTIO SRR %p deleted (id %d)\n", 4950 sctio, sctio->srr_id); 4951 list_del(&sctio->srr_list_entry); 4952 qlt_send_term_exchange(vha, sctio->cmd, 4953 &sctio->cmd->atio, 1); 4954 kfree(sctio); 4955 } 4956 } 4957 spin_unlock(&tgt->srr_lock); 4958 goto out_reject; 4959 } 4960 4961 return; 4962 4963 out_reject: 4964 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 4965 NOTIFY_ACK_SRR_FLAGS_REJECT, 4966 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 4967 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 4968 } 4969 4970 /* 4971 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4972 */ 4973 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 4974 struct imm_ntfy_from_isp *iocb) 4975 { 4976 struct qla_hw_data *ha = vha->hw; 4977 uint32_t add_flags = 0; 4978 int send_notify_ack = 1; 4979 uint16_t status; 4980 4981 status = le16_to_cpu(iocb->u.isp2x.status); 4982 switch (status) { 4983 case IMM_NTFY_LIP_RESET: 4984 { 4985 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 4986 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 4987 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 4988 iocb->u.isp24.status_subcode); 4989 4990 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 4991 send_notify_ack = 0; 4992 break; 4993 } 4994 4995 case IMM_NTFY_LIP_LINK_REINIT: 4996 { 4997 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 4999 "qla_target(%d): LINK REINIT (loop %#x, " 5000 "subcode %x)\n", vha->vp_idx, 5001 le16_to_cpu(iocb->u.isp24.nport_handle), 5002 iocb->u.isp24.status_subcode); 5003 if (tgt->link_reinit_iocb_pending) { 5004 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 5005 0, 0, 0, 0, 0, 0); 5006 } 5007 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 5008 tgt->link_reinit_iocb_pending = 1; 5009 /* 5010 * QLogic requires to wait after LINK REINIT for possible 5011 * PDISC or ADISC ELS commands 5012 */ 5013 send_notify_ack = 0; 5014 break; 5015 } 5016 5017 case IMM_NTFY_PORT_LOGOUT: 5018 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 5019 "qla_target(%d): Port logout (loop " 5020 "%#x, subcode %x)\n", vha->vp_idx, 5021 le16_to_cpu(iocb->u.isp24.nport_handle), 5022 iocb->u.isp24.status_subcode); 5023 5024 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 5025 send_notify_ack = 0; 5026 /* The sessions will be cleared in the callback, if needed */ 5027 break; 5028 5029 case IMM_NTFY_GLBL_TPRLO: 5030 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 5031 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 5032 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5033 send_notify_ack = 0; 5034 /* The sessions will be cleared in the callback, if needed */ 5035 break; 5036 5037 case IMM_NTFY_PORT_CONFIG: 5038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 5039 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 5040 status); 5041 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5042 send_notify_ack = 0; 5043 /* The sessions will be cleared in the callback, if needed */ 5044 break; 5045 5046 case IMM_NTFY_GLBL_LOGO: 5047 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 5048 "qla_target(%d): Link failure detected\n", 5049 vha->vp_idx); 5050 /* I_T nexus loss */ 5051 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5052 send_notify_ack = 0; 5053 break; 5054 5055 case IMM_NTFY_IOCB_OVERFLOW: 5056 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 5057 "qla_target(%d): Cannot provide requested " 5058 "capability (IOCB overflowed the immediate notify " 5059 "resource count)\n", vha->vp_idx); 5060 break; 5061 5062 case 
IMM_NTFY_ABORT_TASK: 5063 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 5064 "qla_target(%d): Abort Task (S %08x I %#x -> " 5065 "L %#x)\n", vha->vp_idx, 5066 le16_to_cpu(iocb->u.isp2x.seq_id), 5067 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 5068 le16_to_cpu(iocb->u.isp2x.lun)); 5069 if (qlt_abort_task(vha, iocb) == 0) 5070 send_notify_ack = 0; 5071 break; 5072 5073 case IMM_NTFY_RESOURCE: 5074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 5075 "qla_target(%d): Out of resources, host %ld\n", 5076 vha->vp_idx, vha->host_no); 5077 break; 5078 5079 case IMM_NTFY_MSG_RX: 5080 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 5081 "qla_target(%d): Immediate notify task %x\n", 5082 vha->vp_idx, iocb->u.isp2x.task_flags); 5083 if (qlt_handle_task_mgmt(vha, iocb) == 0) 5084 send_notify_ack = 0; 5085 break; 5086 5087 case IMM_NTFY_ELS: 5088 if (qlt_24xx_handle_els(vha, iocb) == 0) 5089 send_notify_ack = 0; 5090 break; 5091 5092 case IMM_NTFY_SRR: 5093 qlt_prepare_srr_imm(vha, iocb); 5094 send_notify_ack = 0; 5095 break; 5096 5097 default: 5098 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 5099 "qla_target(%d): Received unknown immediate " 5100 "notify status %x\n", vha->vp_idx, status); 5101 break; 5102 } 5103 5104 if (send_notify_ack) 5105 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); 5106 } 5107 5108 /* 5109 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire. 5110 * This function sends busy to ISP 2xxx or 24xx. 5111 */ 5112 static int __qlt_send_busy(struct scsi_qla_host *vha, 5113 struct atio_from_isp *atio, uint16_t status) 5114 { 5115 struct ctio7_to_24xx *ctio24; 5116 struct qla_hw_data *ha = vha->hw; 5117 request_t *pkt; 5118 struct qla_tgt_sess *sess = NULL; 5119 unsigned long flags; 5120 5121 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5122 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 5123 atio->u.isp24.fcp_hdr.s_id); 5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5125 if (!sess) { 5126 qlt_send_term_exchange(vha, NULL, atio, 1); 5127 return 0; 5128 } 5129 /* Sending a marker isn't necessary, since we're called from the ISR */ 5130 5131 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 5132 if (!pkt) { 5133 ql_dbg(ql_dbg_io, vha, 0x3063, 5134 "qla_target(%d): %s failed: unable to allocate " 5135 "request packet", vha->vp_idx, __func__); 5136 return -ENOMEM; 5137 } 5138 5139 vha->tgt_counters.num_q_full_sent++; 5140 pkt->entry_count = 1; 5141 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5142 5143 ctio24 = (struct ctio7_to_24xx *)pkt; 5144 ctio24->entry_type = CTIO_TYPE7; 5145 ctio24->nport_handle = sess->loop_id; 5146 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5147 ctio24->vp_index = vha->vp_idx; 5148 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5149 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 5150 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 5151 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5152 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 5153 cpu_to_le16( 5154 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5155 CTIO7_FLAGS_DONT_RET_CTIO); 5156 /* 5157 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to retry it, 5158 * if the explicit confirmation is used.
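 * That is why the handle above is marked QLA_TGT_SKIP_HANDLE and
 * CTIO7_FLAGS_DONT_RET_CTIO is set: the finished CTIO is not returned
 * to the driver.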
5159 */ 5160 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5161 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5162 /* Memory Barrier */ 5163 wmb(); 5164 qla2x00_start_iocbs(vha, vha->req); 5165 return 0; 5166 } 5167 5168 /* 5169 * This routine is used to allocate a command for either a QFull condition 5170 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5171 * out previously. 5172 */ 5173 static void 5174 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5175 struct atio_from_isp *atio, uint16_t status, int qfull) 5176 { 5177 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5178 struct qla_hw_data *ha = vha->hw; 5179 struct qla_tgt_sess *sess; 5180 struct se_session *se_sess; 5181 struct qla_tgt_cmd *cmd; 5182 int tag; 5183 5184 if (unlikely(tgt->tgt_stop)) { 5185 ql_dbg(ql_dbg_io, vha, 0x300a, 5186 "New command while device %p is shutting down\n", tgt); 5187 return; 5188 } 5189 5190 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5191 vha->hw->tgt.num_qfull_cmds_dropped++; 5192 if (vha->hw->tgt.num_qfull_cmds_dropped > 5193 vha->hw->qla_stats.stat_max_qfull_cmds_dropped) 5194 vha->hw->qla_stats.stat_max_qfull_cmds_dropped = 5195 vha->hw->tgt.num_qfull_cmds_dropped; 5196 5197 ql_dbg(ql_dbg_io, vha, 0x3068, 5198 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5199 vha->vp_idx, __func__, 5200 vha->hw->tgt.num_qfull_cmds_dropped); 5201 5202 qlt_chk_exch_leak_thresh_hold(vha); 5203 return; 5204 } 5205 5206 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5207 (vha, atio->u.isp24.fcp_hdr.s_id); 5208 if (!sess) 5209 return; 5210 5211 se_sess = sess->se_sess; 5212 5213 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 5214 if (tag < 0) 5215 return; 5216 5217 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5218 if (!cmd) { 5219 ql_dbg(ql_dbg_io, vha, 0x3009, 5220 "qla_target(%d): %s: Allocation of cmd failed\n", 5221 vha->vp_idx, __func__); 5222 5223 vha->hw->tgt.num_qfull_cmds_dropped++; 5224 if (vha->hw->tgt.num_qfull_cmds_dropped > 5225 vha->hw->qla_stats.stat_max_qfull_cmds_dropped) 5226 vha->hw->qla_stats.stat_max_qfull_cmds_dropped = 5227 vha->hw->tgt.num_qfull_cmds_dropped; 5228 5229 qlt_chk_exch_leak_thresh_hold(vha); 5230 return; 5231 } 5232 5233 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5234 5235 qlt_incr_num_pend_cmds(vha); 5236 INIT_LIST_HEAD(&cmd->cmd_list); 5237 memcpy(&cmd->atio, atio, sizeof(*atio)); 5238 5239 cmd->tgt = vha->vha_tgt.qla_tgt; 5240 cmd->vha = vha; 5241 cmd->reset_count = vha->hw->chip_reset; 5242 cmd->q_full = 1; 5243 5244 if (qfull) { 5245 cmd->q_full = 1; 5246 /* NOTE: borrowing the state field to carry the status */ 5247 cmd->state = status; 5248 } else 5249 cmd->term_exchg = 1; 5250 5251 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5252 5253 vha->hw->tgt.num_qfull_cmds_alloc++; 5254 if (vha->hw->tgt.num_qfull_cmds_alloc > 5255 vha->hw->qla_stats.stat_max_qfull_cmds_alloc) 5256 vha->hw->qla_stats.stat_max_qfull_cmds_alloc = 5257 vha->hw->tgt.num_qfull_cmds_alloc; 5258 } 5259 5260 int 5261 qlt_free_qfull_cmds(struct scsi_qla_host *vha) 5262 { 5263 struct qla_hw_data *ha = vha->hw; 5264 unsigned long flags; 5265 struct qla_tgt_cmd *cmd, *tcmd; 5266 struct list_head free_list; 5267 int rc = 0; 5268 5269 if (list_empty(&ha->tgt.q_full_list)) 5270 return 0; 5271 5272 INIT_LIST_HEAD(&free_list); 5273 5274 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 5275 5276 if (list_empty(&ha->tgt.q_full_list)) { 5277 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 5278 return 0; 
5279 } 5280 5281 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) { 5282 if (cmd->q_full) 5283 /* cmd->state is a borrowed field to hold status */ 5284 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state); 5285 else if (cmd->term_exchg) 5286 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio); 5287 5288 if (rc == -ENOMEM) 5289 break; 5290 5291 if (cmd->q_full) 5292 ql_dbg(ql_dbg_io, vha, 0x3006, 5293 "%s: busy sent for ox_id[%04x]\n", __func__, 5294 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5295 else if (cmd->term_exchg) 5296 ql_dbg(ql_dbg_io, vha, 0x3007, 5297 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5298 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5299 else 5300 ql_dbg(ql_dbg_io, vha, 0x3008, 5301 "%s: Unexpected cmd in QFull list %p\n", __func__, 5302 cmd); 5303 5304 list_del(&cmd->cmd_list); 5305 list_add_tail(&cmd->cmd_list, &free_list); 5306 5307 /* piggyback on hardware_lock for protection */ 5308 vha->hw->tgt.num_qfull_cmds_alloc--; 5309 } 5310 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 5311 5312 cmd = NULL; 5313 5314 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5315 list_del(&cmd->cmd_list); 5316 /* This cmd was never sent to TCM. There is no need 5317 * to schedule a free or call free_cmd 5318 */ 5319 qlt_free_cmd(cmd); 5320 } 5321 return rc; 5322 } 5323 5324 static void 5325 qlt_send_busy(struct scsi_qla_host *vha, 5326 struct atio_from_isp *atio, uint16_t status) 5327 { 5328 int rc = 0; 5329 5330 rc = __qlt_send_busy(vha, atio, status); 5331 if (rc == -ENOMEM) 5332 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5333 } 5334 5335 static int 5336 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, 5337 struct atio_from_isp *atio) 5338 { 5339 struct qla_hw_data *ha = vha->hw; 5340 uint16_t status; 5341 5342 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5343 return 0; 5344 5345 status = temp_sam_status; 5346 qlt_send_busy(vha, atio, status); 5347 return 1; 5348 } 5349 5350 /* ha->hardware_lock supposed to be held on entry */ 5351 /* called via callback from qla2xxx */ 5352 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5353 struct atio_from_isp *atio, uint8_t ha_locked) 5354 { 5355 struct qla_hw_data *ha = vha->hw; 5356 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5357 int rc; 5358 unsigned long flags; 5359 5360 if (unlikely(tgt == NULL)) { 5361 ql_dbg(ql_dbg_io, vha, 0x3064, 5362 "ATIO pkt, but no tgt (ha %p)", ha); 5363 return; 5364 } 5365 /* 5366 * In tgt_stop mode we should also allow all requests to pass. 5367 * Otherwise, some commands can get stuck.
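 * (qlt_handle_cmd_for_atio() re-checks tgt_stop itself and fails
 * such commands with -EFAULT.)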
5368 */ 5369 5370 tgt->atio_irq_cmd_count++; 5371 5372 switch (atio->u.raw.entry_type) { 5373 case ATIO_TYPE7: 5374 if (unlikely(atio->u.isp24.exchange_addr == 5375 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5376 ql_dbg(ql_dbg_io, vha, 0x3065, 5377 "qla_target(%d): ATIO_TYPE7 " 5378 "received with UNKNOWN exchange address, " 5379 "sending QUEUE_FULL\n", vha->vp_idx); 5380 if (!ha_locked) 5381 spin_lock_irqsave(&ha->hardware_lock, flags); 5382 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 5383 if (!ha_locked) 5384 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5385 break; 5386 } 5387 5388 5389 5390 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5391 rc = qlt_chk_qfull_thresh_hold(vha, atio); 5392 if (rc != 0) { 5393 tgt->atio_irq_cmd_count--; 5394 return; 5395 } 5396 rc = qlt_handle_cmd_for_atio(vha, atio); 5397 } else { 5398 rc = qlt_handle_task_mgmt(vha, atio); 5399 } 5400 if (unlikely(rc != 0)) { 5401 if (rc == -ESRCH) { 5402 if (!ha_locked) 5403 spin_lock_irqsave 5404 (&ha->hardware_lock, flags); 5405 5406 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5408 #else 5409 qlt_send_term_exchange(vha, NULL, atio, 1); 5410 #endif 5411 5412 if (!ha_locked) 5413 spin_unlock_irqrestore 5414 (&ha->hardware_lock, flags); 5415 5416 } else { 5417 if (tgt->tgt_stop) { 5418 ql_dbg(ql_dbg_tgt, vha, 0xe059, 5419 "qla_target: Unable to send " 5420 "command to target for req, " 5421 "ignoring.\n"); 5422 } else { 5423 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 5424 "qla_target(%d): Unable to send " 5425 "command to target, sending BUSY " 5426 "status.\n", vha->vp_idx); 5427 if (!ha_locked) 5428 spin_lock_irqsave( 5429 &ha->hardware_lock, flags); 5430 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5431 if (!ha_locked) 5432 spin_unlock_irqrestore( 5433 &ha->hardware_lock, flags); 5434 } 5435 } 5436 } 5437 break; 5438 5439 case IMMED_NOTIFY_TYPE: 5440 { 5441 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5442 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5443 "qla_target(%d): Received ATIO packet %x " 5444 "with error status %x\n", vha->vp_idx, 5445 atio->u.raw.entry_type, 5446 atio->u.isp2x.entry_status); 5447 break; 5448 } 5449 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5450 5451 if (!ha_locked) 5452 spin_lock_irqsave(&ha->hardware_lock, flags); 5453 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5454 if (!ha_locked) 5455 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5456 break; 5457 } 5458 5459 default: 5460 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5461 "qla_target(%d): Received unknown ATIO packet " 5462 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5463 break; 5464 } 5465 5466 tgt->atio_irq_cmd_count--; 5467 } 5468 5469 /* ha->hardware_lock supposed to be held on entry */ 5470 /* called via callback from qla2xxx */ 5471 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 5472 { 5473 struct qla_hw_data *ha = vha->hw; 5474 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5475 5476 if (unlikely(tgt == NULL)) { 5477 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5478 "qla_target(%d): Response pkt %x received, but no " 5479 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); 5480 return; 5481 } 5482 5483 /* 5484 * In tgt_stop mode we should also allow all requests to pass. 5485 * Otherwise, some commands can get stuck.
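 * (The individual handlers invoked below perform their own tgt_stop
 * checks where needed.)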
5486 */ 5487 5488 tgt->irq_cmd_count++; 5489 5490 switch (pkt->entry_type) { 5491 case CTIO_CRC2: 5492 case CTIO_TYPE7: 5493 { 5494 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5495 qlt_do_ctio_completion(vha, entry->handle, 5496 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5497 entry); 5498 break; 5499 } 5500 5501 case ACCEPT_TGT_IO_TYPE: 5502 { 5503 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5504 int rc; 5505 if (atio->u.isp2x.status != 5506 cpu_to_le16(ATIO_CDB_VALID)) { 5507 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5508 "qla_target(%d): ATIO with error " 5509 "status %x received\n", vha->vp_idx, 5510 le16_to_cpu(atio->u.isp2x.status)); 5511 break; 5512 } 5513 5514 rc = qlt_chk_qfull_thresh_hold(vha, atio); 5515 if (rc != 0) { 5516 tgt->irq_cmd_count--; 5517 return; 5518 } 5519 5520 rc = qlt_handle_cmd_for_atio(vha, atio); 5521 if (unlikely(rc != 0)) { 5522 if (rc == -ESRCH) { 5523 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5524 qlt_send_busy(vha, atio, 0); 5525 #else 5526 qlt_send_term_exchange(vha, NULL, atio, 1); 5527 #endif 5528 } else { 5529 if (tgt->tgt_stop) { 5530 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5531 "qla_target: Unable to send " 5532 "command to target, sending TERM " 5533 "EXCHANGE for rsp\n"); 5534 qlt_send_term_exchange(vha, NULL, 5535 atio, 1); 5536 } else { 5537 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5538 "qla_target(%d): Unable to send " 5539 "command to target, sending BUSY " 5540 "status\n", vha->vp_idx); 5541 qlt_send_busy(vha, atio, 0); 5542 } 5543 } 5544 } 5545 } 5546 break; 5547 5548 case CONTINUE_TGT_IO_TYPE: 5549 { 5550 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5551 qlt_do_ctio_completion(vha, entry->handle, 5552 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5553 entry); 5554 break; 5555 } 5556 5557 case CTIO_A64_TYPE: 5558 { 5559 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5560 qlt_do_ctio_completion(vha, entry->handle, 5561 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5562 entry); 5563 break; 5564 } 5565 5566 case IMMED_NOTIFY_TYPE: 5567 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5568 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5569 break; 5570 5571 case NOTIFY_ACK_TYPE: 5572 if (tgt->notify_ack_expected > 0) { 5573 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5574 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5575 "NOTIFY_ACK seq %08x status %x\n", 5576 le16_to_cpu(entry->u.isp2x.seq_id), 5577 le16_to_cpu(entry->u.isp2x.status)); 5578 tgt->notify_ack_expected--; 5579 if (entry->u.isp2x.status != 5580 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5581 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5582 "qla_target(%d): NOTIFY_ACK " 5583 "failed %x\n", vha->vp_idx, 5584 le16_to_cpu(entry->u.isp2x.status)); 5585 } 5586 } else { 5587 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5588 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5589 vha->vp_idx); 5590 } 5591 break; 5592 5593 case ABTS_RECV_24XX: 5594 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5595 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5596 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5597 break; 5598 5599 case ABTS_RESP_24XX: 5600 if (tgt->abts_resp_expected > 0) { 5601 struct abts_resp_from_24xx_fw *entry = 5602 (struct abts_resp_from_24xx_fw *)pkt; 5603 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5604 "ABTS_RESP_24XX: compl_status %x\n", 5605 entry->compl_status); 5606 tgt->abts_resp_expected--; 5607 if (le16_to_cpu(entry->compl_status) != 5608 ABTS_RESP_COMPL_SUCCESS) { 5609 if ((entry->error_subcode1 == 0x1E) && 5610 
(entry->error_subcode2 == 0)) { 5611 /* 5612 * We've got a race here: the aborted 5613 * exchange was not terminated, i.e. the 5614 * response for the aborted command was 5615 * sent between the time the abort request 5616 * was received and processed. 5617 * Unfortunately, the firmware has a 5618 * silly requirement that all aborted 5619 * exchanges must be explicitly 5620 * terminated, otherwise it refuses to 5621 * send responses for the abort 5622 * requests. So, we have to 5623 * (re)terminate the exchange and retry 5624 * the abort response. 5625 */ 5626 qlt_24xx_retry_term_exchange(vha, 5627 entry); 5628 } else 5629 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5630 "qla_target(%d): ABTS_RESP_24XX " 5631 "failed %x (subcode %x:%x)", 5632 vha->vp_idx, entry->compl_status, 5633 entry->error_subcode1, 5634 entry->error_subcode2); 5635 } 5636 } else { 5637 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5638 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5639 "received\n", vha->vp_idx); 5640 } 5641 break; 5642 5643 default: 5644 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5645 "qla_target(%d): Received unknown response pkt " 5646 "type %x\n", vha->vp_idx, pkt->entry_type); 5647 break; 5648 } 5649 5650 tgt->irq_cmd_count--; 5651 } 5652 5653 /* 5654 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 5655 */ 5656 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5657 uint16_t *mailbox) 5658 { 5659 struct qla_hw_data *ha = vha->hw; 5660 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5661 int login_code; 5662 5663 if (!ha->tgt.tgt_ops) 5664 return; 5665 5666 if (unlikely(tgt == NULL)) { 5667 ql_dbg(ql_dbg_tgt, vha, 0xe03a, 5668 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); 5669 return; 5670 } 5671 5672 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5673 IS_QLA2100(ha)) 5674 return; 5675 /* 5676 * In tgt_stop mode we should also allow all requests to pass. 5677 * Otherwise, some commands can get stuck. 5678 */ 5679 5680 tgt->irq_cmd_count++; 5681 5682 switch (code) { 5683 case MBA_RESET: /* Reset */ 5684 case MBA_SYSTEM_ERR: /* System Error */ 5685 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5686 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5687 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5688 "qla_target(%d): System error async event %#x " 5689 "occurred", vha->vp_idx, code); 5690 break; 5691 case MBA_WAKEUP_THRES: /* Request Queue Wake-up.
        case MBA_LOOP_UP:
        {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
                    "qla_target(%d): Async LOOP_UP occurred "
                    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
                    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
                    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(vha,
                            (void *)&tgt->link_reinit_iocb,
                            0, 0, 0, 0, 0, 0);
                        tgt->link_reinit_iocb_pending = 0;
                }
                break;
        }

        case MBA_LIP_OCCURRED:
        case MBA_LOOP_DOWN:
        case MBA_LIP_RESET:
        case MBA_RSCN_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
                    "qla_target(%d): Async event %#x occurred "
                    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
                    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
                    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;

        case MBA_PORT_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
                    "qla_target(%d): Port update async event %#x "
                    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
                    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
                    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
                    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

                login_code = le16_to_cpu(mailbox[2]);
                if (login_code == 0x4)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
                            "Async MB 2: Got PLOGI Complete\n");
                else if (login_code == 0x7)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
                            "Async MB 2: Port Logged Out\n");
                break;

        default:
                break;
        }

        tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
        uint16_t loop_id)
{
        fc_port_t *fcport;
        int rc;

        fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
        if (!fcport) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
                    "qla_target(%d): Allocation of tmp FC port failed",
                    vha->vp_idx);
                return NULL;
        }

        fcport->loop_id = loop_id;

        rc = qla2x00_get_port_database(vha, fcport, 0);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
                    "qla_target(%d): Failed to retrieve fcport "
                    "information -- get_port_database() returned %x "
                    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
                kfree(fcport);
                return NULL;
        }

        return fcport;
}

/* Takes and releases vha->vha_tgt.tgt_mutex internally */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
        uint8_t *s_id)
{
        struct qla_tgt_sess *sess = NULL;
        fc_port_t *fcport = NULL;
        int rc, global_resets;
        uint16_t loop_id = 0;

        mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
        global_resets =
            atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

        rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
        if (rc != 0) {
                mutex_unlock(&vha->vha_tgt.tgt_mutex);

                if ((s_id[0] == 0xFF) &&
                    (s_id[1] == 0xFC)) {
                        /*
                         * This is the Domain Controller, so it should be
                         * OK to drop SCSI commands from it.
                         */
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
                            "Unable to find initiator with S_ID %x:%x:%x",
                            s_id[0], s_id[1], s_id[2]);
                } else
                        ql_log(ql_log_info, vha, 0xf071,
                            "qla_target(%d): Unable to find "
                            "initiator with S_ID %x:%x:%x",
                            vha->vp_idx, s_id[0], s_id[1],
                            s_id[2]);

                if (rc == -ENOENT) {
                        qlt_port_logo_t logo;

                        sid_to_portid(s_id, &logo.id);
                        logo.cmd_count = 1;
                        qlt_send_first_logo(vha, &logo);
                }

                return NULL;
        }

        fcport = qlt_get_port_database(vha, loop_id);
        if (!fcport) {
                mutex_unlock(&vha->vha_tgt.tgt_mutex);
                return NULL;
        }

        if (global_resets !=
            atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
                    "qla_target(%d): global reset during session discovery "
                    "(counter was %d, new %d), retrying", vha->vp_idx,
                    global_resets,
                    atomic_read(&vha->vha_tgt.
                        qla_tgt->tgt_global_resets_count));
                /* Drop the stale port database before retrying */
                kfree(fcport);
                goto retry;
        }

        sess = qlt_create_sess(vha, fcport, true);

        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        kfree(fcport);
        return sess;
}
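/*
 * sess_work handler for QLA_TGT_SESS_WORK_ABORT: process an ABTS that
 * was deferred because the originating session could not be resolved
 * in IRQ context, creating a local session if necessary and sending a
 * reject response on failure.
 */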
static void qlt_abort_work(struct qla_tgt *tgt,
        struct qla_tgt_sess_work_param *prm)
{
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
        unsigned long flags = 0, flags2 = 0;
        uint8_t s_id[3];
        int rc;

        spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

        if (tgt->tgt_stop)
                goto out_term2;

        /* The FCP header is little-endian, so the S_ID bytes are reversed */
        s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
        s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
        s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

                sess = qlt_make_local_sess(vha, s_id);
                /* sess has got an extra creation ref */

                spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
                if (!sess)
                        goto out_term2;
        } else {
                if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
                        sess = NULL;
                        goto out_term2;
                }

                kref_get(&sess->se_sess->sess_kref);
        }

        spin_lock_irqsave(&ha->hardware_lock, flags);

        if (tgt->tgt_stop)
                goto out_term;

        rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
        if (rc != 0)
                goto out_term;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
        return;

out_term2:
        spin_lock_irqsave(&ha->hardware_lock, flags);

out_term:
        qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (sess)
                ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
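/*
 * sess_work handler for QLA_TGT_SESS_WORK_TM: issue a deferred task
 * management function once the originating session has been looked up
 * (or created), terminating the exchange on any failure.
 */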
static void qlt_tmr_work(struct qla_tgt *tgt,
        struct qla_tgt_sess_work_param *prm)
{
        struct atio_from_isp *a = &prm->tm_iocb2;
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
        unsigned long flags;
        uint8_t *s_id = NULL; /* to hide compiler warnings */
        int rc;
        uint32_t lun, unpacked_lun;
        int fn;
        void *iocb;

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);

        if (tgt->tgt_stop)
                goto out_term;

        s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

                sess = qlt_make_local_sess(vha, s_id);
                /* sess has got an extra creation ref */

                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                if (!sess)
                        goto out_term;
        } else {
                if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
                        sess = NULL;
                        goto out_term;
                }

                kref_get(&sess->se_sess->sess_kref);
        }

        iocb = a;
        lun = a->u.isp24.fcp_cmnd.lun;
        fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

        rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
        if (rc != 0)
                goto out_term;

        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        return;

out_term:
        qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
        if (sess)
                ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
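/*
 * Work function draining tgt->sess_works_list.  Entries are typically
 * queued from the IRQ-side ATIO/response handlers when an ABTS or task
 * management IOCB arrives for a session that is not yet known.
 */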
static void qlt_sess_work_fn(struct work_struct *work)
{
        struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
        struct scsi_qla_host *vha = tgt->vha;
        unsigned long flags;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        while (!list_empty(&tgt->sess_works_list)) {
                struct qla_tgt_sess_work_param *prm = list_entry(
                    tgt->sess_works_list.next, typeof(*prm),
                    sess_works_list_entry);

                /*
                 * This work can be scheduled on several CPUs at the same
                 * time, so we must delete the entry to eliminate double
                 * processing.
                 */
                list_del(&prm->sess_works_list_entry);

                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

                switch (prm->type) {
                case QLA_TGT_SESS_WORK_ABORT:
                        qlt_abort_work(tgt, prm);
                        break;
                case QLA_TGT_SESS_WORK_TM:
                        qlt_tmr_work(tgt, prm);
                        break;
                default:
                        BUG_ON(1);
                        break;
                }

                spin_lock_irqsave(&tgt->sess_work_lock, flags);

                kfree(prm);
        }
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
        struct qla_tgt *tgt;

        if (!QLA_TGT_MODE_ENABLED())
                return 0;

        if (!IS_TGT_MODE_CAPABLE(ha)) {
                ql_log(ql_log_warn, base_vha, 0xe070,
                    "This adapter does not support target mode.\n");
                return 0;
        }

        ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
            "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

        BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

        tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
        if (!tgt) {
                ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
                    "Unable to allocate struct qla_tgt\n");
                return -ENOMEM;
        }

        if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
                base_vha->host->hostt->supported_mode |= MODE_TARGET;

        tgt->ha = ha;
        tgt->vha = base_vha;
        init_waitqueue_head(&tgt->waitQ);
        INIT_LIST_HEAD(&tgt->sess_list);
        INIT_LIST_HEAD(&tgt->del_sess_list);
        INIT_DELAYED_WORK(&tgt->sess_del_work,
            (void (*)(struct work_struct *))qlt_del_sess_work_fn);
        spin_lock_init(&tgt->sess_work_lock);
        INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
        INIT_LIST_HEAD(&tgt->sess_works_list);
        spin_lock_init(&tgt->srr_lock);
        INIT_LIST_HEAD(&tgt->srr_ctio_list);
        INIT_LIST_HEAD(&tgt->srr_imm_list);
        INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
        atomic_set(&tgt->tgt_global_resets_count, 0);

        base_vha->vha_tgt.qla_tgt = tgt;

        ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
            "qla_target(%d): using 64 Bit PCI addressing",
            base_vha->vp_idx);
        tgt->tgt_enable_64bit_addr = 1;
        /* 3 is reserved */
        tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
        tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
        tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

        if (base_vha->fc_vport)
                return 0;

        mutex_lock(&qla_tgt_mutex);
        list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
        mutex_unlock(&qla_tgt_mutex);

        return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
        if (!vha->vha_tgt.qla_tgt)
                return 0;

        if (vha->fc_vport) {
                qlt_release(vha->vha_tgt.qla_tgt);
                return 0;
        }

        /* free left over qfull cmds */
        qlt_init_term_exchange(vha);

        mutex_lock(&qla_tgt_mutex);
        list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
        mutex_unlock(&qla_tgt_mutex);

        ql_dbg(ql_dbg_tgt, vha, 0xe03c,
            "Unregistering target for host %ld(%p)", vha->host_no, ha);
        qlt_release(vha->vha_tgt.qla_tgt);

        return 0;
}
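/*
 * Debugging aid for qlt_lport_register(): dump the adapter's node and
 * port names next to the WWPN passed in from configfs so that
 * mismatches are easy to spot in the log.
 */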
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
        unsigned char *b)
{
        int i;

        pr_debug("qla2xxx HW vha->node_name: ");
        for (i = 0; i < WWN_SIZE; i++)
                pr_debug("%02x ", vha->node_name[i]);
        pr_debug("\n");
        pr_debug("qla2xxx HW vha->port_name: ");
        for (i = 0; i < WWN_SIZE; i++)
                pr_debug("%02x ", vha->port_name[i]);
        pr_debug("\n");

        pr_debug("qla2xxx passed configfs WWPN: ");
        put_unaligned_be64(wwpn, b);
        for (i = 0; i < WWN_SIZE; i++)
                pr_debug("%02x ", b[i]);
        pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN passed in from configfs
 * @npiv_wwpn: NPIV WWPN, or 0 when registering a physical lport
 * @npiv_wwnn: NPIV WWNN, or 0 when registering a physical lport
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
        u64 npiv_wwpn, u64 npiv_wwnn,
        int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
        struct qla_tgt *tgt;
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct Scsi_Host *host;
        unsigned long flags;
        int rc;
        u8 b[WWN_SIZE];

        mutex_lock(&qla_tgt_mutex);
        list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
                vha = tgt->vha;
                ha = vha->hw;

                host = vha->host;
                if (!host)
                        continue;

                if (!(host->hostt->supported_mode & MODE_TARGET))
                        continue;

                spin_lock_irqsave(&ha->hardware_lock, flags);
                if ((!npiv_wwpn || !npiv_wwnn) &&
                    host->active_mode & MODE_TARGET) {
                        pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
                            host->host_no);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        continue;
                }
                if (tgt->tgt_stop) {
                        pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
                            host->host_no);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                if (!scsi_host_get(host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe068,
                            "Unable to scsi_host_get() for"
                            " qla2xxx scsi_host\n");
                        continue;
                }
                qlt_lport_dump(vha, phys_wwpn, b);

                if (memcmp(vha->port_name, b, WWN_SIZE)) {
                        scsi_host_put(host);
                        continue;
                }
                rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
                if (rc != 0)
                        scsi_host_put(host);

                mutex_unlock(&qla_tgt_mutex);
                return rc;
        }
        mutex_unlock(&qla_tgt_mutex);

        return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
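/*
 * Usage sketch (from the tcm_qla2xxx side, for illustration only): a
 * physical lport is registered roughly as
 *
 *	ret = qlt_lport_register(lport, wwpn, 0, 0,
 *				 tcm_qla2xxx_lport_register_cb);
 *
 * where the callback binds target_lport_ptr to the matching vha.
 */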
/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        struct Scsi_Host *sh = vha->host;
        /*
         * Clear the target_lport_ptr and the qla_tgt_func_tmpl pointer
         * (tgt_ops) in qla_hw_data.
         */
        vha->vha_tgt.target_lport_ptr = NULL;
        ha->tgt.tgt_ops = NULL;
        /*
         * Release the Scsi_Host reference for the underlying qla2xxx host
         */
        scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;

        switch (ql2x_ini_mode) {
        case QLA2XXX_INI_MODE_DISABLED:
        case QLA2XXX_INI_MODE_EXCLUSIVE:
                vha->host->active_mode = MODE_TARGET;
                break;
        case QLA2XXX_INI_MODE_ENABLED:
                vha->host->active_mode |= MODE_TARGET;
                break;
        default:
                break;
        }

        if (ha->tgt.ini_mode_force_reverse)
                qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;

        switch (ql2x_ini_mode) {
        case QLA2XXX_INI_MODE_DISABLED:
                vha->host->active_mode = MODE_UNKNOWN;
                break;
        case QLA2XXX_INI_MODE_EXCLUSIVE:
                vha->host->active_mode = MODE_INITIATOR;
                break;
        case QLA2XXX_INI_MODE_ENABLED:
                vha->host->active_mode &= ~MODE_TARGET;
                break;
        default:
                break;
        }

        if (ha->tgt.ini_mode_force_reverse)
                qla_reverse_ini_mode(vha);
}
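/*
 * Summary of the qlini_mode handling above: "disabled" and "exclusive"
 * hand the port over to target mode entirely, while "enabled" keeps
 * initiator mode active and only toggles the MODE_TARGET bit.
 */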
/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        unsigned long flags;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
        int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;

        if (!tgt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe069,
                    "Unable to locate qla_tgt pointer from"
                    " struct qla_hw_data\n");
                dump_stack();
                return;
        }

        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stopped = 0;
        qlt_set_mode(vha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (vha->vp_idx) {
                qla24xx_disable_vp(vha);
                qla24xx_enable_vp(vha);
        } else {
                if (ha->msix_entries) {
                        ql_dbg(ql_dbg_tgt, vha, 0xffff,
                            "%s: host%ld : vector %d cpu %d\n",
                            __func__, vha->host_no,
                            ha->msix_entries[rspq_ent].vector,
                            ha->msix_entries[rspq_ent].cpuid);

                        ha->tgt.rspq_vector_cpuid =
                            ha->msix_entries[rspq_ent].cpuid;
                }

                set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
                qla2x00_wait_for_hba_online(base_vha);
        }
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        unsigned long flags;

        if (!tgt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe06a,
                    "Unable to locate qla_tgt pointer from"
                    " struct qla_hw_data\n");
                dump_stack();
                return;
        }

        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_clear_mode(vha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to set up
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
        if (!qla_tgt_mode_enabled(vha))
                return;

        vha->vha_tgt.qla_tgt = NULL;

        mutex_init(&vha->vha_tgt.tgt_mutex);
        mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

        qlt_clear_mode(vha);

        /*
         * NOTE: Currently the value is kept the same for <24xx and
         * >=24xx ISPs. If it is necessary to change it,
         * the check should be added for specific ISPs,
         * assigning the value appropriately.
         */
        ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

        qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
        /*
         * FC-4 Feature bit 0 indicates target functionality to the name
         * server.
         */
        if (qla_tgt_mode_enabled(vha)) {
                if (qla_ini_mode_enabled(vha))
                        ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
                else
                        ct_req->req.rff_id.fc4_feature = BIT_0;
        } else if (qla_ini_mode_enabled(vha)) {
                ct_req->req.rff_id.fc4_feature = BIT_1;
        }
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t cnt;
        struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

        if (!qla_tgt_mode_enabled(vha))
                return;

        for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
                pkt->u.raw.signature = ATIO_PROCESSED;
                pkt++;
        }
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: non-zero if the caller already holds ha->hardware_lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *pkt;
        int cnt, i;

        if (!vha->flags.online)
                return;

        while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
                pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                cnt = pkt->u.raw.entry_count;

                qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
                    ha_locked);

                for (i = 0; i < cnt; i++) {
                        ha->tgt.atio_ring_index++;
                        if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
                                ha->tgt.atio_ring_index = 0;
                                ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
                        } else
                                ha->tgt.atio_ring_ptr++;

                        pkt->u.raw.signature = ATIO_PROCESSED;
                        pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                }
                wmb();
        }

        /* Adjust ring index */
        WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
        RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}
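/*
 * Reset the ATIO queue in/out pointers and, on ATIO-MSIX capable ISPs,
 * advertise the ATIO completion vector in the init control block.
 */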
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;

        if (!QLA_TGT_MODE_ENABLED())
                return;

        WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
        WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
        RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

        if (IS_ATIO_MSIX_CAPABLE(ha)) {
                struct qla_msix_entry *msix = &ha->msix_entries[2];
                struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

                icb->msix_atio = cpu_to_le16(msix->entry);
                ql_dbg(ql_dbg_init, vha, 0xf072,
                    "Registering ICB vector 0x%x for atio queue.\n",
                    msix->entry);
        }
}
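/*
 * NVRAM massaging for target mode.  The original exchange count and
 * firmware options are saved once (tgt.saved_set) so that they can be
 * restored verbatim when target mode is switched off again.
 */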
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
        struct qla_hw_data *ha = vha->hw;

        if (qla_tgt_mode_enabled(vha)) {
                if (!ha->tgt.saved_set) {
                        /* We save only once */
                        ha->tgt.saved_exchange_count = nv->exchange_count;
                        ha->tgt.saved_firmware_options_1 =
                            nv->firmware_options_1;
                        ha->tgt.saved_firmware_options_2 =
                            nv->firmware_options_2;
                        ha->tgt.saved_firmware_options_3 =
                            nv->firmware_options_3;
                        ha->tgt.saved_set = 1;
                }

                nv->exchange_count = cpu_to_le16(0xFFFF);

                /* Enable target mode */
                nv->firmware_options_1 |= cpu_to_le32(BIT_4);

                /* Disable ini mode, if requested */
                if (!qla_ini_mode_enabled(vha))
                        nv->firmware_options_1 |= cpu_to_le32(BIT_5);

                /* Disable Full Login after LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
                if (ql2xtgt_tape_enable)
                        /* Enable FC Tape support */
                        nv->firmware_options_2 |= cpu_to_le32(BIT_12);
                else
                        /* Disable FC Tape support */
                        nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

                /* Disable Full Login after LIP */
                nv->host_p &= cpu_to_le32(~BIT_10);
                /* Enable target PRLI control */
                nv->firmware_options_2 |= cpu_to_le32(BIT_14);
        } else {
                if (ha->tgt.saved_set) {
                        nv->exchange_count = ha->tgt.saved_exchange_count;
                        nv->firmware_options_1 =
                            ha->tgt.saved_firmware_options_1;
                        nv->firmware_options_2 =
                            ha->tgt.saved_firmware_options_2;
                        nv->firmware_options_3 =
                            ha->tgt.saved_firmware_options_3;
                }
                return;
        }

        /* out-of-order frames reassembly */
        nv->firmware_options_3 |= BIT_6|BIT_9;

        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
                            FC_COS_CLASS2 | FC_COS_CLASS3;

                nv->firmware_options_2 |= cpu_to_le32(BIT_8);
        } else {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

                nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
        }
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
        struct init_cb_24xx *icb)
{
        struct qla_hw_data *ha = vha->hw;

        if (!QLA_TGT_MODE_ENABLED())
                return;

        if (ha->tgt.node_name_set) {
                memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
                icb->firmware_options_1 |= cpu_to_le32(BIT_14);
        }

        /* disable ZIO at start time. */
        if (!vha->flags.init_done) {
                uint32_t tmp;

                tmp = le32_to_cpu(icb->firmware_options_2);
                tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
                icb->firmware_options_2 = cpu_to_le32(tmp);
        }
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
        struct qla_hw_data *ha = vha->hw;

        if (!QLA_TGT_MODE_ENABLED())
                return;

        if (qla_tgt_mode_enabled(vha)) {
                if (!ha->tgt.saved_set) {
                        /* We save only once */
                        ha->tgt.saved_exchange_count = nv->exchange_count;
                        ha->tgt.saved_firmware_options_1 =
                            nv->firmware_options_1;
                        ha->tgt.saved_firmware_options_2 =
                            nv->firmware_options_2;
                        ha->tgt.saved_firmware_options_3 =
                            nv->firmware_options_3;
                        ha->tgt.saved_set = 1;
                }

                nv->exchange_count = cpu_to_le16(0xFFFF);

                /* Enable target mode */
                nv->firmware_options_1 |= cpu_to_le32(BIT_4);

                /* Disable ini mode, if requested */
                if (!qla_ini_mode_enabled(vha))
                        nv->firmware_options_1 |= cpu_to_le32(BIT_5);

                /* Disable Full Login after LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
                if (ql2xtgt_tape_enable)
                        /* Enable FC tape support */
                        nv->firmware_options_2 |= cpu_to_le32(BIT_12);
                else
                        /* Disable FC tape support */
                        nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

                /* Disable Full Login after LIP */
                nv->host_p &= cpu_to_le32(~BIT_10);
                /* Enable target PRLI control */
                nv->firmware_options_2 |= cpu_to_le32(BIT_14);
        } else {
                if (ha->tgt.saved_set) {
                        nv->exchange_count = ha->tgt.saved_exchange_count;
                        nv->firmware_options_1 =
                            ha->tgt.saved_firmware_options_1;
                        nv->firmware_options_2 =
                            ha->tgt.saved_firmware_options_2;
                        nv->firmware_options_3 =
                            ha->tgt.saved_firmware_options_3;
                }
                return;
        }

        /* out-of-order frames reassembly */
        nv->firmware_options_3 |= BIT_6|BIT_9;

        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
                            FC_COS_CLASS2 | FC_COS_CLASS3;

                nv->firmware_options_2 |= cpu_to_le32(BIT_8);
        } else {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

                nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
        }
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
        struct init_cb_81xx *icb)
{
        struct qla_hw_data *ha = vha->hw;

        if (!QLA_TGT_MODE_ENABLED())
                return;

        if (ha->tgt.node_name_set) {
                memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
                icb->firmware_options_1 |= cpu_to_le32(BIT_14);
        }

        /* disable ZIO at start time. */
        if (!vha->flags.init_done) {
                uint32_t tmp;

                tmp = le32_to_cpu(icb->firmware_options_2);
                tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
                icb->firmware_options_2 = cpu_to_le32(tmp);
        }
}
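/* Reserve one extra MSI-X vector for the dedicated ATIO queue. */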
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
        if (!QLA_TGT_MODE_ENABLED())
                return;

        ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
        struct sts_entry_24xx *pkt)
{
        switch (pkt->entry_type) {
        case ABTS_RECV_24XX:
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case NOTIFY_ACK_TYPE:
        case CTIO_CRC2:
                return 1;
        default:
                return 0;
        }
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
        struct vp_config_entry_24xx *vpmod)
{
        if (qla_tgt_mode_enabled(vha))
                vpmod->options_idx1 &= ~BIT_5;
        /* Disable ini mode, if requested */
        if (!qla_ini_mode_enabled(vha))
                vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
        if (!QLA_TGT_MODE_ENABLED())
                return;

        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
                ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
        } else {
                ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
                ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
        }

        mutex_init(&base_vha->vha_tgt.tgt_mutex);
        mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
        qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
        struct rsp_que *rsp;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        unsigned long flags;

        rsp = (struct rsp_que *) dev_id;
        ha = rsp->hw;
        vha = pci_get_drvdata(ha->pdev);

        spin_lock_irqsave(&ha->tgt.atio_lock, flags);

        qlt_24xx_process_atio_queue(vha, 0);

        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

        return IRQ_HANDLED;
}

static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
        struct qla_tgt_sess_op *op = container_of(work,
            struct qla_tgt_sess_op, work);
        scsi_qla_host_t *vha = op->vha;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
                goto out_free;

        spin_lock_irqsave(&ha->tgt.atio_lock, flags);
        qlt_24xx_process_atio_queue(vha, 0);
        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

out_free:
        /* op was allocated in qlt_handle_abts_recv() and is ours to free */
        kfree(op);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
        struct qla_tgt_sess_op *op;

        op = kzalloc(sizeof(*op), GFP_ATOMIC);

        if (!op) {
                /*
                 * Do not touch the ATIO queue here; this is best-effort
                 * error recovery at this point.
                 */
                qlt_response_pkt_all_vps(vha, pkt);
                return;
        }

        memcpy(&op->atio, pkt, sizeof(*pkt));
        op->vha = vha;
        op->chip_reset = vha->hw->chip_reset;
        INIT_WORK(&op->work, qlt_handle_abts_recv_work);
        queue_work(qla_tgt_wq, &op->work);
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
        if (!QLA_TGT_MODE_ENABLED())
                return 0;

        ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
            MAX_MULTI_ID_FABRIC, GFP_KERNEL);
        if (!ha->tgt.tgt_vp_map)
                return -ENOMEM;

        ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
            (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
            &ha->tgt.atio_dma, GFP_KERNEL);
        if (!ha->tgt.atio_ring) {
                kfree(ha->tgt.tgt_vp_map);
                return -ENOMEM;
        }
        return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
        if (!QLA_TGT_MODE_ENABLED())
                return;

        if (ha->tgt.atio_ring) {
                dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
                    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
                    ha->tgt.atio_dma);
        }
        kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
        if (!QLA_TGT_MODE_ENABLED())
                return;

        switch (cmd) {
        case SET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
                break;
        case SET_AL_PA:
                vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
                break;
        case RESET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
                break;
        case RESET_AL_PA:
                vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
                break;
        }
}

static int __init qlt_parse_ini_mode(void)
{
        if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
                ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
        else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
                ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
        else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
                ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
        else
                return false;

        return true;
}
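/*
 * Module-load time initialization: parse qlini_mode and, when target
 * mode is compiled in, set up the management-command cache/mempool,
 * the PLOGI cache and the qla_tgt_wq workqueue used above.
 */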
alloc_workqueue("qla_tgt_wq", 0, 0); 6899 if (!qla_tgt_wq) { 6900 ql_log(ql_log_fatal, NULL, 0xe06f, 6901 "alloc_workqueue for qla_tgt_wq failed\n"); 6902 ret = -ENOMEM; 6903 goto out_cmd_mempool; 6904 } 6905 /* 6906 * Return 1 to signal that initiator-mode is being disabled 6907 */ 6908 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 6909 6910 out_cmd_mempool: 6911 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 6912 out_plogi_cachep: 6913 kmem_cache_destroy(qla_tgt_plogi_cachep); 6914 out_mgmt_cmd_cachep: 6915 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 6916 return ret; 6917 } 6918 6919 void qlt_exit(void) 6920 { 6921 if (!QLA_TGT_MODE_ENABLED()) 6922 return; 6923 6924 destroy_workqueue(qla_tgt_wq); 6925 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 6926 kmem_cache_destroy(qla_tgt_plogi_cachep); 6927 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 6928 } 6929