/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

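/*
 * Illustrative decode of the FCP_CMND task attribute byte using the masks
 * above (a sketch for orientation, not necessarily code this driver uses):
 *
 *	attr = task_attr_byte & FCP_PTA_MASK;
 *	prio = (task_attr_byte & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */
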
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

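/*
 * Note: the d_id[] argument below follows the wire-order FC_ID layout,
 * i.e. d_id[0] = domain, d_id[1] = area, d_id[2] = al_pa, which is why
 * the bytes are matched against the corresponding vha->d_id.b.* fields.
 */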
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

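/*
 * qlt_free_session_done() runs from schedule_work(), i.e. in process
 * context, so the fabric ->free_session() callback below is allowed to
 * sleep.
 */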
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

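/*
 * Typical caller pattern for qlt_create_sess() (illustrative; cf.
 * qlt_fc_port_added() below):
 *
 *	mutex_lock(&vha->vha_tgt.tgt_mutex);
 *	sess = qlt_create_sess(vha, fcport, false);
 *	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 *	...
 *	ha->tgt.tgt_ops->put_sess(sess);    (drops the extra reference)
 */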
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

"" : "not "); 675 676 return sess; 677 } 678 679 /* 680 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 681 */ 682 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 683 { 684 struct qla_hw_data *ha = vha->hw; 685 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 686 struct qla_tgt_sess *sess; 687 unsigned long flags; 688 689 if (!vha->hw->tgt.tgt_ops) 690 return; 691 692 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 693 return; 694 695 if (qla_ini_mode_enabled(vha)) 696 return; 697 698 spin_lock_irqsave(&ha->hardware_lock, flags); 699 if (tgt->tgt_stop) { 700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 701 return; 702 } 703 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 704 if (!sess) { 705 spin_unlock_irqrestore(&ha->hardware_lock, flags); 706 707 mutex_lock(&vha->vha_tgt.tgt_mutex); 708 sess = qlt_create_sess(vha, fcport, false); 709 mutex_unlock(&vha->vha_tgt.tgt_mutex); 710 711 spin_lock_irqsave(&ha->hardware_lock, flags); 712 } else { 713 kref_get(&sess->se_sess->sess_kref); 714 715 if (sess->deleted) { 716 qlt_undelete_sess(sess); 717 718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 719 "qla_target(%u): %ssession for port %8phC " 720 "(loop ID %d) reappeared\n", vha->vp_idx, 721 sess->local ? "local " : "", sess->port_name, 722 sess->loop_id); 723 724 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 725 "Reappeared sess %p\n", sess); 726 } 727 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 728 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 729 } 730 731 if (sess && sess->local) { 732 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 733 "qla_target(%u): local session for " 734 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 735 fcport->port_name, sess->loop_id); 736 sess->local = 0; 737 } 738 ha->tgt.tgt_ops->put_sess(sess); 739 spin_unlock_irqrestore(&ha->hardware_lock, flags); 740 } 741 742 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 743 { 744 struct qla_hw_data *ha = vha->hw; 745 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 746 struct qla_tgt_sess *sess; 747 unsigned long flags; 748 749 if (!vha->hw->tgt.tgt_ops) 750 return; 751 752 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 753 return; 754 755 spin_lock_irqsave(&ha->hardware_lock, flags); 756 if (tgt->tgt_stop) { 757 spin_unlock_irqrestore(&ha->hardware_lock, flags); 758 return; 759 } 760 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 761 if (!sess) { 762 spin_unlock_irqrestore(&ha->hardware_lock, flags); 763 return; 764 } 765 766 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 767 768 sess->local = 1; 769 qlt_schedule_sess_for_deletion(sess, false); 770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 771 } 772 773 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 774 { 775 struct qla_hw_data *ha = tgt->ha; 776 unsigned long flags; 777 int res; 778 /* 779 * We need to protect against race, when tgt is freed before or 780 * inside wake_up() 781 */ 782 spin_lock_irqsave(&ha->hardware_lock, flags); 783 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 784 "tgt %p, empty(sess_list)=%d sess_count=%d\n", 785 tgt, list_empty(&tgt->sess_list), tgt->sess_count); 786 res = (tgt->sess_count == 0); 787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 788 789 return res; 790 } 791 792 /* Called by tcm_qla2xxx configfs code */ 793 int qlt_stop_phase1(struct qla_tgt *tgt) 794 { 795 struct scsi_qla_host *vha = tgt->vha; 796 struct qla_hw_data *ha = tgt->ha; 797 unsigned long flags; 798 799 mutex_lock(&qla_tgt_mutex); 800 if 
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

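/*
 * IOCBs that arrive before a session exists (e.g. an ABTS for an unknown
 * S_ID) cannot be resolved in IRQ context, so they are copied into a
 * qla_tgt_sess_work_param (GFP_ATOMIC, since hardware_lock is held) and
 * handled later from the session work list in process context.
 */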
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
		    __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}

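/*
 * In qlt_24xx_send_abts_resp() below, FCP_TMF_CMPL produces a BA_ACC
 * payload (accepting the abort), while any other status is mapped to a
 * BA_RJT with reason "unable to perform command request".
 */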
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
		    BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * that we ourselves generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
		    container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

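/*
 * The 24xx ABTS IOCB carries the FC header in little-endian byte order
 * (fcp_hdr_le), so the S_ID bytes are reversed below before the
 * big-endian lookup via ->find_sess_by_s_id().
 */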
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If greater than four sg entries then we need to allocate
	 * the continuation entries
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

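/*
 * Request-ring space accounting (sketch): vha->req->cnt caches the number
 * of free ring entries and is only refreshed from the out-pointer the
 * firmware last published when it looks too small:
 *
 *	free = (out > in) ? out - in : length - (in - out);
 *
 * where in = ring_index.  The "req_cnt + 2" check keeps a small gap,
 * presumably so a completely full ring cannot be mistaken for an empty
 * one.
 */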
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

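/*
 * Command handles are allocated from a circular cursor over
 * ha->tgt.cmds[]: 0 is reserved as QLA_TGT_NULL_HANDLE and
 * QLA_TGT_SKIP_HANDLE is never handed out, so a scan that wraps all the
 * way back to current_handle means the table is exhausted.
 */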
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}

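/*
 * A single CTIO7 holds at most tgt->datasegs_per_cmd data segments;
 * anything beyond that is emitted as continuation IOCBs carrying
 * datasegs_per_cont segments each (cf. the req_cnt computation in
 * qlt_pci_map_calc_cnt()).
 */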
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
		    (cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of the 64-bit specific
		 * fields is used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
			prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
			prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

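/*
 * Residual reporting sketch: if the fabric marked the command with
 * SCF_UNDERFLOW_BIT/SCF_OVERFLOW_BIT, se_cmd->residual_count is copied
 * into the CTIO and SS_RESIDUAL_UNDER/SS_RESIDUAL_OVER is or-ed into the
 * SCSI status word, mirroring the FCP_RSP residual flags.
 */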
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}

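/*
 * Explicit conformation (CTIO7_FLAGS_EXPLICIT_CONFORM) is only requested
 * when class 2 service is not enabled and the initiator advertised
 * confirmed-completion support at login (conf_compl_supported); when
 * sending sense, the login bit alone decides.
 */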
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
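/*
 * qlt_srr_random() below is the Park-Miller "minimal standard" PRNG
 * (x' = 16807 * x mod 2^31 - 1), implemented with Schrage's trick
 * (q = 127773, r = 2836) to avoid 32-bit overflow.
 */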
/*
 * Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

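/*
 * Note: sense data is copied into the CTIO below as big-endian 32-bit
 * words (cpu_to_be32() on each word); a sense_buffer_len that is not a
 * multiple of 4 would leave the tail bytes behind, which the disabled
 * diagnostic at the end of the copy loop hints at.
 */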
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24: is that even possible? */
}

/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does the F/W have enough IOCBs free for this request? */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
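			 * (That reservation was taken in
			 * qlt_check_reserve_free_req() above, sized by
			 * full_req_cnt from qlt_pre_xmit_response(). The
			 * disabled sketch below shows the accounting idea
			 * only; the segments-per-IOCB constants are
			 * illustrative assumptions, not firmware facts.)
			 */
#if 0
/*
 * Illustrative sketch only: reserve every request-ring entry a
 * response could need up front, so the packet builders never have to
 * drop the hardware lock to wait for ring space. Hypothetically
 * assumes one leading IOCB carrying one data segment, continuation
 * IOCBs carrying five segments each, and one extra entry when a
 * separate status CTIO is appended.
 */
static int example_full_req_cnt(int sg_cnt, int add_status_pkt)
{
	int cnt = 1;				/* the CTIO7 itself */

	if (sg_cnt > 1)
		cnt += DIV_ROUND_UP(sg_cnt - 1, 5);	/* continuations */
	if (add_status_pkt)
		cnt++;				/* the trailing status CTIO */

	return cnt;
}
#endif
/*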
1979 */ 1980 struct ctio7_to_24xx *ctio = 1981 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 1982 1983 ql_dbg(ql_dbg_tgt, vha, 0xe019, 1984 "Building additional status packet\n"); 1985 1986 memcpy(ctio, pkt, sizeof(*ctio)); 1987 ctio->entry_count = 1; 1988 ctio->dseg_count = 0; 1989 ctio->u.status1.flags &= ~__constant_cpu_to_le16( 1990 CTIO7_FLAGS_DATA_IN); 1991 1992 /* Real finish is ctio_m1's finish */ 1993 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 1994 pkt->u.status0.flags |= __constant_cpu_to_le16( 1995 CTIO7_FLAGS_DONT_RET_CTIO); 1996 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 1997 &prm); 1998 pr_debug("Status CTIO7: %p\n", ctio); 1999 } 2000 } else 2001 qlt_24xx_init_ctio_to_isp(pkt, &prm); 2002 2003 2004 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2005 2006 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 2007 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 2008 pkt, scsi_status); 2009 2010 qla2x00_start_iocbs(vha, vha->req); 2011 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2012 2013 return 0; 2014 2015 out_unmap_unlock: 2016 if (cmd->sg_mapped) 2017 qlt_unmap_sg(vha, cmd); 2018 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2019 2020 return res; 2021 } 2022 EXPORT_SYMBOL(qlt_xmit_response); 2023 2024 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2025 { 2026 struct ctio7_to_24xx *pkt; 2027 struct scsi_qla_host *vha = cmd->vha; 2028 struct qla_hw_data *ha = vha->hw; 2029 struct qla_tgt *tgt = cmd->tgt; 2030 struct qla_tgt_prm prm; 2031 unsigned long flags; 2032 int res = 0; 2033 2034 memset(&prm, 0, sizeof(prm)); 2035 prm.cmd = cmd; 2036 prm.tgt = tgt; 2037 prm.sg = NULL; 2038 prm.req_cnt = 1; 2039 2040 /* Send marker if required */ 2041 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2042 return -EIO; 2043 2044 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", 2045 (int)vha->vp_idx); 2046 2047 /* Calculate number of entries and segments required */ 2048 if (qlt_pci_map_calc_cnt(&prm) != 0) 2049 return -EAGAIN; 2050 2051 spin_lock_irqsave(&ha->hardware_lock, flags); 2052 2053 /* Does F/W have an IOCBs for this request */ 2054 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2055 if (res != 0) 2056 goto out_unlock_free_unmap; 2057 2058 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2059 if (unlikely(res != 0)) 2060 goto out_unlock_free_unmap; 2061 pkt = (struct ctio7_to_24xx *)prm.pkt; 2062 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2063 CTIO7_FLAGS_STATUS_MODE_0); 2064 qlt_load_data_segments(&prm, vha); 2065 2066 cmd->state = QLA_TGT_STATE_NEED_DATA; 2067 2068 qla2x00_start_iocbs(vha, vha->req); 2069 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2070 2071 return res; 2072 2073 out_unlock_free_unmap: 2074 if (cmd->sg_mapped) 2075 qlt_unmap_sg(vha, cmd); 2076 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2077 2078 return res; 2079 } 2080 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2081 2082 /* If hardware_lock held on entry, might drop it, then reaquire */ 2083 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2084 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2085 struct qla_tgt_cmd *cmd, 2086 struct atio_from_isp *atio) 2087 { 2088 struct ctio7_to_24xx *ctio24; 2089 struct qla_hw_data *ha = vha->hw; 2090 request_t *pkt; 2091 int ret = 0; 2092 2093 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2094 2095 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2096 if (pkt == NULL) { 2097 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2098 "qla_target(%d): %s 
failed: unable to allocate " 2099 "request packet\n", vha->vp_idx, __func__); 2100 return -ENOMEM; 2101 } 2102 2103 if (cmd != NULL) { 2104 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2105 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2106 "qla_target(%d): Terminating cmd %p with " 2107 "incorrect state %d\n", vha->vp_idx, cmd, 2108 cmd->state); 2109 } else 2110 ret = 1; 2111 } 2112 2113 pkt->entry_count = 1; 2114 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2115 2116 ctio24 = (struct ctio7_to_24xx *)pkt; 2117 ctio24->entry_type = CTIO_TYPE7; 2118 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; 2119 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 2120 ctio24->vp_index = vha->vp_idx; 2121 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2122 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2123 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2124 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 2125 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 2126 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 2127 CTIO7_FLAGS_TERMINATE); 2128 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2129 2130 /* Most likely, it isn't needed */ 2131 ctio24->u.status1.residual = get_unaligned((uint32_t *) 2132 &atio->u.isp24.fcp_cmnd.add_cdb[ 2133 atio->u.isp24.fcp_cmnd.add_cdb_len]); 2134 if (ctio24->u.status1.residual != 0) 2135 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 2136 2137 qla2x00_start_iocbs(vha, vha->req); 2138 return ret; 2139 } 2140 2141 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2142 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2143 { 2144 unsigned long flags; 2145 int rc; 2146 2147 if (qlt_issue_marker(vha, ha_locked) < 0) 2148 return; 2149 2150 if (ha_locked) { 2151 rc = __qlt_send_term_exchange(vha, cmd, atio); 2152 goto done; 2153 } 2154 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 2155 rc = __qlt_send_term_exchange(vha, cmd, atio); 2156 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 2157 done: 2158 if (rc == 1) { 2159 if (!ha_locked && !in_interrupt()) 2160 msleep(250); /* just in case */ 2161 2162 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2163 } 2164 } 2165 2166 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2167 { 2168 BUG_ON(cmd->sg_mapped); 2169 2170 if (unlikely(cmd->free_sg)) 2171 kfree(cmd->sg); 2172 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2173 } 2174 EXPORT_SYMBOL(qlt_free_cmd); 2175 2176 /* ha->hardware_lock supposed to be held on entry */ 2177 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2178 struct qla_tgt_cmd *cmd, void *ctio) 2179 { 2180 struct qla_tgt_srr_ctio *sc; 2181 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2182 struct qla_tgt_srr_imm *imm; 2183 2184 tgt->ctio_srr_id++; 2185 2186 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2187 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2188 2189 if (!ctio) { 2190 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2191 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2192 vha->vp_idx); 2193 return -EINVAL; 2194 } 2195 2196 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2197 if (sc != NULL) { 2198 sc->cmd = cmd; 2199 /* IRQ is already OFF */ 2200 spin_lock(&tgt->srr_lock); 2201 sc->srr_id = tgt->ctio_srr_id; 2202 list_add_tail(&sc->srr_list_entry, 2203 &tgt->srr_ctio_list); 2204 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2205 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2206 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2207 int found = 0; 2208 list_for_each_entry(imm, &tgt->srr_imm_list, 2209 
srr_list_entry) { 2210 if (imm->srr_id == sc->srr_id) { 2211 found = 1; 2212 break; 2213 } 2214 } 2215 if (found) { 2216 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2217 "Scheduling srr work\n"); 2218 schedule_work(&tgt->srr_work); 2219 } else { 2220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2221 "qla_target(%d): imm_srr_id " 2222 "== ctio_srr_id (%d), but there is no " 2223 "corresponding SRR IMM, deleting CTIO " 2224 "SRR %p\n", vha->vp_idx, 2225 tgt->ctio_srr_id, sc); 2226 list_del(&sc->srr_list_entry); 2227 spin_unlock(&tgt->srr_lock); 2228 2229 kfree(sc); 2230 return -EINVAL; 2231 } 2232 } 2233 spin_unlock(&tgt->srr_lock); 2234 } else { 2235 struct qla_tgt_srr_imm *ti; 2236 2237 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2238 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2239 vha->vp_idx); 2240 spin_lock(&tgt->srr_lock); 2241 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2242 srr_list_entry) { 2243 if (imm->srr_id == tgt->ctio_srr_id) { 2244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2245 "IMM SRR %p deleted (id %d)\n", 2246 imm, imm->srr_id); 2247 list_del(&imm->srr_list_entry); 2248 qlt_reject_free_srr_imm(vha, imm, 1); 2249 } 2250 } 2251 spin_unlock(&tgt->srr_lock); 2252 2253 return -ENOMEM; 2254 } 2255 2256 return 0; 2257 } 2258 2259 /* 2260 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2261 */ 2262 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2263 struct qla_tgt_cmd *cmd, uint32_t status) 2264 { 2265 int term = 0; 2266 2267 if (ctio != NULL) { 2268 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2269 term = !(c->flags & 2270 __constant_cpu_to_le16(OF_TERM_EXCH)); 2271 } else 2272 term = 1; 2273 2274 if (term) 2275 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2276 2277 return term; 2278 } 2279 2280 /* ha->hardware_lock supposed to be held on entry */ 2281 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2282 uint32_t handle) 2283 { 2284 struct qla_hw_data *ha = vha->hw; 2285 2286 handle--; 2287 if (ha->tgt.cmds[handle] != NULL) { 2288 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2289 ha->tgt.cmds[handle] = NULL; 2290 return cmd; 2291 } else 2292 return NULL; 2293 } 2294 2295 /* ha->hardware_lock supposed to be held on entry */ 2296 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2297 uint32_t handle, void *ctio) 2298 { 2299 struct qla_tgt_cmd *cmd = NULL; 2300 2301 /* Clear out internal marks */ 2302 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2303 CTIO_INTERMEDIATE_HANDLE_MARK); 2304 2305 if (handle != QLA_TGT_NULL_HANDLE) { 2306 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2307 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2308 "SKIP_HANDLE CTIO\n"); 2309 return NULL; 2310 } 2311 /* handle-1 is actually used */ 2312 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2313 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2314 "qla_target(%d): Wrong handle %x received\n", 2315 vha->vp_idx, handle); 2316 return NULL; 2317 } 2318 cmd = qlt_get_cmd(vha, handle); 2319 if (unlikely(cmd == NULL)) { 2320 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2321 "qla_target(%d): Suspicious: unable to " 2322 "find the command with handle %x\n", vha->vp_idx, 2323 handle); 2324 return NULL; 2325 } 2326 } else if (ctio != NULL) { 2327 /* We can't get loop ID from CTIO7 */ 2328 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2329 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2330 "support NULL handles\n", vha->vp_idx); 2331 return NULL; 2332 } 2333 2334 return cmd; 2335 } 2336 2337 /* 2338 * ha->hardware_lock supposed to be held 
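on entry; might drop it, then reacquire.
 * (Before the completion path below, this disabled sketch documents
 * the handle convention enforced by qlt_get_cmd()/qlt_ctio_to_cmd()
 * above: handle 0 means "no command", so slot i is published as i + 1
 * and the MARK bits are stripped before the lookup. Helper names are
 * ours, not part of the driver.)
 */
#if 0
/* Illustrative sketch only of the one-biased handle table. */
static uint32_t example_publish_cmd(struct qla_tgt_cmd **cmds,
	uint32_t slot, struct qla_tgt_cmd *cmd)
{
	cmds[slot] = cmd;
	return (slot + 1) | CTIO_COMPLETION_HANDLE_MARK;
}

static struct qla_tgt_cmd *example_lookup_cmd(struct qla_tgt_cmd **cmds,
	uint32_t handle)
{
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);
	if (handle == QLA_TGT_NULL_HANDLE)
		return NULL;
	return cmds[handle - 1];	/* handle-1 is actually used */
}
#endif
/* As noted above, ha->hardware_lock is supposed to be held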
on entry. Might drop it, then reaquire 2339 */ 2340 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 2341 uint32_t status, void *ctio) 2342 { 2343 struct qla_hw_data *ha = vha->hw; 2344 struct se_cmd *se_cmd; 2345 struct target_core_fabric_ops *tfo; 2346 struct qla_tgt_cmd *cmd; 2347 2348 ql_dbg(ql_dbg_tgt, vha, 0xe01e, 2349 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", 2350 vha->vp_idx, ctio, status, handle); 2351 2352 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 2353 /* That could happen only in case of an error/reset/abort */ 2354 if (status != CTIO_SUCCESS) { 2355 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 2356 "Intermediate CTIO received" 2357 " (status %x)\n", status); 2358 } 2359 return; 2360 } 2361 2362 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2363 if (cmd == NULL) 2364 return; 2365 2366 se_cmd = &cmd->se_cmd; 2367 tfo = se_cmd->se_tfo; 2368 2369 if (cmd->sg_mapped) 2370 qlt_unmap_sg(vha, cmd); 2371 2372 if (unlikely(status != CTIO_SUCCESS)) { 2373 switch (status & 0xFFFF) { 2374 case CTIO_LIP_RESET: 2375 case CTIO_TARGET_RESET: 2376 case CTIO_ABORTED: 2377 case CTIO_TIMEOUT: 2378 case CTIO_INVALID_RX_ID: 2379 /* They are OK */ 2380 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 2381 "qla_target(%d): CTIO with " 2382 "status %#x received, state %x, se_cmd %p, " 2383 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 2384 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 2385 status, cmd->state, se_cmd); 2386 break; 2387 2388 case CTIO_PORT_LOGGED_OUT: 2389 case CTIO_PORT_UNAVAILABLE: 2390 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 2391 "qla_target(%d): CTIO with PORT LOGGED " 2392 "OUT (29) or PORT UNAVAILABLE (28) status %x " 2393 "received (state %x, se_cmd %p)\n", vha->vp_idx, 2394 status, cmd->state, se_cmd); 2395 break; 2396 2397 case CTIO_SRR_RECEIVED: 2398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 2399 "qla_target(%d): CTIO with SRR_RECEIVED" 2400 " status %x received (state %x, se_cmd %p)\n", 2401 vha->vp_idx, status, cmd->state, se_cmd); 2402 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 2403 break; 2404 else 2405 return; 2406 2407 default: 2408 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 2409 "qla_target(%d): CTIO with error status " 2410 "0x%x received (state %x, se_cmd %p\n", 2411 vha->vp_idx, status, cmd->state, se_cmd); 2412 break; 2413 } 2414 2415 if (cmd->state != QLA_TGT_STATE_NEED_DATA) 2416 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 2417 return; 2418 } 2419 2420 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 2421 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 2422 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 2423 int rx_status = 0; 2424 2425 cmd->state = QLA_TGT_STATE_DATA_IN; 2426 2427 if (unlikely(status != CTIO_SUCCESS)) 2428 rx_status = -EIO; 2429 else 2430 cmd->write_data_transferred = 1; 2431 2432 ql_dbg(ql_dbg_tgt, vha, 0xe020, 2433 "Data received, context %x, rx_status %d\n", 2434 0x0, rx_status); 2435 2436 ha->tgt.tgt_ops->handle_data(cmd); 2437 return; 2438 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 2439 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 2440 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); 2441 } else { 2442 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 2443 "qla_target(%d): A command in state (%d) should " 2444 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 2445 } 2446 2447 if (unlikely(status != CTIO_SUCCESS)) { 2448 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 2449 dump_stack(); 2450 } 2451 2452 ha->tgt.tgt_ops->free_cmd(cmd); 2453 } 2454 2455 static inline int 
qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
	if (sess)
		kref_get(&sess->se_sess->sess_kref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multi-entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref.
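		 * (Both lookup paths hand qlt_do_work() a referenced
		 * session: the fast path above takes kref_get() while still
		 * under hardware_lock, and this slow path returns holding
		 * the creation reference. The disabled sketch below restates
		 * the take-a-reference-before-unlocking pattern; the helper
		 * name is ours, not part of the driver.)
		 */
#if 0
/*
 * Illustrative sketch only: never let an object found under a lock
 * escape that lock without first taking a reference to it.
 */
static struct qla_tgt_sess *example_get_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (sess)
		kref_get(&sess->se_sess->sess_kref); /* ref before unlock */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return sess;	/* caller drops it via tgt_ops->put_sess() */
}
#endif
/* (end of sketch)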
		 */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop the extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
	if (!cmd) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		return -ENOMEM;
	}

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
} 2644 memset(mcmd, 0, sizeof(*mcmd)); 2645 mcmd->sess = sess; 2646 2647 if (iocb) { 2648 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2649 sizeof(mcmd->orig_iocb.imm_ntfy)); 2650 } 2651 mcmd->tmr_func = fn; 2652 mcmd->flags = flags; 2653 2654 switch (fn) { 2655 case QLA_TGT_CLEAR_ACA: 2656 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 2657 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 2658 tmr_func = TMR_CLEAR_ACA; 2659 break; 2660 2661 case QLA_TGT_TARGET_RESET: 2662 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 2663 "qla_target(%d): TARGET_RESET received\n", 2664 sess->vha->vp_idx); 2665 tmr_func = TMR_TARGET_WARM_RESET; 2666 break; 2667 2668 case QLA_TGT_LUN_RESET: 2669 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 2670 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 2671 tmr_func = TMR_LUN_RESET; 2672 break; 2673 2674 case QLA_TGT_CLEAR_TS: 2675 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, 2676 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); 2677 tmr_func = TMR_CLEAR_TASK_SET; 2678 break; 2679 2680 case QLA_TGT_ABORT_TS: 2681 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, 2682 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); 2683 tmr_func = TMR_ABORT_TASK_SET; 2684 break; 2685 #if 0 2686 case QLA_TGT_ABORT_ALL: 2687 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, 2688 "qla_target(%d): Doing ABORT_ALL_TASKS\n", 2689 sess->vha->vp_idx); 2690 tmr_func = 0; 2691 break; 2692 2693 case QLA_TGT_ABORT_ALL_SESS: 2694 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, 2695 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", 2696 sess->vha->vp_idx); 2697 tmr_func = 0; 2698 break; 2699 2700 case QLA_TGT_NEXUS_LOSS_SESS: 2701 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, 2702 "qla_target(%d): Doing NEXUS_LOSS_SESS\n", 2703 sess->vha->vp_idx); 2704 tmr_func = 0; 2705 break; 2706 2707 case QLA_TGT_NEXUS_LOSS: 2708 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, 2709 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); 2710 tmr_func = 0; 2711 break; 2712 #endif 2713 default: 2714 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, 2715 "qla_target(%d): Unknown task mgmt fn 0x%x\n", 2716 sess->vha->vp_idx, fn); 2717 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2718 return -ENOSYS; 2719 } 2720 2721 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); 2722 if (res != 0) { 2723 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 2724 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 2725 sess->vha->vp_idx, res); 2726 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2727 return -EFAULT; 2728 } 2729 2730 return 0; 2731 } 2732 2733 /* ha->hardware_lock supposed to be held on entry */ 2734 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 2735 { 2736 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2737 struct qla_hw_data *ha = vha->hw; 2738 struct qla_tgt *tgt; 2739 struct qla_tgt_sess *sess; 2740 uint32_t lun, unpacked_lun; 2741 int lun_size, fn; 2742 2743 tgt = vha->vha_tgt.qla_tgt; 2744 2745 lun = a->u.isp24.fcp_cmnd.lun; 2746 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); 2747 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 2748 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2749 a->u.isp24.fcp_hdr.s_id); 2750 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2751 2752 if (!sess) { 2753 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 2754 "qla_target(%d): task mgmt fn 0x%x for " 2755 "non-existant session\n", vha->vp_idx, fn); 2756 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 2757 sizeof(struct atio_from_isp)); 2758 } 2759 2760 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 2761 } 2762 2763 /* ha->hardware_lock supposed to be 
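held on entry.
 * (The disabled sketch below restates the switch in
 * qlt_issue_task_mgmt() above as a lookup table, mapping the
 * qla2xxx-private QLA_TGT_* codes to generic target-core TMR_*
 * functions; the table name is ours, not part of the driver.)
 */
#if 0
/* Illustrative sketch only; same mapping as the switch above. */
static const struct {
	int fn;
	uint8_t tmr_func;
} example_tmr_map[] = {
	{ QLA_TGT_CLEAR_ACA,	TMR_CLEAR_ACA },
	{ QLA_TGT_TARGET_RESET,	TMR_TARGET_WARM_RESET },
	{ QLA_TGT_LUN_RESET,	TMR_LUN_RESET },
	{ QLA_TGT_CLEAR_TS,	TMR_CLEAR_TASK_SET },
	{ QLA_TGT_ABORT_TS,	TMR_ABORT_TASK_SET },
};
#endif
/* Reminder: ha->hardware_lock supposed to be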
held on entry */ 2764 static int __qlt_abort_task(struct scsi_qla_host *vha, 2765 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) 2766 { 2767 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2768 struct qla_hw_data *ha = vha->hw; 2769 struct qla_tgt_mgmt_cmd *mcmd; 2770 uint32_t lun, unpacked_lun; 2771 int rc; 2772 2773 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2774 if (mcmd == NULL) { 2775 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 2776 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 2777 vha->vp_idx, __func__); 2778 return -ENOMEM; 2779 } 2780 memset(mcmd, 0, sizeof(*mcmd)); 2781 2782 mcmd->sess = sess; 2783 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2784 sizeof(mcmd->orig_iocb.imm_ntfy)); 2785 2786 lun = a->u.isp24.fcp_cmnd.lun; 2787 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2788 2789 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, 2790 le16_to_cpu(iocb->u.isp2x.seq_id)); 2791 if (rc != 0) { 2792 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 2793 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2794 vha->vp_idx, rc); 2795 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2796 return -EFAULT; 2797 } 2798 2799 return 0; 2800 } 2801 2802 /* ha->hardware_lock supposed to be held on entry */ 2803 static int qlt_abort_task(struct scsi_qla_host *vha, 2804 struct imm_ntfy_from_isp *iocb) 2805 { 2806 struct qla_hw_data *ha = vha->hw; 2807 struct qla_tgt_sess *sess; 2808 int loop_id; 2809 2810 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 2811 2812 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 2813 if (sess == NULL) { 2814 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 2815 "qla_target(%d): task abort for unexisting " 2816 "session\n", vha->vp_idx); 2817 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 2818 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 2819 } 2820 2821 return __qlt_abort_task(vha, iocb, sess); 2822 } 2823 2824 /* 2825 * ha->hardware_lock supposed to be held on entry. 
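 * (Both TMF entry points above decode the wire LUN identically: copy
 * the FCP LUN field and let the SCSI core flatten it. The disabled
 * sketch below isolates that step; the helper name is ours, not part
 * of the driver.)
 */
#if 0
/* Illustrative sketch only, mirroring qlt_handle_task_mgmt() above. */
static uint32_t example_unpack_lun(struct atio_from_isp *a)
{
	uint32_t lun = a->u.isp24.fcp_cmnd.lun;

	return scsilun_to_int((struct scsi_lun *)&lun);
}
#endif
/*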
Might drop it, then reaquire 2826 */ 2827 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2828 struct imm_ntfy_from_isp *iocb) 2829 { 2830 int res = 0; 2831 2832 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 2833 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 2834 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 2835 2836 switch (iocb->u.isp24.status_subcode) { 2837 case ELS_PLOGI: 2838 case ELS_FLOGI: 2839 case ELS_PRLI: 2840 case ELS_LOGO: 2841 case ELS_PRLO: 2842 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2843 break; 2844 case ELS_PDISC: 2845 case ELS_ADISC: 2846 { 2847 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2848 if (tgt->link_reinit_iocb_pending) { 2849 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2850 0, 0, 0, 0, 0, 0); 2851 tgt->link_reinit_iocb_pending = 0; 2852 } 2853 res = 1; /* send notify ack */ 2854 break; 2855 } 2856 2857 default: 2858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 2859 "qla_target(%d): Unsupported ELS command %x " 2860 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 2861 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2862 break; 2863 } 2864 2865 return res; 2866 } 2867 2868 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) 2869 { 2870 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; 2871 size_t first_offset = 0, rem_offset = offset, tmp = 0; 2872 int i, sg_srr_cnt, bufflen = 0; 2873 2874 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, 2875 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " 2876 "cmd->sg_cnt: %u, direction: %d\n", 2877 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 2878 2879 /* 2880 * FIXME: Reject non zero SRR relative offset until we can test 2881 * this code properly. 2882 */ 2883 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 2884 return -1; 2885 2886 if (!cmd->sg || !cmd->sg_cnt) { 2887 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 2888 "Missing cmd->sg or zero cmd->sg_cnt in" 2889 " qla_tgt_set_data_offset\n"); 2890 return -EINVAL; 2891 } 2892 /* 2893 * Walk the current cmd->sg list until we locate the new sg_srr_start 2894 */ 2895 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 2896 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 2897 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 2898 i, sg, sg_page(sg), sg->length, sg->offset); 2899 2900 if ((sg->length + tmp) > offset) { 2901 first_offset = rem_offset; 2902 sg_srr_start = sg; 2903 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 2904 "Found matching sg[%d], using %p as sg_srr_start, " 2905 "and using first_offset: %zu\n", i, sg, 2906 first_offset); 2907 break; 2908 } 2909 tmp += sg->length; 2910 rem_offset -= sg->length; 2911 } 2912 2913 if (!sg_srr_start) { 2914 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 2915 "Unable to locate sg_srr_start for offset: %u\n", offset); 2916 return -EINVAL; 2917 } 2918 sg_srr_cnt = (cmd->sg_cnt - i); 2919 2920 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 2921 if (!sg_srr) { 2922 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 2923 "Unable to allocate sgp\n"); 2924 return -ENOMEM; 2925 } 2926 sg_init_table(sg_srr, sg_srr_cnt); 2927 sgp = &sg_srr[0]; 2928 /* 2929 * Walk the remaining list for sg_srr_start, mapping to the newly 2930 * allocated sg_srr taking first_offset into account. 
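 * (The first walk above reduces to simple prefix-sum arithmetic; the
 * disabled sketch below shows it over a plain array of segment
 * lengths, returning the segment that holds byte 'offset' plus the
 * leftover intra-segment offset. Names are ours, not part of the
 * driver.)
 */
#if 0
/* Illustrative sketch only of the offset-to-segment search. */
static int example_find_seg(const unsigned int *len, int nseg,
	unsigned int offset, unsigned int *first_offset)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < nseg; i++) {
		if (sum + len[i] > offset) {
			*first_offset = offset - sum;
			return i;
		}
		sum += len[i];
	}

	return -1;	/* offset lies beyond the buffer */
}
#endif
/*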
2931 */ 2932 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 2933 if (first_offset) { 2934 sg_set_page(sgp, sg_page(sg), 2935 (sg->length - first_offset), first_offset); 2936 first_offset = 0; 2937 } else { 2938 sg_set_page(sgp, sg_page(sg), sg->length, 0); 2939 } 2940 bufflen += sgp->length; 2941 2942 sgp = sg_next(sgp); 2943 if (!sgp) 2944 break; 2945 } 2946 2947 cmd->sg = sg_srr; 2948 cmd->sg_cnt = sg_srr_cnt; 2949 cmd->bufflen = bufflen; 2950 cmd->offset += offset; 2951 cmd->free_sg = 1; 2952 2953 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 2954 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 2955 cmd->sg_cnt); 2956 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 2957 cmd->bufflen); 2958 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 2959 cmd->offset); 2960 2961 if (cmd->sg_cnt < 0) 2962 BUG(); 2963 2964 if (cmd->bufflen < 0) 2965 BUG(); 2966 2967 return 0; 2968 } 2969 2970 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 2971 uint32_t srr_rel_offs, int *xmit_type) 2972 { 2973 int res = 0, rel_offs; 2974 2975 rel_offs = srr_rel_offs - cmd->offset; 2976 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 2977 srr_rel_offs, rel_offs); 2978 2979 *xmit_type = QLA_TGT_XMIT_ALL; 2980 2981 if (rel_offs < 0) { 2982 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 2983 "qla_target(%d): SRR rel_offs (%d) < 0", 2984 cmd->vha->vp_idx, rel_offs); 2985 res = -1; 2986 } else if (rel_offs == cmd->bufflen) 2987 *xmit_type = QLA_TGT_XMIT_STATUS; 2988 else if (rel_offs > 0) 2989 res = qlt_set_data_offset(cmd, rel_offs); 2990 2991 return res; 2992 } 2993 2994 /* No locks, thread context */ 2995 static void qlt_handle_srr(struct scsi_qla_host *vha, 2996 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 2997 { 2998 struct imm_ntfy_from_isp *ntfy = 2999 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 3000 struct qla_hw_data *ha = vha->hw; 3001 struct qla_tgt_cmd *cmd = sctio->cmd; 3002 struct se_cmd *se_cmd = &cmd->se_cmd; 3003 unsigned long flags; 3004 int xmit_type = 0, resp = 0; 3005 uint32_t offset; 3006 uint16_t srr_ui; 3007 3008 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 3009 srr_ui = ntfy->u.isp24.srr_ui; 3010 3011 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 3012 cmd, srr_ui); 3013 3014 switch (srr_ui) { 3015 case SRR_IU_STATUS: 3016 spin_lock_irqsave(&ha->hardware_lock, flags); 3017 qlt_send_notify_ack(vha, ntfy, 3018 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3019 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3020 xmit_type = QLA_TGT_XMIT_STATUS; 3021 resp = 1; 3022 break; 3023 case SRR_IU_DATA_IN: 3024 if (!cmd->sg || !cmd->sg_cnt) { 3025 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3026 "Unable to process SRR_IU_DATA_IN due to" 3027 " missing cmd->sg, state: %d\n", cmd->state); 3028 dump_stack(); 3029 goto out_reject; 3030 } 3031 if (se_cmd->scsi_status != 0) { 3032 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3033 "Rejecting SRR_IU_DATA_IN with non GOOD " 3034 "scsi_status\n"); 3035 goto out_reject; 3036 } 3037 cmd->bufflen = se_cmd->data_length; 3038 3039 if (qlt_has_data(cmd)) { 3040 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3041 goto out_reject; 3042 spin_lock_irqsave(&ha->hardware_lock, flags); 3043 qlt_send_notify_ack(vha, ntfy, 3044 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3045 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3046 resp = 1; 3047 } else { 3048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3049 "qla_target(%d): SRR for in data for cmd " 3050 "without them 
(tag %d, SCSI status %d), " 3051 "reject", vha->vp_idx, cmd->tag, 3052 cmd->se_cmd.scsi_status); 3053 goto out_reject; 3054 } 3055 break; 3056 case SRR_IU_DATA_OUT: 3057 if (!cmd->sg || !cmd->sg_cnt) { 3058 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3059 "Unable to process SRR_IU_DATA_OUT due to" 3060 " missing cmd->sg\n"); 3061 dump_stack(); 3062 goto out_reject; 3063 } 3064 if (se_cmd->scsi_status != 0) { 3065 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3066 "Rejecting SRR_IU_DATA_OUT" 3067 " with non GOOD scsi_status\n"); 3068 goto out_reject; 3069 } 3070 cmd->bufflen = se_cmd->data_length; 3071 3072 if (qlt_has_data(cmd)) { 3073 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3074 goto out_reject; 3075 spin_lock_irqsave(&ha->hardware_lock, flags); 3076 qlt_send_notify_ack(vha, ntfy, 3077 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3078 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3079 if (xmit_type & QLA_TGT_XMIT_DATA) 3080 qlt_rdy_to_xfer(cmd); 3081 } else { 3082 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3083 "qla_target(%d): SRR for out data for cmd " 3084 "without them (tag %d, SCSI status %d), " 3085 "reject", vha->vp_idx, cmd->tag, 3086 cmd->se_cmd.scsi_status); 3087 goto out_reject; 3088 } 3089 break; 3090 default: 3091 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3092 "qla_target(%d): Unknown srr_ui value %x", 3093 vha->vp_idx, srr_ui); 3094 goto out_reject; 3095 } 3096 3097 /* Transmit response in case of status and data-in cases */ 3098 if (resp) 3099 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3100 3101 return; 3102 3103 out_reject: 3104 spin_lock_irqsave(&ha->hardware_lock, flags); 3105 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3106 NOTIFY_ACK_SRR_FLAGS_REJECT, 3107 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3108 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3109 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3110 cmd->state = QLA_TGT_STATE_DATA_IN; 3111 dump_stack(); 3112 } else 3113 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3114 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3115 } 3116 3117 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3118 struct qla_tgt_srr_imm *imm, int ha_locked) 3119 { 3120 struct qla_hw_data *ha = vha->hw; 3121 unsigned long flags = 0; 3122 3123 if (!ha_locked) 3124 spin_lock_irqsave(&ha->hardware_lock, flags); 3125 3126 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3127 NOTIFY_ACK_SRR_FLAGS_REJECT, 3128 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3129 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3130 3131 if (!ha_locked) 3132 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3133 3134 kfree(imm); 3135 } 3136 3137 static void qlt_handle_srr_work(struct work_struct *work) 3138 { 3139 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3140 struct scsi_qla_host *vha = tgt->vha; 3141 struct qla_tgt_srr_ctio *sctio; 3142 unsigned long flags; 3143 3144 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3145 tgt); 3146 3147 restart: 3148 spin_lock_irqsave(&tgt->srr_lock, flags); 3149 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3150 struct qla_tgt_srr_imm *imm, *i, *ti; 3151 struct qla_tgt_cmd *cmd; 3152 struct se_cmd *se_cmd; 3153 3154 imm = NULL; 3155 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3156 srr_list_entry) { 3157 if (i->srr_id == sctio->srr_id) { 3158 list_del(&i->srr_list_entry); 3159 if (imm) { 3160 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3161 "qla_target(%d): There must be " 3162 "only one IMM SRR per CTIO SRR " 3163 "(IMM SRR %p, id %d, CTIO %p\n", 
3164 vha->vp_idx, i, i->srr_id, sctio); 3165 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3166 } else 3167 imm = i; 3168 } 3169 } 3170 3171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3172 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3173 sctio->srr_id); 3174 3175 if (imm == NULL) { 3176 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3177 "Not found matching IMM for SRR CTIO (id %d)\n", 3178 sctio->srr_id); 3179 continue; 3180 } else 3181 list_del(&sctio->srr_list_entry); 3182 3183 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3184 3185 cmd = sctio->cmd; 3186 /* 3187 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3188 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3189 * logic.. 3190 */ 3191 cmd->offset = 0; 3192 if (cmd->free_sg) { 3193 kfree(cmd->sg); 3194 cmd->sg = NULL; 3195 cmd->free_sg = 0; 3196 } 3197 se_cmd = &cmd->se_cmd; 3198 3199 cmd->sg_cnt = se_cmd->t_data_nents; 3200 cmd->sg = se_cmd->t_data_sg; 3201 3202 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3203 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3204 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3205 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 3206 cmd->sg_cnt, cmd->offset); 3207 3208 qlt_handle_srr(vha, sctio, imm); 3209 3210 kfree(imm); 3211 kfree(sctio); 3212 goto restart; 3213 } 3214 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3215 } 3216 3217 /* ha->hardware_lock supposed to be held on entry */ 3218 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3219 struct imm_ntfy_from_isp *iocb) 3220 { 3221 struct qla_tgt_srr_imm *imm; 3222 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3223 struct qla_tgt_srr_ctio *sctio; 3224 3225 tgt->imm_srr_id++; 3226 3227 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3228 vha->vp_idx); 3229 3230 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3231 if (imm != NULL) { 3232 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3233 3234 /* IRQ is already OFF */ 3235 spin_lock(&tgt->srr_lock); 3236 imm->srr_id = tgt->imm_srr_id; 3237 list_add_tail(&imm->srr_list_entry, 3238 &tgt->srr_imm_list); 3239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3240 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3241 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3242 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3243 int found = 0; 3244 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3245 srr_list_entry) { 3246 if (sctio->srr_id == imm->srr_id) { 3247 found = 1; 3248 break; 3249 } 3250 } 3251 if (found) { 3252 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3253 "Scheduling srr work\n"); 3254 schedule_work(&tgt->srr_work); 3255 } else { 3256 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3257 "qla_target(%d): imm_srr_id " 3258 "== ctio_srr_id (%d), but there is no " 3259 "corresponding SRR CTIO, deleting IMM " 3260 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3261 imm); 3262 list_del(&imm->srr_list_entry); 3263 3264 kfree(imm); 3265 3266 spin_unlock(&tgt->srr_lock); 3267 goto out_reject; 3268 } 3269 } 3270 spin_unlock(&tgt->srr_lock); 3271 } else { 3272 struct qla_tgt_srr_ctio *ts; 3273 3274 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3275 "qla_target(%d): Unable to allocate SRR IMM " 3276 "entry, SRR request will be rejected\n", vha->vp_idx); 3277 3278 /* IRQ is already OFF */ 3279 spin_lock(&tgt->srr_lock); 3280 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3281 srr_list_entry) { 3282 if (sctio->srr_id == tgt->imm_srr_id) { 3283 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3284 "CTIO SRR %p deleted (id %d)\n", 3285 sctio, sctio->srr_id); 3286 list_del(&sctio->srr_list_entry); 3287 qlt_send_term_exchange(vha, 
sctio->cmd, 3288 &sctio->cmd->atio, 1); 3289 kfree(sctio); 3290 } 3291 } 3292 spin_unlock(&tgt->srr_lock); 3293 goto out_reject; 3294 } 3295 3296 return; 3297 3298 out_reject: 3299 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3300 NOTIFY_ACK_SRR_FLAGS_REJECT, 3301 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3302 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3303 } 3304 3305 /* 3306 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3307 */ 3308 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 3309 struct imm_ntfy_from_isp *iocb) 3310 { 3311 struct qla_hw_data *ha = vha->hw; 3312 uint32_t add_flags = 0; 3313 int send_notify_ack = 1; 3314 uint16_t status; 3315 3316 status = le16_to_cpu(iocb->u.isp2x.status); 3317 switch (status) { 3318 case IMM_NTFY_LIP_RESET: 3319 { 3320 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 3321 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 3322 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 3323 iocb->u.isp24.status_subcode); 3324 3325 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3326 send_notify_ack = 0; 3327 break; 3328 } 3329 3330 case IMM_NTFY_LIP_LINK_REINIT: 3331 { 3332 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3333 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3334 "qla_target(%d): LINK REINIT (loop %#x, " 3335 "subcode %x)\n", vha->vp_idx, 3336 le16_to_cpu(iocb->u.isp24.nport_handle), 3337 iocb->u.isp24.status_subcode); 3338 if (tgt->link_reinit_iocb_pending) { 3339 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 3340 0, 0, 0, 0, 0, 0); 3341 } 3342 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 3343 tgt->link_reinit_iocb_pending = 1; 3344 /* 3345 * QLogic requires to wait after LINK REINIT for possible 3346 * PDISC or ADISC ELS commands 3347 */ 3348 send_notify_ack = 0; 3349 break; 3350 } 3351 3352 case IMM_NTFY_PORT_LOGOUT: 3353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 3354 "qla_target(%d): Port logout (loop " 3355 "%#x, subcode %x)\n", vha->vp_idx, 3356 le16_to_cpu(iocb->u.isp24.nport_handle), 3357 iocb->u.isp24.status_subcode); 3358 3359 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 3360 send_notify_ack = 0; 3361 /* The sessions will be cleared in the callback, if needed */ 3362 break; 3363 3364 case IMM_NTFY_GLBL_TPRLO: 3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 3366 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 3367 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3368 send_notify_ack = 0; 3369 /* The sessions will be cleared in the callback, if needed */ 3370 break; 3371 3372 case IMM_NTFY_PORT_CONFIG: 3373 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 3374 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 3375 status); 3376 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3377 send_notify_ack = 0; 3378 /* The sessions will be cleared in the callback, if needed */ 3379 break; 3380 3381 case IMM_NTFY_GLBL_LOGO: 3382 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 3383 "qla_target(%d): Link failure detected\n", 3384 vha->vp_idx); 3385 /* I_T nexus loss */ 3386 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3387 send_notify_ack = 0; 3388 break; 3389 3390 case IMM_NTFY_IOCB_OVERFLOW: 3391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 3392 "qla_target(%d): Cannot provide requested " 3393 "capability (IOCB overflowed the immediate notify " 3394 "resource count)\n", vha->vp_idx); 3395 break; 3396 3397 case IMM_NTFY_ABORT_TASK: 3398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 3399 "qla_target(%d): Abort Task (S %08x I %#x -> " 3400 "L %#x)\n", vha->vp_idx, 3401 le16_to_cpu(iocb->u.isp2x.seq_id), 3402 GET_TARGET_ID(ha, 
(struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire. This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending a marker isn't necessary, since we are called from an ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to
	 * retry it, if explicit confirmation is used.
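	 * (The residual below, like data_length in qlt_do_work(), is read
	 * from the big-endian FCP_DL field that sits right after any
	 * additional CDB bytes in the FCP_CMND payload. The disabled sketch
	 * isolates that access; the helper name is ours, not part of the
	 * driver.)
	 */
#if 0
/* Illustrative sketch only of the FCP_DL extraction. */
static uint32_t example_get_fcp_dl(struct atio_from_isp *atio)
{
	return be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
}
#endif
/*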
3490 */ 3491 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 3492 ctio24->u.status1.scsi_status = cpu_to_le16(status); 3493 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3494 &atio->u.isp24.fcp_cmnd.add_cdb[ 3495 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3496 if (ctio24->u.status1.residual != 0) 3497 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3498 3499 qla2x00_start_iocbs(vha, vha->req); 3500 } 3501 3502 /* ha->hardware_lock supposed to be held on entry */ 3503 /* called via callback from qla2xxx */ 3504 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 3505 struct atio_from_isp *atio) 3506 { 3507 struct qla_hw_data *ha = vha->hw; 3508 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3509 int rc; 3510 3511 if (unlikely(tgt == NULL)) { 3512 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, 3513 "ATIO pkt, but no tgt (ha %p)", ha); 3514 return; 3515 } 3516 ql_dbg(ql_dbg_tgt, vha, 0xe02c, 3517 "qla_target(%d): ATIO pkt %p: type %02x count %02x", 3518 vha->vp_idx, atio, atio->u.raw.entry_type, 3519 atio->u.raw.entry_count); 3520 /* 3521 * In tgt_stop mode we also should allow all requests to pass. 3522 * Otherwise, some commands can stuck. 3523 */ 3524 3525 tgt->irq_cmd_count++; 3526 3527 switch (atio->u.raw.entry_type) { 3528 case ATIO_TYPE7: 3529 ql_dbg(ql_dbg_tgt, vha, 0xe02d, 3530 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " 3531 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", 3532 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, 3533 atio->u.isp24.fcp_cmnd.rddata, 3534 atio->u.isp24.fcp_cmnd.wrdata, 3535 atio->u.isp24.fcp_cmnd.add_cdb_len, 3536 be32_to_cpu(get_unaligned((uint32_t *) 3537 &atio->u.isp24.fcp_cmnd.add_cdb[ 3538 atio->u.isp24.fcp_cmnd.add_cdb_len])), 3539 atio->u.isp24.fcp_hdr.s_id[0], 3540 atio->u.isp24.fcp_hdr.s_id[1], 3541 atio->u.isp24.fcp_hdr.s_id[2]); 3542 3543 if (unlikely(atio->u.isp24.exchange_addr == 3544 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 3545 ql_dbg(ql_dbg_tgt, vha, 0xe058, 3546 "qla_target(%d): ATIO_TYPE7 " 3547 "received with UNKNOWN exchange address, " 3548 "sending QUEUE_FULL\n", vha->vp_idx); 3549 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 3550 break; 3551 } 3552 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) 3553 rc = qlt_handle_cmd_for_atio(vha, atio); 3554 else 3555 rc = qlt_handle_task_mgmt(vha, atio); 3556 if (unlikely(rc != 0)) { 3557 if (rc == -ESRCH) { 3558 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 3559 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3560 #else 3561 qlt_send_term_exchange(vha, NULL, atio, 1); 3562 #endif 3563 } else { 3564 if (tgt->tgt_stop) { 3565 ql_dbg(ql_dbg_tgt, vha, 0xe059, 3566 "qla_target: Unable to send " 3567 "command to target for req, " 3568 "ignoring.\n"); 3569 } else { 3570 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 3571 "qla_target(%d): Unable to send " 3572 "command to target, sending BUSY " 3573 "status.\n", vha->vp_idx); 3574 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3575 } 3576 } 3577 } 3578 break; 3579 3580 case IMMED_NOTIFY_TYPE: 3581 { 3582 if (unlikely(atio->u.isp2x.entry_status != 0)) { 3583 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 3584 "qla_target(%d): Received ATIO packet %x " 3585 "with error status %x\n", vha->vp_idx, 3586 atio->u.raw.entry_type, 3587 atio->u.isp2x.entry_status); 3588 break; 3589 } 3590 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 3591 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 3592 break; 3593 } 3594 3595 default: 3596 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 3597 "qla_target(%d): Received unknown ATIO atio " 3598 "type %x\n", 
vha->vp_idx, atio->u.raw.entry_type); 3599 break; 3600 } 3601 3602 tgt->irq_cmd_count--; 3603 } 3604 3605 /* ha->hardware_lock supposed to be held on entry */ 3606 /* called via callback from qla2xxx */ 3607 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 3608 { 3609 struct qla_hw_data *ha = vha->hw; 3610 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3611 3612 if (unlikely(tgt == NULL)) { 3613 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 3614 "qla_target(%d): Response pkt %x received, but no " 3615 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); 3616 return; 3617 } 3618 3619 ql_dbg(ql_dbg_tgt, vha, 0xe02f, 3620 "qla_target(%d): response pkt %p: T %02x C %02x S %02x " 3621 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, 3622 pkt->entry_count, pkt->entry_status, pkt->handle); 3623 3624 /* 3625 * In tgt_stop mode we also should allow all requests to pass. 3626 * Otherwise, some commands can stuck. 3627 */ 3628 3629 tgt->irq_cmd_count++; 3630 3631 switch (pkt->entry_type) { 3632 case CTIO_TYPE7: 3633 { 3634 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 3635 ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", 3636 vha->vp_idx); 3637 qlt_do_ctio_completion(vha, entry->handle, 3638 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 3639 entry); 3640 break; 3641 } 3642 3643 case ACCEPT_TGT_IO_TYPE: 3644 { 3645 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 3646 int rc; 3647 ql_dbg(ql_dbg_tgt, vha, 0xe031, 3648 "ACCEPT_TGT_IO instance %d status %04x " 3649 "lun %04x read/write %d data_length %04x " 3650 "target_id %02x rx_id %04x\n ", vha->vp_idx, 3651 le16_to_cpu(atio->u.isp2x.status), 3652 le16_to_cpu(atio->u.isp2x.lun), 3653 atio->u.isp2x.execution_codes, 3654 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, 3655 atio), atio->u.isp2x.rx_id); 3656 if (atio->u.isp2x.status != 3657 __constant_cpu_to_le16(ATIO_CDB_VALID)) { 3658 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 3659 "qla_target(%d): ATIO with error " 3660 "status %x received\n", vha->vp_idx, 3661 le16_to_cpu(atio->u.isp2x.status)); 3662 break; 3663 } 3664 ql_dbg(ql_dbg_tgt, vha, 0xe032, 3665 "FCP CDB: 0x%02x, sizeof(cdb): %lu", 3666 atio->u.isp2x.cdb[0], (unsigned long 3667 int)sizeof(atio->u.isp2x.cdb)); 3668 3669 rc = qlt_handle_cmd_for_atio(vha, atio); 3670 if (unlikely(rc != 0)) { 3671 if (rc == -ESRCH) { 3672 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 3673 qlt_send_busy(vha, atio, 0); 3674 #else 3675 qlt_send_term_exchange(vha, NULL, atio, 1); 3676 #endif 3677 } else { 3678 if (tgt->tgt_stop) { 3679 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 3680 "qla_target: Unable to send " 3681 "command to target, sending TERM " 3682 "EXCHANGE for rsp\n"); 3683 qlt_send_term_exchange(vha, NULL, 3684 atio, 1); 3685 } else { 3686 ql_dbg(ql_dbg_tgt, vha, 0xe060, 3687 "qla_target(%d): Unable to send " 3688 "command to target, sending BUSY " 3689 "status\n", vha->vp_idx); 3690 qlt_send_busy(vha, atio, 0); 3691 } 3692 } 3693 } 3694 } 3695 break; 3696 3697 case CONTINUE_TGT_IO_TYPE: 3698 { 3699 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 3700 ql_dbg(ql_dbg_tgt, vha, 0xe033, 3701 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); 3702 qlt_do_ctio_completion(vha, entry->handle, 3703 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 3704 entry); 3705 break; 3706 } 3707 3708 case CTIO_A64_TYPE: 3709 { 3710 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 3711 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", 3712 vha->vp_idx); 3713 qlt_do_ctio_completion(vha, 
entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the time the abort
					 * request was received and the time it
					 * was processed. Unfortunately, the
					 * firmware has a silly requirement
					 * that all aborted exchanges must be
					 * explicitly terminated, otherwise it
					 * refuses to send responses for the
					 * abort requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry.
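 * (The response handler above gates NOTIFY_ACK and ABTS_RESP
 * processing on the per-tgt notify_ack_expected/abts_resp_expected
 * counters, so unsolicited packets are only logged. The disabled
 * sketch below restates that pattern; the helper name is ours, not
 * part of the driver.)
 */
#if 0
/* Illustrative sketch only of the expected-counter gate. */
static int example_take_expected(int *expected)
{
	if (*expected > 0) {
		(*expected)--;
		return 1;	/* solicited: process the packet */
	}

	return 0;		/* unsolicited: log and drop */
}
#endif
/*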
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed", 3918 vha->vp_idx); 3919 return NULL; 3920 } 3921 3922 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); 3923 3924 fcport->loop_id = loop_id; 3925 3926 rc = qla2x00_get_port_database(vha, fcport, 0); 3927 if (rc != QLA_SUCCESS) { 3928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 3929 "qla_target(%d): Failed to retrieve fcport " 3930 "information -- get_port_database() returned %x " 3931 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 3932 kfree(fcport); 3933 return NULL; 3934 } 3935 3936 return fcport; 3937 } 3938 3939 /* Must be called under tgt_mutex */ 3940 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 3941 uint8_t *s_id) 3942 { 3943 struct qla_tgt_sess *sess = NULL; 3944 fc_port_t *fcport = NULL; 3945 int rc, global_resets; 3946 uint16_t loop_id = 0; 3947 3948 retry: 3949 global_resets = 3950 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 3951 3952 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 3953 if (rc != 0) { 3954 if ((s_id[0] == 0xFF) && 3955 (s_id[1] == 0xFC)) { 3956 /* 3957 * This is Domain Controller, so it should be 3958 * OK to drop SCSI commands from it. 3959 */ 3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 3961 "Unable to find initiator with S_ID %x:%x:%x", 3962 s_id[0], s_id[1], s_id[2]); 3963 } else 3964 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, 3965 "qla_target(%d): Unable to find " 3966 "initiator with S_ID %x:%x:%x", 3967 vha->vp_idx, s_id[0], s_id[1], 3968 s_id[2]); 3969 return NULL; 3970 } 3971 3972 fcport = qlt_get_port_database(vha, loop_id); 3973 if (!fcport) 3974 return NULL; 3975 3976 if (global_resets != 3977 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 3978 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 3979 "qla_target(%d): global reset during session discovery " 3980 "(counter was %d, new %d), retrying", vha->vp_idx, 3981 global_resets, 3982 atomic_read(&vha->vha_tgt. 
/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is the Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		/* Don't leak the port database entry fetched above */
		kfree(fcport);
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* fcp_hdr_le carries the S_ID byte-reversed; restore wire order */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
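/*
 * Why the two helpers above run from a work item rather than from the
 * interrupt path: when no session exists yet for the initiator,
 * qlt_make_local_sess() has to sleep (it takes tgt_mutex, allocates with
 * GFP_KERNEL and issues mailbox commands to fetch the port database),
 * which is impossible under ha->hardware_lock in IRQ context. The ABTS/TM
 * IOCB is therefore queued as a qla_tgt_sess_work_param and replayed below
 * in process context.
 */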
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
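/*
 * Targets added above for physical (non-NPIV) hosts are linked on the
 * global qla_tgt_glist; qlt_lport_register() further below walks that list
 * to bind a configfs-created lport to the HBA whose port_name matches the
 * requested WWPN.
 */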
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: WWPN of the physical FC port to bind the lport to
 * @npiv_wwpn: NPIV WWPN, or 0 when registering a physical lport
 * @npiv_wwnn: NPIV WWNN, or 0 when registering a physical lport
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
	u64 npiv_wwpn, u64 npiv_wwnn,
	int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) &&
		    host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
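/*
 * How ql2x_ini_mode maps to Scsi_Host->active_mode in the two helpers
 * below:
 *
 *	ql2x_ini_mode	qlt_set_mode()		qlt_clear_mode()
 *	DISABLED	MODE_TARGET		MODE_UNKNOWN
 *	EXCLUSIVE	MODE_TARGET		MODE_INITIATOR
 *	ENABLED		adds MODE_TARGET	drops MODE_TARGET
 *
 * i.e. only the "enabled" setting keeps initiator mode active alongside
 * target mode.
 */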
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
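/*
 * Note that neither helper above flips the firmware mode directly: both
 * only update active_mode and then force re-initialization (either a VP
 * disable/enable cycle, or ISP_ABORT_NEEDED plus a DPC wakeup on the base
 * host), so the new mode takes effect when the firmware comes back up.
 */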
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it, a check
	 * should be added for the specific ISPs, assigning the
	 * value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI qla host context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI qla host context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for ATIO queue.\n",
		    msix->entry);
	}
}
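/*
 * The two *_config_nvram_stage1() helpers below share one pattern: the
 * first time target mode is enabled, the initiator-mode NVRAM values
 * (exchange_count, firmware_options_1..3) are stashed in ha->tgt under
 * saved_set, the target-specific option bits are ORed in, and the saved
 * copies are written back verbatim once target mode is turned off again.
 */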
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tape support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tape support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd),
	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
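/*
 * Usage sketch for the tri-state qlt_init() return value. The actual
 * consumer is the qla2xxx module init path in qla_os.c (outside this
 * file), so the snippet below is illustrative only:
 *
 *	ret = qlt_init();
 *	if (ret < 0)
 *		return ret;	// cache/mempool/workqueue setup failed
 *	if (ret > 0)
 *		;		// qlini_mode=disabled: skip initiator-only setup
 */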