/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and may unlock/relock it internally.
 * This isn't an issue, because at the time those functions are called in
 * the current implementation:
 *
 *  - Either the context is IRQ and only the IRQ handler can modify HW data,
 *    including ring-related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions' boundaries, except for tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */
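/*
 * Illustration only, not driver code: a minimal sketch of the calling
 * pattern described above. The HW lock is taken by the caller, and
 * qla2x00_alloc_iocbs() may drop and re-take it internally, so nothing
 * cached from before the call can be assumed valid afterwards.
 * qlt_fill_and_fire() is a hypothetical helper standing in for the
 * real IOCB setup done by the callers below.
 */
#if 0
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, NULL);	/* may unlock/relock */
	if (pkt)
		/* re-read any ring state here; do not reuse stale values */
		qlt_fill_and_fire(pkt);		/* hypothetical helper */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
#endif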
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release the HW lock, then reacquire it! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
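/*
 * Note: as the two lookups above show, tgt_vp_map[] is indexed two
 * ways -- by AL_PA (d_id[2]) to translate a destination ID into a
 * vp_index via .idx, and by vp_index to reach the owning vha via
 * .vha. vp_idx_map is consulted first so a stale map slot left behind
 * by a deleted vport is never dereferenced.
 */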
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received ATIO packet of "
		    "unknown type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA allocation of %u bytes failed\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
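/*
 * Note: the work function above only ever examines the list head and
 * re-arms itself for that entry's remaining time. Since an "immediate"
 * deletion is still appended at the tail of del_sess_list, it can end
 * up queued behind entries with longer timeouts, although the
 * schedule_delayed_work(..., 0) call makes the handler run right away
 * and start reaping any heads that have already expired.
 */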
/*
 * Adds an extra ref to allow the HW lock to be dropped after adding the
 * sess to the list. The caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.area, sess->s_id.b.al_pa,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
"" : "not "); 675 676 return sess; 677 } 678 679 /* 680 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 681 */ 682 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 683 { 684 struct qla_hw_data *ha = vha->hw; 685 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 686 struct qla_tgt_sess *sess; 687 unsigned long flags; 688 689 if (!vha->hw->tgt.tgt_ops) 690 return; 691 692 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 693 return; 694 695 if (qla_ini_mode_enabled(vha)) 696 return; 697 698 spin_lock_irqsave(&ha->hardware_lock, flags); 699 if (tgt->tgt_stop) { 700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 701 return; 702 } 703 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 704 if (!sess) { 705 spin_unlock_irqrestore(&ha->hardware_lock, flags); 706 707 mutex_lock(&vha->vha_tgt.tgt_mutex); 708 sess = qlt_create_sess(vha, fcport, false); 709 mutex_unlock(&vha->vha_tgt.tgt_mutex); 710 711 spin_lock_irqsave(&ha->hardware_lock, flags); 712 } else { 713 kref_get(&sess->se_sess->sess_kref); 714 715 if (sess->deleted) { 716 qlt_undelete_sess(sess); 717 718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 719 "qla_target(%u): %ssession for port %8phC " 720 "(loop ID %d) reappeared\n", vha->vp_idx, 721 sess->local ? "local " : "", sess->port_name, 722 sess->loop_id); 723 724 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 725 "Reappeared sess %p\n", sess); 726 } 727 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 728 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 729 } 730 731 if (sess && sess->local) { 732 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 733 "qla_target(%u): local session for " 734 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 735 fcport->port_name, sess->loop_id); 736 sess->local = 0; 737 } 738 ha->tgt.tgt_ops->put_sess(sess); 739 spin_unlock_irqrestore(&ha->hardware_lock, flags); 740 } 741 742 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 743 { 744 struct qla_hw_data *ha = vha->hw; 745 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 746 struct qla_tgt_sess *sess; 747 unsigned long flags; 748 749 if (!vha->hw->tgt.tgt_ops) 750 return; 751 752 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 753 return; 754 755 spin_lock_irqsave(&ha->hardware_lock, flags); 756 if (tgt->tgt_stop) { 757 spin_unlock_irqrestore(&ha->hardware_lock, flags); 758 return; 759 } 760 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 761 if (!sess) { 762 spin_unlock_irqrestore(&ha->hardware_lock, flags); 763 return; 764 } 765 766 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 767 768 sess->local = 1; 769 qlt_schedule_sess_for_deletion(sess, false); 770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 771 } 772 773 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 774 { 775 struct qla_hw_data *ha = tgt->ha; 776 unsigned long flags; 777 int res; 778 /* 779 * We need to protect against race, when tgt is freed before or 780 * inside wake_up() 781 */ 782 spin_lock_irqsave(&ha->hardware_lock, flags); 783 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 784 "tgt %p, empty(sess_list)=%d sess_count=%d\n", 785 tgt, list_empty(&tgt->sess_list), tgt->sess_count); 786 res = (tgt->sess_count == 0); 787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 788 789 return res; 790 } 791 792 /* Called by tcm_qla2xxx configfs code */ 793 void qlt_stop_phase1(struct qla_tgt *tgt) 794 { 795 struct scsi_qla_host *vha = tgt->vha; 796 struct qla_hw_data *ha = tgt->ha; 797 unsigned long flags; 798 799 if (tgt->tgt_stop || tgt->tgt_stopped) 
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * The lock is needed because we can still get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
%d, prm %p)" 915 " to find session for param %p (size %d, tgt %p)\n", 916 type, prm, param, param_size, tgt); 917 918 prm->type = type; 919 memcpy(&prm->tm_iocb, param, param_size); 920 921 spin_lock_irqsave(&tgt->sess_work_lock, flags); 922 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); 923 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 924 925 schedule_work(&tgt->sess_work); 926 927 return 0; 928 } 929 930 /* 931 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 932 */ 933 static void qlt_send_notify_ack(struct scsi_qla_host *vha, 934 struct imm_ntfy_from_isp *ntfy, 935 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 936 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) 937 { 938 struct qla_hw_data *ha = vha->hw; 939 request_t *pkt; 940 struct nack_to_isp *nack; 941 942 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); 943 944 /* Send marker if required */ 945 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS) 946 return; 947 948 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 949 if (!pkt) { 950 ql_dbg(ql_dbg_tgt, vha, 0xe049, 951 "qla_target(%d): %s failed: unable to allocate " 952 "request packet\n", vha->vp_idx, __func__); 953 return; 954 } 955 956 if (vha->vha_tgt.qla_tgt != NULL) 957 vha->vha_tgt.qla_tgt->notify_ack_expected++; 958 959 pkt->entry_type = NOTIFY_ACK_TYPE; 960 pkt->entry_count = 1; 961 962 nack = (struct nack_to_isp *)pkt; 963 nack->ox_id = ntfy->ox_id; 964 965 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 966 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 967 nack->u.isp24.flags = ntfy->u.isp24.flags & 968 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 969 } 970 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 971 nack->u.isp24.status = ntfy->u.isp24.status; 972 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 973 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 974 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 975 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 976 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 977 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); 978 nack->u.isp24.srr_reject_code = srr_reject_code; 979 nack->u.isp24.srr_reject_code_expl = srr_explan; 980 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 981 982 ql_dbg(ql_dbg_tgt, vha, 0xe005, 983 "qla_target(%d): Sending 24xx Notify Ack %d\n", 984 vha->vp_idx, nack->u.isp24.status); 985 986 qla2x00_start_iocbs(vha, vha->req); 987 } 988 989 /* 990 * ha->hardware_lock supposed to be held on entry. 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
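/*
 * Note: for a normal reply (ids_reversed == false) the received
 * header's s_id/d_id are swapped above so the response is addressed
 * back to the initiator. qlt_24xx_retry_term_exchange() below passes
 * ids_reversed == true because there it responds to the firmware's
 * echo of our own ABTS response, whose IDs are already reversed and
 * must be copied through unchanged.
 */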
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * that we generated, so its ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* fcp_hdr_le stores the S_ID little-endian; reverse to wire order */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    vha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(vha, vha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
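/*
 * Note: qlt_xmit_tm_rsp() below picks the reply format from the TMR's
 * origin -- an IMMED_NOTIFY-sourced TMF is answered with a NOTIFY_ACK,
 * an ABTS-sourced abort with an ABTS response, and everything else
 * with a status-mode CTIO7 carrying the FCP response code; the mcmd
 * itself is then returned to its mempool via ->free_mcmd().
 */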
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If there are more sg entries than fit in the command IOCB, we
	 * need to allocate continuation entries.
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
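/*
 * Illustration only, not driver code: the free-slot computation above
 * is plain circular-buffer arithmetic against the firmware's consumer
 * index (req_q_out), keeping two slots in reserve. The numbers below
 * are made up; "free" stands in for the recomputed vha->req->cnt.
 */
#if 0
	/* Hypothetical ring: length 128, producer ring_index 10. */
	cnt = 120;				/* consumer ahead  */
	free = cnt - ring_index;		/* 120 - 10 = 110  */
	cnt = 5;				/* consumer behind */
	free = length - (ring_index - cnt);	/* 128 - 5  = 123  */
#endif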
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
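/*
 * Note: qlt_make_handle() hands out handles in the range
 * 1..DEFAULT_OUTSTANDING_COMMANDS, skipping slots still owned by an
 * in-flight command (ha->tgt.cmds[h-1] != NULL) as well as the
 * reserved values QLA_TGT_NULL_HANDLE and QLA_TGT_SKIP_HANDLE; ORing
 * in CTIO_COMPLETION_HANDLE_MARK lets the response path tell a
 * target-mode CTIO completion apart from an initiator-mode handle.
 */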
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit-specific
		 * fields is used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
			prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
			prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
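/*
 * Note: the underflow/overflow handling above mirrors the FCP response
 * format -- the TCM residual_count is copied into the CTIO and
 * SS_RESIDUAL_UNDER/SS_RESIDUAL_OVER is ORed into the SCSI status word
 * so the initiator can reconcile the transfer length; an extra status
 * IOCB is reserved only when data and a non-good status (or valid
 * sense) must be sent on the same exchange.
 */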
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 * Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
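/*
 * Note: the generator above is the classic Park-Miller "minimal
 * standard" LCG, rv = 16807 * rv mod (2^31 - 1), computed with
 * Schrage's decomposition (127773 = m / a, 2836 = m % a) so the
 * intermediate products stay within signed 32-bit range.
 */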
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This doesn't simulate a real lost status packet, so it won't lead to an SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
sizeof(ctio->u.status1.sense_data)); 1883 } 1884 1885 /* Sense with len > 24: is that even possible? */ 1886 } 1887 1888 /* 1889 * Callback to set up a response with xmit_type QLA_TGT_XMIT_DATA 1890 * and/or QLA_TGT_XMIT_STATUS for >= 24xx silicon 1891 */ 1892 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 1893 uint8_t scsi_status) 1894 { 1895 struct scsi_qla_host *vha = cmd->vha; 1896 struct qla_hw_data *ha = vha->hw; 1897 struct ctio7_to_24xx *pkt; 1898 struct qla_tgt_prm prm; 1899 uint32_t full_req_cnt = 0; 1900 unsigned long flags = 0; 1901 int res; 1902 1903 memset(&prm, 0, sizeof(prm)); 1904 qlt_check_srr_debug(cmd, &xmit_type); 1905 1906 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, 1907 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " 1908 "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 1909 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); 1910 1911 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 1912 &full_req_cnt); 1913 if (unlikely(res != 0)) { 1914 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) 1915 return 0; 1916 1917 return res; 1918 } 1919 1920 spin_lock_irqsave(&ha->hardware_lock, flags); 1921 1922 /* Does the F/W have enough IOCBs for this request? */ 1923 res = qlt_check_reserve_free_req(vha, full_req_cnt); 1924 if (unlikely(res)) 1925 goto out_unmap_unlock; 1926 1927 res = qlt_24xx_build_ctio_pkt(&prm, vha); 1928 if (unlikely(res != 0)) 1929 goto out_unmap_unlock; 1930 1931 1932 pkt = (struct ctio7_to_24xx *)prm.pkt; 1933 1934 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 1935 pkt->u.status0.flags |= 1936 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | 1937 CTIO7_FLAGS_STATUS_MODE_0); 1938 1939 qlt_load_data_segments(&prm, vha); 1940 1941 if (prm.add_status_pkt == 0) { 1942 if (xmit_type & QLA_TGT_XMIT_STATUS) { 1943 pkt->u.status0.scsi_status = 1944 cpu_to_le16(prm.rq_result); 1945 pkt->u.status0.residual = 1946 cpu_to_le32(prm.residual); 1947 pkt->u.status0.flags |= __constant_cpu_to_le16( 1948 CTIO7_FLAGS_SEND_STATUS); 1949 if (qlt_need_explicit_conf(ha, cmd, 0)) { 1950 pkt->u.status0.flags |= 1951 __constant_cpu_to_le16( 1952 CTIO7_FLAGS_EXPLICIT_CONFORM | 1953 CTIO7_FLAGS_CONFORM_REQ); 1954 } 1955 } 1956 1957 } else { 1958 /* 1959 * We have already made sure that there are enough 1960 * request entries available, so req_pkt() below will 1961 * not need to drop the HW lock.
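* The extra status CTIO was already accounted for in full_req_cnt by qlt_pre_xmit_response().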
1962 */ 1963 struct ctio7_to_24xx *ctio = 1964 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 1965 1966 ql_dbg(ql_dbg_tgt, vha, 0xe019, 1967 "Building additional status packet\n"); 1968 1969 memcpy(ctio, pkt, sizeof(*ctio)); 1970 ctio->entry_count = 1; 1971 ctio->dseg_count = 0; 1972 ctio->u.status1.flags &= ~__constant_cpu_to_le16( 1973 CTIO7_FLAGS_DATA_IN); 1974 1975 /* Real finish is ctio_m1's finish */ 1976 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 1977 pkt->u.status0.flags |= __constant_cpu_to_le16( 1978 CTIO7_FLAGS_DONT_RET_CTIO); 1979 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 1980 &prm); 1981 pr_debug("Status CTIO7: %p\n", ctio); 1982 } 1983 } else 1984 qlt_24xx_init_ctio_to_isp(pkt, &prm); 1985 1986 1987 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 1988 1989 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 1990 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 1991 pkt, scsi_status); 1992 1993 qla2x00_start_iocbs(vha, vha->req); 1994 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1995 1996 return 0; 1997 1998 out_unmap_unlock: 1999 if (cmd->sg_mapped) 2000 qlt_unmap_sg(vha, cmd); 2001 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2002 2003 return res; 2004 } 2005 EXPORT_SYMBOL(qlt_xmit_response); 2006 2007 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2008 { 2009 struct ctio7_to_24xx *pkt; 2010 struct scsi_qla_host *vha = cmd->vha; 2011 struct qla_hw_data *ha = vha->hw; 2012 struct qla_tgt *tgt = cmd->tgt; 2013 struct qla_tgt_prm prm; 2014 unsigned long flags; 2015 int res = 0; 2016 2017 memset(&prm, 0, sizeof(prm)); 2018 prm.cmd = cmd; 2019 prm.tgt = tgt; 2020 prm.sg = NULL; 2021 prm.req_cnt = 1; 2022 2023 /* Send marker if required */ 2024 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2025 return -EIO; 2026 2027 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", 2028 (int)vha->vp_idx); 2029 2030 /* Calculate number of entries and segments required */ 2031 if (qlt_pci_map_calc_cnt(&prm) != 0) 2032 return -EAGAIN; 2033 2034 spin_lock_irqsave(&ha->hardware_lock, flags); 2035 2036 /* Does F/W have an IOCBs for this request */ 2037 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2038 if (res != 0) 2039 goto out_unlock_free_unmap; 2040 2041 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2042 if (unlikely(res != 0)) 2043 goto out_unlock_free_unmap; 2044 pkt = (struct ctio7_to_24xx *)prm.pkt; 2045 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2046 CTIO7_FLAGS_STATUS_MODE_0); 2047 qlt_load_data_segments(&prm, vha); 2048 2049 cmd->state = QLA_TGT_STATE_NEED_DATA; 2050 2051 qla2x00_start_iocbs(vha, vha->req); 2052 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2053 2054 return res; 2055 2056 out_unlock_free_unmap: 2057 if (cmd->sg_mapped) 2058 qlt_unmap_sg(vha, cmd); 2059 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2060 2061 return res; 2062 } 2063 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2064 2065 /* If hardware_lock held on entry, might drop it, then reaquire */ 2066 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2067 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2068 struct qla_tgt_cmd *cmd, 2069 struct atio_from_isp *atio) 2070 { 2071 struct ctio7_to_24xx *ctio24; 2072 struct qla_hw_data *ha = vha->hw; 2073 request_t *pkt; 2074 int ret = 0; 2075 2076 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2077 2078 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2079 if (pkt == NULL) { 2080 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2081 "qla_target(%d): %s 
failed: unable to allocate " 2082 "request packet\n", vha->vp_idx, __func__); 2083 return -ENOMEM; 2084 } 2085 2086 if (cmd != NULL) { 2087 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2088 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2089 "qla_target(%d): Terminating cmd %p with " 2090 "incorrect state %d\n", vha->vp_idx, cmd, 2091 cmd->state); 2092 } else 2093 ret = 1; 2094 } 2095 2096 pkt->entry_count = 1; 2097 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2098 2099 ctio24 = (struct ctio7_to_24xx *)pkt; 2100 ctio24->entry_type = CTIO_TYPE7; 2101 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; 2102 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 2103 ctio24->vp_index = vha->vp_idx; 2104 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2105 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2106 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2107 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 2108 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 2109 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 2110 CTIO7_FLAGS_TERMINATE); 2111 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2112 2113 /* Most likely, it isn't needed */ 2114 ctio24->u.status1.residual = get_unaligned((uint32_t *) 2115 &atio->u.isp24.fcp_cmnd.add_cdb[ 2116 atio->u.isp24.fcp_cmnd.add_cdb_len]); 2117 if (ctio24->u.status1.residual != 0) 2118 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 2119 2120 qla2x00_start_iocbs(vha, vha->req); 2121 return ret; 2122 } 2123 2124 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2125 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2126 { 2127 unsigned long flags; 2128 int rc; 2129 2130 if (qlt_issue_marker(vha, ha_locked) < 0) 2131 return; 2132 2133 if (ha_locked) { 2134 rc = __qlt_send_term_exchange(vha, cmd, atio); 2135 goto done; 2136 } 2137 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 2138 rc = __qlt_send_term_exchange(vha, cmd, atio); 2139 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 2140 done: 2141 if (rc == 1) { 2142 if (!ha_locked && !in_interrupt()) 2143 msleep(250); /* just in case */ 2144 2145 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2146 } 2147 } 2148 2149 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2150 { 2151 BUG_ON(cmd->sg_mapped); 2152 2153 if (unlikely(cmd->free_sg)) 2154 kfree(cmd->sg); 2155 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2156 } 2157 EXPORT_SYMBOL(qlt_free_cmd); 2158 2159 /* ha->hardware_lock supposed to be held on entry */ 2160 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2161 struct qla_tgt_cmd *cmd, void *ctio) 2162 { 2163 struct qla_tgt_srr_ctio *sc; 2164 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2165 struct qla_tgt_srr_imm *imm; 2166 2167 tgt->ctio_srr_id++; 2168 2169 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2170 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2171 2172 if (!ctio) { 2173 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2174 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2175 vha->vp_idx); 2176 return -EINVAL; 2177 } 2178 2179 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2180 if (sc != NULL) { 2181 sc->cmd = cmd; 2182 /* IRQ is already OFF */ 2183 spin_lock(&tgt->srr_lock); 2184 sc->srr_id = tgt->ctio_srr_id; 2185 list_add_tail(&sc->srr_list_entry, 2186 &tgt->srr_ctio_list); 2187 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2188 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2189 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2190 int found = 0; 2191 list_for_each_entry(imm, &tgt->srr_imm_list, 2192 
srr_list_entry) { 2193 if (imm->srr_id == sc->srr_id) { 2194 found = 1; 2195 break; 2196 } 2197 } 2198 if (found) { 2199 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2200 "Scheduling srr work\n"); 2201 schedule_work(&tgt->srr_work); 2202 } else { 2203 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2204 "qla_target(%d): imm_srr_id " 2205 "== ctio_srr_id (%d), but there is no " 2206 "corresponding SRR IMM, deleting CTIO " 2207 "SRR %p\n", vha->vp_idx, 2208 tgt->ctio_srr_id, sc); 2209 list_del(&sc->srr_list_entry); 2210 spin_unlock(&tgt->srr_lock); 2211 2212 kfree(sc); 2213 return -EINVAL; 2214 } 2215 } 2216 spin_unlock(&tgt->srr_lock); 2217 } else { 2218 struct qla_tgt_srr_imm *ti; 2219 2220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2221 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2222 vha->vp_idx); 2223 spin_lock(&tgt->srr_lock); 2224 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2225 srr_list_entry) { 2226 if (imm->srr_id == tgt->ctio_srr_id) { 2227 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2228 "IMM SRR %p deleted (id %d)\n", 2229 imm, imm->srr_id); 2230 list_del(&imm->srr_list_entry); 2231 qlt_reject_free_srr_imm(vha, imm, 1); 2232 } 2233 } 2234 spin_unlock(&tgt->srr_lock); 2235 2236 return -ENOMEM; 2237 } 2238 2239 return 0; 2240 } 2241 2242 /* 2243 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2244 */ 2245 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2246 struct qla_tgt_cmd *cmd, uint32_t status) 2247 { 2248 int term = 0; 2249 2250 if (ctio != NULL) { 2251 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2252 term = !(c->flags & 2253 __constant_cpu_to_le16(OF_TERM_EXCH)); 2254 } else 2255 term = 1; 2256 2257 if (term) 2258 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2259 2260 return term; 2261 } 2262 2263 /* ha->hardware_lock supposed to be held on entry */ 2264 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2265 uint32_t handle) 2266 { 2267 struct qla_hw_data *ha = vha->hw; 2268 2269 handle--; 2270 if (ha->tgt.cmds[handle] != NULL) { 2271 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2272 ha->tgt.cmds[handle] = NULL; 2273 return cmd; 2274 } else 2275 return NULL; 2276 } 2277 2278 /* ha->hardware_lock supposed to be held on entry */ 2279 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2280 uint32_t handle, void *ctio) 2281 { 2282 struct qla_tgt_cmd *cmd = NULL; 2283 2284 /* Clear out internal marks */ 2285 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2286 CTIO_INTERMEDIATE_HANDLE_MARK); 2287 2288 if (handle != QLA_TGT_NULL_HANDLE) { 2289 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2290 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2291 "SKIP_HANDLE CTIO\n"); 2292 return NULL; 2293 } 2294 /* handle-1 is actually used */ 2295 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2296 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2297 "qla_target(%d): Wrong handle %x received\n", 2298 vha->vp_idx, handle); 2299 return NULL; 2300 } 2301 cmd = qlt_get_cmd(vha, handle); 2302 if (unlikely(cmd == NULL)) { 2303 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2304 "qla_target(%d): Suspicious: unable to " 2305 "find the command with handle %x\n", vha->vp_idx, 2306 handle); 2307 return NULL; 2308 } 2309 } else if (ctio != NULL) { 2310 /* We can't get loop ID from CTIO7 */ 2311 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2312 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2313 "support NULL handles\n", vha->vp_idx); 2314 return NULL; 2315 } 2316 2317 return cmd; 2318 } 2319 2320 /* 2321 * ha->hardware_lock supposed to be held 
on entry. Might drop it, then reacquire 2322 */ 2323 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 2324 uint32_t status, void *ctio) 2325 { 2326 struct qla_hw_data *ha = vha->hw; 2327 struct se_cmd *se_cmd; 2328 struct target_core_fabric_ops *tfo; 2329 struct qla_tgt_cmd *cmd; 2330 2331 ql_dbg(ql_dbg_tgt, vha, 0xe01e, 2332 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", 2333 vha->vp_idx, ctio, status, handle); 2334 2335 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 2336 /* That can happen only in case of an error/reset/abort */ 2337 if (status != CTIO_SUCCESS) { 2338 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 2339 "Intermediate CTIO received" 2340 " (status %x)\n", status); 2341 } 2342 return; 2343 } 2344 2345 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2346 if (cmd == NULL) 2347 return; 2348 2349 se_cmd = &cmd->se_cmd; 2350 tfo = se_cmd->se_tfo; 2351 2352 if (cmd->sg_mapped) 2353 qlt_unmap_sg(vha, cmd); 2354 2355 if (unlikely(status != CTIO_SUCCESS)) { 2356 switch (status & 0xFFFF) { 2357 case CTIO_LIP_RESET: 2358 case CTIO_TARGET_RESET: 2359 case CTIO_ABORTED: 2360 case CTIO_TIMEOUT: 2361 case CTIO_INVALID_RX_ID: 2362 /* They are OK */ 2363 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 2364 "qla_target(%d): CTIO with " 2365 "status %#x received, state %x, se_cmd %p, " 2366 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 2367 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 2368 status, cmd->state, se_cmd); 2369 break; 2370 2371 case CTIO_PORT_LOGGED_OUT: 2372 case CTIO_PORT_UNAVAILABLE: 2373 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 2374 "qla_target(%d): CTIO with PORT LOGGED " 2375 "OUT (29) or PORT UNAVAILABLE (28) status %x " 2376 "received (state %x, se_cmd %p)\n", vha->vp_idx, 2377 status, cmd->state, se_cmd); 2378 break; 2379 2380 case CTIO_SRR_RECEIVED: 2381 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 2382 "qla_target(%d): CTIO with SRR_RECEIVED" 2383 " status %x received (state %x, se_cmd %p)\n", 2384 vha->vp_idx, status, cmd->state, se_cmd); 2385 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 2386 break; 2387 else 2388 return; 2389 2390 default: 2391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 2392 "qla_target(%d): CTIO with error status " 2393 "0x%x received (state %x, se_cmd %p)\n", 2394 vha->vp_idx, status, cmd->state, se_cmd); 2395 break; 2396 } 2397 2398 if (cmd->state != QLA_TGT_STATE_NEED_DATA) 2399 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 2400 return; 2401 } 2402 2403 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 2404 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 2405 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 2406 int rx_status = 0; 2407 2408 cmd->state = QLA_TGT_STATE_DATA_IN; 2409 2410 if (unlikely(status != CTIO_SUCCESS)) 2411 rx_status = -EIO; 2412 else 2413 cmd->write_data_transferred = 1; 2414 2415 ql_dbg(ql_dbg_tgt, vha, 0xe020, 2416 "Data received, context %x, rx_status %d\n", 2417 0x0, rx_status); 2418 2419 ha->tgt.tgt_ops->handle_data(cmd); 2420 return; 2421 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 2422 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 2423 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); 2424 } else { 2425 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 2426 "qla_target(%d): A command in state (%d) should " 2427 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 2428 } 2429 2430 if (unlikely(status != CTIO_SUCCESS)) { 2431 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 2432 dump_stack(); 2433 } 2434 2435 ha->tgt.tgt_ops->free_cmd(cmd); 2436 } 2437
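/* Map the FCP task attribute from the incoming ATIO to the SAM task attribute value expected by the target core. */ 2438 static inline int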
qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 2439 uint8_t task_codes) 2440 { 2441 int fcp_task_attr; 2442 2443 switch (task_codes) { 2444 case ATIO_SIMPLE_QUEUE: 2445 fcp_task_attr = MSG_SIMPLE_TAG; 2446 break; 2447 case ATIO_HEAD_OF_QUEUE: 2448 fcp_task_attr = MSG_HEAD_TAG; 2449 break; 2450 case ATIO_ORDERED_QUEUE: 2451 fcp_task_attr = MSG_ORDERED_TAG; 2452 break; 2453 case ATIO_ACA_QUEUE: 2454 fcp_task_attr = MSG_ACA_TAG; 2455 break; 2456 case ATIO_UNTAGGED: 2457 fcp_task_attr = MSG_SIMPLE_TAG; 2458 break; 2459 default: 2460 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 2461 "qla_target: unknown task code %x, using ORDERED instead\n", 2462 task_codes); 2463 fcp_task_attr = MSG_ORDERED_TAG; 2464 break; 2465 } 2466 2467 return fcp_task_attr; 2468 } 2469 2470 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, 2471 uint8_t *); 2472 /* 2473 * Process context for I/O path into tcm_qla2xxx code 2474 */ 2475 static void qlt_do_work(struct work_struct *work) 2476 { 2477 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 2478 scsi_qla_host_t *vha = cmd->vha; 2479 struct qla_hw_data *ha = vha->hw; 2480 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2481 struct qla_tgt_sess *sess = NULL; 2482 struct atio_from_isp *atio = &cmd->atio; 2483 unsigned char *cdb; 2484 unsigned long flags; 2485 uint32_t data_length; 2486 int ret, fcp_task_attr, data_dir, bidi = 0; 2487 2488 if (tgt->tgt_stop) 2489 goto out_term; 2490 2491 spin_lock_irqsave(&ha->hardware_lock, flags); 2492 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2493 atio->u.isp24.fcp_hdr.s_id); 2494 /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ 2495 if (sess) 2496 kref_get(&sess->se_sess->sess_kref); 2497 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2498 2499 if (unlikely(!sess)) { 2500 uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; 2501 2502 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 2503 "qla_target(%d): Unable to find wwn login" 2504 " (s_id %x:%x:%x), trying to create it manually\n", 2505 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 2506 2507 if (atio->u.raw.entry_count > 1) { 2508 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 2509 "Dropping multi-entry cmd %p\n", cmd); 2510 goto out_term; 2511 } 2512 2513 mutex_lock(&vha->vha_tgt.tgt_mutex); 2514 sess = qlt_make_local_sess(vha, s_id); 2515 /* sess has an extra creation ref.
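That reference is dropped again via ha->tgt.tgt_ops->put_sess() once the command has been handed to the target core, or in the error path below.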
*/ 2516 mutex_unlock(&vha->vha_tgt.tgt_mutex); 2517 2518 if (!sess) 2519 goto out_term; 2520 } 2521 2522 cmd->sess = sess; 2523 cmd->loop_id = sess->loop_id; 2524 cmd->conf_compl_supported = sess->conf_compl_supported; 2525 2526 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 2527 cmd->tag = atio->u.isp24.exchange_addr; 2528 cmd->unpacked_lun = scsilun_to_int( 2529 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 2530 2531 if (atio->u.isp24.fcp_cmnd.rddata && 2532 atio->u.isp24.fcp_cmnd.wrdata) { 2533 bidi = 1; 2534 data_dir = DMA_TO_DEVICE; 2535 } else if (atio->u.isp24.fcp_cmnd.rddata) 2536 data_dir = DMA_FROM_DEVICE; 2537 else if (atio->u.isp24.fcp_cmnd.wrdata) 2538 data_dir = DMA_TO_DEVICE; 2539 else 2540 data_dir = DMA_NONE; 2541 2542 fcp_task_attr = qlt_get_fcp_task_attr(vha, 2543 atio->u.isp24.fcp_cmnd.task_attr); 2544 data_length = be32_to_cpu(get_unaligned((uint32_t *) 2545 &atio->u.isp24.fcp_cmnd.add_cdb[ 2546 atio->u.isp24.fcp_cmnd.add_cdb_len])); 2547 2548 ql_dbg(ql_dbg_tgt, vha, 0xe022, 2549 "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", 2550 cmd, cmd->unpacked_lun, cmd->tag); 2551 2552 ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 2553 fcp_task_attr, data_dir, bidi); 2554 if (ret != 0) 2555 goto out_term; 2556 /* 2557 * Drop the extra session reference taken above (kref_get() or the creation reference from qlt_make_local_sess()). 2558 */ 2559 spin_lock_irqsave(&ha->hardware_lock, flags); 2560 ha->tgt.tgt_ops->put_sess(sess); 2561 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2562 return; 2563 2564 out_term: 2565 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); 2566 /* 2567 * The cmd has not been sent to the target core yet, so pass NULL as the 2568 * second argument to qlt_send_term_exchange() and free the memory here. 2569 */ 2570 spin_lock_irqsave(&ha->hardware_lock, flags); 2571 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 2572 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2573 if (sess) 2574 ha->tgt.tgt_ops->put_sess(sess); 2575 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2576 } 2577 2578 /* ha->hardware_lock supposed to be held on entry */ 2579 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 2580 struct atio_from_isp *atio) 2581 { 2582 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2583 struct qla_tgt_cmd *cmd; 2584 2585 if (unlikely(tgt->tgt_stop)) { 2586 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 2587 "New command while device %p is shutting down\n", tgt); 2588 return -EFAULT; 2589 } 2590 2591 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); 2592 if (!cmd) { 2593 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, 2594 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 2595 return -ENOMEM; 2596 } 2597 2598 INIT_LIST_HEAD(&cmd->cmd_list); 2599 2600 memcpy(&cmd->atio, atio, sizeof(*atio)); 2601 cmd->state = QLA_TGT_STATE_NEW; 2602 cmd->tgt = vha->vha_tgt.qla_tgt; 2603 cmd->vha = vha; 2604 2605 INIT_WORK(&cmd->work, qlt_do_work); 2606 queue_work(qla_tgt_wq, &cmd->work); 2607 return 0; 2608 2609 } 2610 2611 /* ha->hardware_lock supposed to be held on entry */ 2612 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 2613 int fn, void *iocb, int flags) 2614 { 2615 struct scsi_qla_host *vha = sess->vha; 2616 struct qla_hw_data *ha = vha->hw; 2617 struct qla_tgt_mgmt_cmd *mcmd; 2618 int res; 2619 uint8_t tmr_func; 2620 2621 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2622 if (!mcmd) { 2623 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 2624 "qla_target(%d): Allocation of management " 2625 "command failed, some commands and their data could " 2626
"leak\n", vha->vp_idx); 2627 return -ENOMEM; 2628 } 2629 memset(mcmd, 0, sizeof(*mcmd)); 2630 mcmd->sess = sess; 2631 2632 if (iocb) { 2633 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2634 sizeof(mcmd->orig_iocb.imm_ntfy)); 2635 } 2636 mcmd->tmr_func = fn; 2637 mcmd->flags = flags; 2638 2639 switch (fn) { 2640 case QLA_TGT_CLEAR_ACA: 2641 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 2642 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 2643 tmr_func = TMR_CLEAR_ACA; 2644 break; 2645 2646 case QLA_TGT_TARGET_RESET: 2647 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 2648 "qla_target(%d): TARGET_RESET received\n", 2649 sess->vha->vp_idx); 2650 tmr_func = TMR_TARGET_WARM_RESET; 2651 break; 2652 2653 case QLA_TGT_LUN_RESET: 2654 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 2655 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 2656 tmr_func = TMR_LUN_RESET; 2657 break; 2658 2659 case QLA_TGT_CLEAR_TS: 2660 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, 2661 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); 2662 tmr_func = TMR_CLEAR_TASK_SET; 2663 break; 2664 2665 case QLA_TGT_ABORT_TS: 2666 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, 2667 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); 2668 tmr_func = TMR_ABORT_TASK_SET; 2669 break; 2670 #if 0 2671 case QLA_TGT_ABORT_ALL: 2672 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, 2673 "qla_target(%d): Doing ABORT_ALL_TASKS\n", 2674 sess->vha->vp_idx); 2675 tmr_func = 0; 2676 break; 2677 2678 case QLA_TGT_ABORT_ALL_SESS: 2679 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, 2680 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", 2681 sess->vha->vp_idx); 2682 tmr_func = 0; 2683 break; 2684 2685 case QLA_TGT_NEXUS_LOSS_SESS: 2686 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, 2687 "qla_target(%d): Doing NEXUS_LOSS_SESS\n", 2688 sess->vha->vp_idx); 2689 tmr_func = 0; 2690 break; 2691 2692 case QLA_TGT_NEXUS_LOSS: 2693 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, 2694 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); 2695 tmr_func = 0; 2696 break; 2697 #endif 2698 default: 2699 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, 2700 "qla_target(%d): Unknown task mgmt fn 0x%x\n", 2701 sess->vha->vp_idx, fn); 2702 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2703 return -ENOSYS; 2704 } 2705 2706 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); 2707 if (res != 0) { 2708 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 2709 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 2710 sess->vha->vp_idx, res); 2711 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2712 return -EFAULT; 2713 } 2714 2715 return 0; 2716 } 2717 2718 /* ha->hardware_lock supposed to be held on entry */ 2719 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 2720 { 2721 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2722 struct qla_hw_data *ha = vha->hw; 2723 struct qla_tgt *tgt; 2724 struct qla_tgt_sess *sess; 2725 uint32_t lun, unpacked_lun; 2726 int lun_size, fn; 2727 2728 tgt = vha->vha_tgt.qla_tgt; 2729 2730 lun = a->u.isp24.fcp_cmnd.lun; 2731 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); 2732 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 2733 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2734 a->u.isp24.fcp_hdr.s_id); 2735 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2736 2737 if (!sess) { 2738 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 2739 "qla_target(%d): task mgmt fn 0x%x for " 2740 "non-existant session\n", vha->vp_idx, fn); 2741 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 2742 sizeof(struct atio_from_isp)); 2743 } 2744 2745 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 
2746 } 2747 2748 /* ha->hardware_lock supposed to be held on entry */ 2749 static int __qlt_abort_task(struct scsi_qla_host *vha, 2750 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) 2751 { 2752 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2753 struct qla_hw_data *ha = vha->hw; 2754 struct qla_tgt_mgmt_cmd *mcmd; 2755 uint32_t lun, unpacked_lun; 2756 int rc; 2757 2758 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2759 if (mcmd == NULL) { 2760 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 2761 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 2762 vha->vp_idx, __func__); 2763 return -ENOMEM; 2764 } 2765 memset(mcmd, 0, sizeof(*mcmd)); 2766 2767 mcmd->sess = sess; 2768 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2769 sizeof(mcmd->orig_iocb.imm_ntfy)); 2770 2771 lun = a->u.isp24.fcp_cmnd.lun; 2772 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2773 2774 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, 2775 le16_to_cpu(iocb->u.isp2x.seq_id)); 2776 if (rc != 0) { 2777 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 2778 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2779 vha->vp_idx, rc); 2780 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2781 return -EFAULT; 2782 } 2783 2784 return 0; 2785 } 2786 2787 /* ha->hardware_lock supposed to be held on entry */ 2788 static int qlt_abort_task(struct scsi_qla_host *vha, 2789 struct imm_ntfy_from_isp *iocb) 2790 { 2791 struct qla_hw_data *ha = vha->hw; 2792 struct qla_tgt_sess *sess; 2793 int loop_id; 2794 2795 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 2796 2797 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 2798 if (sess == NULL) { 2799 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 2800 "qla_target(%d): task abort for non-existent " 2801 "session\n", vha->vp_idx); 2802 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 2803 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 2804 } 2805 2806 return __qlt_abort_task(vha, iocb, sess); 2807 } 2808 2809 /* 2810 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire 2811 */ 2812 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2813 struct imm_ntfy_from_isp *iocb) 2814 { 2815 int res = 0; 2816 2817 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 2818 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 2819 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 2820 2821 switch (iocb->u.isp24.status_subcode) { 2822 case ELS_PLOGI: 2823 case ELS_FLOGI: 2824 case ELS_PRLI: 2825 case ELS_LOGO: 2826 case ELS_PRLO: 2827 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2828 break; 2829 case ELS_PDISC: 2830 case ELS_ADISC: 2831 { 2832 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2833 if (tgt->link_reinit_iocb_pending) { 2834 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2835 0, 0, 0, 0, 0, 0); 2836 tgt->link_reinit_iocb_pending = 0; 2837 } 2838 res = 1; /* send notify ack */ 2839 break; 2840 } 2841 2842 default: 2843 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 2844 "qla_target(%d): Unsupported ELS command %x " 2845 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 2846 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2847 break; 2848 } 2849 2850 return res; 2851 } 2852 2853 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) 2854 { 2855 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; 2856 size_t first_offset = 0, rem_offset = offset, tmp = 0; 2857 int i, sg_srr_cnt, bufflen = 0; 2858 2859 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, 2860 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " 2861 "cmd->sg_cnt: %u, direction: %d\n", 2862 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 2863 2864 /* 2865 * FIXME: Reject non zero SRR relative offset until we can test 2866 * this code properly. 2867 */ 2868 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 2869 return -1; 2870 2871 if (!cmd->sg || !cmd->sg_cnt) { 2872 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 2873 "Missing cmd->sg or zero cmd->sg_cnt in" 2874 " qla_tgt_set_data_offset\n"); 2875 return -EINVAL; 2876 } 2877 /* 2878 * Walk the current cmd->sg list until we locate the new sg_srr_start 2879 */ 2880 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 2881 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 2882 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 2883 i, sg, sg_page(sg), sg->length, sg->offset); 2884 2885 if ((sg->length + tmp) > offset) { 2886 first_offset = rem_offset; 2887 sg_srr_start = sg; 2888 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 2889 "Found matching sg[%d], using %p as sg_srr_start, " 2890 "and using first_offset: %zu\n", i, sg, 2891 first_offset); 2892 break; 2893 } 2894 tmp += sg->length; 2895 rem_offset -= sg->length; 2896 } 2897 2898 if (!sg_srr_start) { 2899 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 2900 "Unable to locate sg_srr_start for offset: %u\n", offset); 2901 return -EINVAL; 2902 } 2903 sg_srr_cnt = (cmd->sg_cnt - i); 2904 2905 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 2906 if (!sg_srr) { 2907 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 2908 "Unable to allocate sgp\n"); 2909 return -ENOMEM; 2910 } 2911 sg_init_table(sg_srr, sg_srr_cnt); 2912 sgp = &sg_srr[0]; 2913 /* 2914 * Walk the remaining list for sg_srr_start, mapping to the newly 2915 * allocated sg_srr taking first_offset into account. 
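* Only the first copied entry is shortened by first_offset; the remaining entries are copied in full.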
2916 */ 2917 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 2918 if (first_offset) { 2919 sg_set_page(sgp, sg_page(sg), 2920 (sg->length - first_offset), first_offset); 2921 first_offset = 0; 2922 } else { 2923 sg_set_page(sgp, sg_page(sg), sg->length, 0); 2924 } 2925 bufflen += sgp->length; 2926 2927 sgp = sg_next(sgp); 2928 if (!sgp) 2929 break; 2930 } 2931 2932 cmd->sg = sg_srr; 2933 cmd->sg_cnt = sg_srr_cnt; 2934 cmd->bufflen = bufflen; 2935 cmd->offset += offset; 2936 cmd->free_sg = 1; 2937 2938 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 2939 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 2940 cmd->sg_cnt); 2941 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 2942 cmd->bufflen); 2943 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 2944 cmd->offset); 2945 2946 if (cmd->sg_cnt < 0) 2947 BUG(); 2948 2949 if (cmd->bufflen < 0) 2950 BUG(); 2951 2952 return 0; 2953 } 2954 2955 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 2956 uint32_t srr_rel_offs, int *xmit_type) 2957 { 2958 int res = 0, rel_offs; 2959 2960 rel_offs = srr_rel_offs - cmd->offset; 2961 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 2962 srr_rel_offs, rel_offs); 2963 2964 *xmit_type = QLA_TGT_XMIT_ALL; 2965 2966 if (rel_offs < 0) { 2967 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 2968 "qla_target(%d): SRR rel_offs (%d) < 0", 2969 cmd->vha->vp_idx, rel_offs); 2970 res = -1; 2971 } else if (rel_offs == cmd->bufflen) 2972 *xmit_type = QLA_TGT_XMIT_STATUS; 2973 else if (rel_offs > 0) 2974 res = qlt_set_data_offset(cmd, rel_offs); 2975 2976 return res; 2977 } 2978 2979 /* No locks, thread context */ 2980 static void qlt_handle_srr(struct scsi_qla_host *vha, 2981 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 2982 { 2983 struct imm_ntfy_from_isp *ntfy = 2984 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 2985 struct qla_hw_data *ha = vha->hw; 2986 struct qla_tgt_cmd *cmd = sctio->cmd; 2987 struct se_cmd *se_cmd = &cmd->se_cmd; 2988 unsigned long flags; 2989 int xmit_type = 0, resp = 0; 2990 uint32_t offset; 2991 uint16_t srr_ui; 2992 2993 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 2994 srr_ui = ntfy->u.isp24.srr_ui; 2995 2996 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 2997 cmd, srr_ui); 2998 2999 switch (srr_ui) { 3000 case SRR_IU_STATUS: 3001 spin_lock_irqsave(&ha->hardware_lock, flags); 3002 qlt_send_notify_ack(vha, ntfy, 3003 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3004 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3005 xmit_type = QLA_TGT_XMIT_STATUS; 3006 resp = 1; 3007 break; 3008 case SRR_IU_DATA_IN: 3009 if (!cmd->sg || !cmd->sg_cnt) { 3010 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3011 "Unable to process SRR_IU_DATA_IN due to" 3012 " missing cmd->sg, state: %d\n", cmd->state); 3013 dump_stack(); 3014 goto out_reject; 3015 } 3016 if (se_cmd->scsi_status != 0) { 3017 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3018 "Rejecting SRR_IU_DATA_IN with non GOOD " 3019 "scsi_status\n"); 3020 goto out_reject; 3021 } 3022 cmd->bufflen = se_cmd->data_length; 3023 3024 if (qlt_has_data(cmd)) { 3025 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3026 goto out_reject; 3027 spin_lock_irqsave(&ha->hardware_lock, flags); 3028 qlt_send_notify_ack(vha, ntfy, 3029 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3030 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3031 resp = 1; 3032 } else { 3033 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3034 "qla_target(%d): SRR for in data for cmd " 3035 "without them 
(tag %d, SCSI status %d), " 3036 "reject", vha->vp_idx, cmd->tag, 3037 cmd->se_cmd.scsi_status); 3038 goto out_reject; 3039 } 3040 break; 3041 case SRR_IU_DATA_OUT: 3042 if (!cmd->sg || !cmd->sg_cnt) { 3043 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3044 "Unable to process SRR_IU_DATA_OUT due to" 3045 " missing cmd->sg\n"); 3046 dump_stack(); 3047 goto out_reject; 3048 } 3049 if (se_cmd->scsi_status != 0) { 3050 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3051 "Rejecting SRR_IU_DATA_OUT" 3052 " with non GOOD scsi_status\n"); 3053 goto out_reject; 3054 } 3055 cmd->bufflen = se_cmd->data_length; 3056 3057 if (qlt_has_data(cmd)) { 3058 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3059 goto out_reject; 3060 spin_lock_irqsave(&ha->hardware_lock, flags); 3061 qlt_send_notify_ack(vha, ntfy, 3062 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3063 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3064 if (xmit_type & QLA_TGT_XMIT_DATA) 3065 qlt_rdy_to_xfer(cmd); 3066 } else { 3067 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3068 "qla_target(%d): SRR for out data for cmd " 3069 "without them (tag %d, SCSI status %d), " 3070 "reject", vha->vp_idx, cmd->tag, 3071 cmd->se_cmd.scsi_status); 3072 goto out_reject; 3073 } 3074 break; 3075 default: 3076 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3077 "qla_target(%d): Unknown srr_ui value %x", 3078 vha->vp_idx, srr_ui); 3079 goto out_reject; 3080 } 3081 3082 /* Transmit response in case of status and data-in cases */ 3083 if (resp) 3084 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3085 3086 return; 3087 3088 out_reject: 3089 spin_lock_irqsave(&ha->hardware_lock, flags); 3090 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3091 NOTIFY_ACK_SRR_FLAGS_REJECT, 3092 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3093 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3094 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3095 cmd->state = QLA_TGT_STATE_DATA_IN; 3096 dump_stack(); 3097 } else 3098 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3099 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3100 } 3101 3102 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3103 struct qla_tgt_srr_imm *imm, int ha_locked) 3104 { 3105 struct qla_hw_data *ha = vha->hw; 3106 unsigned long flags = 0; 3107 3108 if (!ha_locked) 3109 spin_lock_irqsave(&ha->hardware_lock, flags); 3110 3111 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3112 NOTIFY_ACK_SRR_FLAGS_REJECT, 3113 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3114 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3115 3116 if (!ha_locked) 3117 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3118 3119 kfree(imm); 3120 } 3121 3122 static void qlt_handle_srr_work(struct work_struct *work) 3123 { 3124 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3125 struct scsi_qla_host *vha = tgt->vha; 3126 struct qla_tgt_srr_ctio *sctio; 3127 unsigned long flags; 3128 3129 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3130 tgt); 3131 3132 restart: 3133 spin_lock_irqsave(&tgt->srr_lock, flags); 3134 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3135 struct qla_tgt_srr_imm *imm, *i, *ti; 3136 struct qla_tgt_cmd *cmd; 3137 struct se_cmd *se_cmd; 3138 3139 imm = NULL; 3140 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3141 srr_list_entry) { 3142 if (i->srr_id == sctio->srr_id) { 3143 list_del(&i->srr_list_entry); 3144 if (imm) { 3145 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3146 "qla_target(%d): There must be " 3147 "only one IMM SRR per CTIO SRR " 3148 "(IMM SRR %p, id %d, CTIO %p\n", 
3149 vha->vp_idx, i, i->srr_id, sctio); 3150 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3151 } else 3152 imm = i; 3153 } 3154 } 3155 3156 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3157 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3158 sctio->srr_id); 3159 3160 if (imm == NULL) { 3161 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3162 "Not found matching IMM for SRR CTIO (id %d)\n", 3163 sctio->srr_id); 3164 continue; 3165 } else 3166 list_del(&sctio->srr_list_entry); 3167 3168 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3169 3170 cmd = sctio->cmd; 3171 /* 3172 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3173 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3174 * logic.. 3175 */ 3176 cmd->offset = 0; 3177 if (cmd->free_sg) { 3178 kfree(cmd->sg); 3179 cmd->sg = NULL; 3180 cmd->free_sg = 0; 3181 } 3182 se_cmd = &cmd->se_cmd; 3183 3184 cmd->sg_cnt = se_cmd->t_data_nents; 3185 cmd->sg = se_cmd->t_data_sg; 3186 3187 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3188 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3189 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3190 se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); 3191 3192 qlt_handle_srr(vha, sctio, imm); 3193 3194 kfree(imm); 3195 kfree(sctio); 3196 goto restart; 3197 } 3198 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3199 } 3200 3201 /* ha->hardware_lock supposed to be held on entry */ 3202 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3203 struct imm_ntfy_from_isp *iocb) 3204 { 3205 struct qla_tgt_srr_imm *imm; 3206 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3207 struct qla_tgt_srr_ctio *sctio; 3208 3209 tgt->imm_srr_id++; 3210 3211 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3212 vha->vp_idx); 3213 3214 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3215 if (imm != NULL) { 3216 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3217 3218 /* IRQ is already OFF */ 3219 spin_lock(&tgt->srr_lock); 3220 imm->srr_id = tgt->imm_srr_id; 3221 list_add_tail(&imm->srr_list_entry, 3222 &tgt->srr_imm_list); 3223 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3224 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3225 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3226 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3227 int found = 0; 3228 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3229 srr_list_entry) { 3230 if (sctio->srr_id == imm->srr_id) { 3231 found = 1; 3232 break; 3233 } 3234 } 3235 if (found) { 3236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3237 "Scheduling srr work\n"); 3238 schedule_work(&tgt->srr_work); 3239 } else { 3240 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3241 "qla_target(%d): imm_srr_id " 3242 "== ctio_srr_id (%d), but there is no " 3243 "corresponding SRR CTIO, deleting IMM " 3244 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3245 imm); 3246 list_del(&imm->srr_list_entry); 3247 3248 kfree(imm); 3249 3250 spin_unlock(&tgt->srr_lock); 3251 goto out_reject; 3252 } 3253 } 3254 spin_unlock(&tgt->srr_lock); 3255 } else { 3256 struct qla_tgt_srr_ctio *ts; 3257 3258 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3259 "qla_target(%d): Unable to allocate SRR IMM " 3260 "entry, SRR request will be rejected\n", vha->vp_idx); 3261 3262 /* IRQ is already OFF */ 3263 spin_lock(&tgt->srr_lock); 3264 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3265 srr_list_entry) { 3266 if (sctio->srr_id == tgt->imm_srr_id) { 3267 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3268 "CTIO SRR %p deleted (id %d)\n", 3269 sctio, sctio->srr_id); 3270 list_del(&sctio->srr_list_entry); 3271 qlt_send_term_exchange(vha, sctio->cmd, 3272 &sctio->cmd->atio, 1); 
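/* The exchange was terminated above, so just free the now-unneeded CTIO SRR entry. */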
3273 kfree(sctio); 3274 } 3275 } 3276 spin_unlock(&tgt->srr_lock); 3277 goto out_reject; 3278 } 3279 3280 return; 3281 3282 out_reject: 3283 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3284 NOTIFY_ACK_SRR_FLAGS_REJECT, 3285 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3286 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3287 } 3288 3289 /* 3290 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3291 */ 3292 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 3293 struct imm_ntfy_from_isp *iocb) 3294 { 3295 struct qla_hw_data *ha = vha->hw; 3296 uint32_t add_flags = 0; 3297 int send_notify_ack = 1; 3298 uint16_t status; 3299 3300 status = le16_to_cpu(iocb->u.isp2x.status); 3301 switch (status) { 3302 case IMM_NTFY_LIP_RESET: 3303 { 3304 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 3305 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 3306 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 3307 iocb->u.isp24.status_subcode); 3308 3309 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3310 send_notify_ack = 0; 3311 break; 3312 } 3313 3314 case IMM_NTFY_LIP_LINK_REINIT: 3315 { 3316 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3318 "qla_target(%d): LINK REINIT (loop %#x, " 3319 "subcode %x)\n", vha->vp_idx, 3320 le16_to_cpu(iocb->u.isp24.nport_handle), 3321 iocb->u.isp24.status_subcode); 3322 if (tgt->link_reinit_iocb_pending) { 3323 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 3324 0, 0, 0, 0, 0, 0); 3325 } 3326 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 3327 tgt->link_reinit_iocb_pending = 1; 3328 /* 3329 * QLogic requires to wait after LINK REINIT for possible 3330 * PDISC or ADISC ELS commands 3331 */ 3332 send_notify_ack = 0; 3333 break; 3334 } 3335 3336 case IMM_NTFY_PORT_LOGOUT: 3337 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 3338 "qla_target(%d): Port logout (loop " 3339 "%#x, subcode %x)\n", vha->vp_idx, 3340 le16_to_cpu(iocb->u.isp24.nport_handle), 3341 iocb->u.isp24.status_subcode); 3342 3343 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 3344 send_notify_ack = 0; 3345 /* The sessions will be cleared in the callback, if needed */ 3346 break; 3347 3348 case IMM_NTFY_GLBL_TPRLO: 3349 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 3350 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 3351 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3352 send_notify_ack = 0; 3353 /* The sessions will be cleared in the callback, if needed */ 3354 break; 3355 3356 case IMM_NTFY_PORT_CONFIG: 3357 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 3358 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 3359 status); 3360 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3361 send_notify_ack = 0; 3362 /* The sessions will be cleared in the callback, if needed */ 3363 break; 3364 3365 case IMM_NTFY_GLBL_LOGO: 3366 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 3367 "qla_target(%d): Link failure detected\n", 3368 vha->vp_idx); 3369 /* I_T nexus loss */ 3370 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3371 send_notify_ack = 0; 3372 break; 3373 3374 case IMM_NTFY_IOCB_OVERFLOW: 3375 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 3376 "qla_target(%d): Cannot provide requested " 3377 "capability (IOCB overflowed the immediate notify " 3378 "resource count)\n", vha->vp_idx); 3379 break; 3380 3381 case IMM_NTFY_ABORT_TASK: 3382 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 3383 "qla_target(%d): Abort Task (S %08x I %#x -> " 3384 "L %#x)\n", vha->vp_idx, 3385 le16_to_cpu(iocb->u.isp2x.seq_id), 3386 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 3387 
le16_to_cpu(iocb->u.isp2x.lun)); 3388 if (qlt_abort_task(vha, iocb) == 0) 3389 send_notify_ack = 0; 3390 break; 3391 3392 case IMM_NTFY_RESOURCE: 3393 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 3394 "qla_target(%d): Out of resources, host %ld\n", 3395 vha->vp_idx, vha->host_no); 3396 break; 3397 3398 case IMM_NTFY_MSG_RX: 3399 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 3400 "qla_target(%d): Immediate notify task %x\n", 3401 vha->vp_idx, iocb->u.isp2x.task_flags); 3402 if (qlt_handle_task_mgmt(vha, iocb) == 0) 3403 send_notify_ack = 0; 3404 break; 3405 3406 case IMM_NTFY_ELS: 3407 if (qlt_24xx_handle_els(vha, iocb) == 0) 3408 send_notify_ack = 0; 3409 break; 3410 3411 case IMM_NTFY_SRR: 3412 qlt_prepare_srr_imm(vha, iocb); 3413 send_notify_ack = 0; 3414 break; 3415 3416 default: 3417 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 3418 "qla_target(%d): Received unknown immediate " 3419 "notify status %x\n", vha->vp_idx, status); 3420 break; 3421 } 3422 3423 if (send_notify_ack) 3424 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); 3425 } 3426 3427 /* 3428 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire. 3429 * This function sends busy to ISP 2xxx or 24xx. 3430 */ 3431 static void qlt_send_busy(struct scsi_qla_host *vha, 3432 struct atio_from_isp *atio, uint16_t status) 3433 { 3434 struct ctio7_to_24xx *ctio24; 3435 struct qla_hw_data *ha = vha->hw; 3436 request_t *pkt; 3437 struct qla_tgt_sess *sess = NULL; 3438 3439 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 3440 atio->u.isp24.fcp_hdr.s_id); 3441 if (!sess) { 3442 qlt_send_term_exchange(vha, NULL, atio, 1); 3443 return; 3444 } 3445 /* Sending a marker isn't necessary, since we are called from the ISR */ 3446 3447 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3448 if (!pkt) { 3449 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, 3450 "qla_target(%d): %s failed: unable to allocate " 3451 "request packet", vha->vp_idx, __func__); 3452 return; 3453 } 3454 3455 pkt->entry_count = 1; 3456 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3457 3458 ctio24 = (struct ctio7_to_24xx *)pkt; 3459 ctio24->entry_type = CTIO_TYPE7; 3460 ctio24->nport_handle = sess->loop_id; 3461 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 3462 ctio24->vp_index = vha->vp_idx; 3463 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3464 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3465 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3466 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3467 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 3468 __constant_cpu_to_le16( 3469 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 3470 CTIO7_FLAGS_DONT_RET_CTIO); 3471 /* 3472 * A CTIO from the fw without an se_cmd doesn't provide enough info to 3473 * retry it if explicit confirmation is used.
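* That is acceptable here, though: the initiator is expected to retry a command completed with BUSY or TASK SET FULL status on its own.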
3474 */ 3475 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 3476 ctio24->u.status1.scsi_status = cpu_to_le16(status); 3477 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3478 &atio->u.isp24.fcp_cmnd.add_cdb[ 3479 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3480 if (ctio24->u.status1.residual != 0) 3481 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3482 3483 qla2x00_start_iocbs(vha, vha->req); 3484 } 3485 3486 /* ha->hardware_lock supposed to be held on entry */ 3487 /* called via callback from qla2xxx */ 3488 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 3489 struct atio_from_isp *atio) 3490 { 3491 struct qla_hw_data *ha = vha->hw; 3492 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3493 int rc; 3494 3495 if (unlikely(tgt == NULL)) { 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, 3497 "ATIO pkt, but no tgt (ha %p)", ha); 3498 return; 3499 } 3500 ql_dbg(ql_dbg_tgt, vha, 0xe02c, 3501 "qla_target(%d): ATIO pkt %p: type %02x count %02x", 3502 vha->vp_idx, atio, atio->u.raw.entry_type, 3503 atio->u.raw.entry_count); 3504 /* 3505 * In tgt_stop mode we also should allow all requests to pass. 3506 * Otherwise, some commands can stuck. 3507 */ 3508 3509 tgt->irq_cmd_count++; 3510 3511 switch (atio->u.raw.entry_type) { 3512 case ATIO_TYPE7: 3513 ql_dbg(ql_dbg_tgt, vha, 0xe02d, 3514 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " 3515 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", 3516 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, 3517 atio->u.isp24.fcp_cmnd.rddata, 3518 atio->u.isp24.fcp_cmnd.wrdata, 3519 atio->u.isp24.fcp_cmnd.add_cdb_len, 3520 be32_to_cpu(get_unaligned((uint32_t *) 3521 &atio->u.isp24.fcp_cmnd.add_cdb[ 3522 atio->u.isp24.fcp_cmnd.add_cdb_len])), 3523 atio->u.isp24.fcp_hdr.s_id[0], 3524 atio->u.isp24.fcp_hdr.s_id[1], 3525 atio->u.isp24.fcp_hdr.s_id[2]); 3526 3527 if (unlikely(atio->u.isp24.exchange_addr == 3528 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 3529 ql_dbg(ql_dbg_tgt, vha, 0xe058, 3530 "qla_target(%d): ATIO_TYPE7 " 3531 "received with UNKNOWN exchange address, " 3532 "sending QUEUE_FULL\n", vha->vp_idx); 3533 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 3534 break; 3535 } 3536 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) 3537 rc = qlt_handle_cmd_for_atio(vha, atio); 3538 else 3539 rc = qlt_handle_task_mgmt(vha, atio); 3540 if (unlikely(rc != 0)) { 3541 if (rc == -ESRCH) { 3542 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 3543 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3544 #else 3545 qlt_send_term_exchange(vha, NULL, atio, 1); 3546 #endif 3547 } else { 3548 if (tgt->tgt_stop) { 3549 ql_dbg(ql_dbg_tgt, vha, 0xe059, 3550 "qla_target: Unable to send " 3551 "command to target for req, " 3552 "ignoring.\n"); 3553 } else { 3554 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 3555 "qla_target(%d): Unable to send " 3556 "command to target, sending BUSY " 3557 "status.\n", vha->vp_idx); 3558 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3559 } 3560 } 3561 } 3562 break; 3563 3564 case IMMED_NOTIFY_TYPE: 3565 { 3566 if (unlikely(atio->u.isp2x.entry_status != 0)) { 3567 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 3568 "qla_target(%d): Received ATIO packet %x " 3569 "with error status %x\n", vha->vp_idx, 3570 atio->u.raw.entry_type, 3571 atio->u.isp2x.entry_status); 3572 break; 3573 } 3574 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 3575 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 3576 break; 3577 } 3578 3579 default: 3580 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 3581 "qla_target(%d): Received unknown ATIO atio " 3582 "type %x\n", 
vha->vp_idx, atio->u.raw.entry_type); 3583 break; 3584 } 3585 3586 tgt->irq_cmd_count--; 3587 } 3588 3589 /* ha->hardware_lock supposed to be held on entry */ 3590 /* called via callback from qla2xxx */ 3591 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 3592 { 3593 struct qla_hw_data *ha = vha->hw; 3594 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3595 3596 if (unlikely(tgt == NULL)) { 3597 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 3598 "qla_target(%d): Response pkt %x received, but no " 3599 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); 3600 return; 3601 } 3602 3603 ql_dbg(ql_dbg_tgt, vha, 0xe02f, 3604 "qla_target(%d): response pkt %p: T %02x C %02x S %02x " 3605 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, 3606 pkt->entry_count, pkt->entry_status, pkt->handle); 3607 3608 /* 3609 * In tgt_stop mode we also should allow all requests to pass. 3610 * Otherwise, some commands can stuck. 3611 */ 3612 3613 tgt->irq_cmd_count++; 3614 3615 switch (pkt->entry_type) { 3616 case CTIO_TYPE7: 3617 { 3618 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 3619 ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", 3620 vha->vp_idx); 3621 qlt_do_ctio_completion(vha, entry->handle, 3622 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 3623 entry); 3624 break; 3625 } 3626 3627 case ACCEPT_TGT_IO_TYPE: 3628 { 3629 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 3630 int rc; 3631 ql_dbg(ql_dbg_tgt, vha, 0xe031, 3632 "ACCEPT_TGT_IO instance %d status %04x " 3633 "lun %04x read/write %d data_length %04x " 3634 "target_id %02x rx_id %04x\n ", vha->vp_idx, 3635 le16_to_cpu(atio->u.isp2x.status), 3636 le16_to_cpu(atio->u.isp2x.lun), 3637 atio->u.isp2x.execution_codes, 3638 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, 3639 atio), atio->u.isp2x.rx_id); 3640 if (atio->u.isp2x.status != 3641 __constant_cpu_to_le16(ATIO_CDB_VALID)) { 3642 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 3643 "qla_target(%d): ATIO with error " 3644 "status %x received\n", vha->vp_idx, 3645 le16_to_cpu(atio->u.isp2x.status)); 3646 break; 3647 } 3648 ql_dbg(ql_dbg_tgt, vha, 0xe032, 3649 "FCP CDB: 0x%02x, sizeof(cdb): %lu", 3650 atio->u.isp2x.cdb[0], (unsigned long 3651 int)sizeof(atio->u.isp2x.cdb)); 3652 3653 rc = qlt_handle_cmd_for_atio(vha, atio); 3654 if (unlikely(rc != 0)) { 3655 if (rc == -ESRCH) { 3656 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 3657 qlt_send_busy(vha, atio, 0); 3658 #else 3659 qlt_send_term_exchange(vha, NULL, atio, 1); 3660 #endif 3661 } else { 3662 if (tgt->tgt_stop) { 3663 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 3664 "qla_target: Unable to send " 3665 "command to target, sending TERM " 3666 "EXCHANGE for rsp\n"); 3667 qlt_send_term_exchange(vha, NULL, 3668 atio, 1); 3669 } else { 3670 ql_dbg(ql_dbg_tgt, vha, 0xe060, 3671 "qla_target(%d): Unable to send " 3672 "command to target, sending BUSY " 3673 "status\n", vha->vp_idx); 3674 qlt_send_busy(vha, atio, 0); 3675 } 3676 } 3677 } 3678 } 3679 break; 3680 3681 case CONTINUE_TGT_IO_TYPE: 3682 { 3683 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 3684 ql_dbg(ql_dbg_tgt, vha, 0xe033, 3685 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); 3686 qlt_do_ctio_completion(vha, entry->handle, 3687 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 3688 entry); 3689 break; 3690 } 3691 3692 case CTIO_A64_TYPE: 3693 { 3694 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 3695 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", 3696 vha->vp_idx); 3697 qlt_do_ctio_completion(vha, 
entry->handle, 3698 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 3699 entry); 3700 break; 3701 } 3702 3703 case IMMED_NOTIFY_TYPE: 3704 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 3705 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 3706 break; 3707 3708 case NOTIFY_ACK_TYPE: 3709 if (tgt->notify_ack_expected > 0) { 3710 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 3711 ql_dbg(ql_dbg_tgt, vha, 0xe036, 3712 "NOTIFY_ACK seq %08x status %x\n", 3713 le16_to_cpu(entry->u.isp2x.seq_id), 3714 le16_to_cpu(entry->u.isp2x.status)); 3715 tgt->notify_ack_expected--; 3716 if (entry->u.isp2x.status != 3717 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 3718 ql_dbg(ql_dbg_tgt, vha, 0xe061, 3719 "qla_target(%d): NOTIFY_ACK " 3720 "failed %x\n", vha->vp_idx, 3721 le16_to_cpu(entry->u.isp2x.status)); 3722 } 3723 } else { 3724 ql_dbg(ql_dbg_tgt, vha, 0xe062, 3725 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 3726 vha->vp_idx); 3727 } 3728 break; 3729 3730 case ABTS_RECV_24XX: 3731 ql_dbg(ql_dbg_tgt, vha, 0xe037, 3732 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 3733 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 3734 break; 3735 3736 case ABTS_RESP_24XX: 3737 if (tgt->abts_resp_expected > 0) { 3738 struct abts_resp_from_24xx_fw *entry = 3739 (struct abts_resp_from_24xx_fw *)pkt; 3740 ql_dbg(ql_dbg_tgt, vha, 0xe038, 3741 "ABTS_RESP_24XX: compl_status %x\n", 3742 entry->compl_status); 3743 tgt->abts_resp_expected--; 3744 if (le16_to_cpu(entry->compl_status) != 3745 ABTS_RESP_COMPL_SUCCESS) { 3746 if ((entry->error_subcode1 == 0x1E) && 3747 (entry->error_subcode2 == 0)) { 3748 /* 3749 * We've hit a race here: the aborted 3750 * exchange was not terminated, i.e. a 3751 * response for the aborted command was 3752 * sent between the time the abort 3753 * request was received and the time it 3754 * was processed. Unfortunately, the 3755 * firmware requires that all aborted 3756 * exchanges be explicitly terminated, 3757 * otherwise it refuses to send 3758 * responses for the abort requests. 3759 * So, we have to 3760 * (re)terminate the exchange and retry 3761 * the abort response. 3762 */ 3763 qlt_24xx_retry_term_exchange(vha, 3764 entry); 3765 } else 3766 ql_dbg(ql_dbg_tgt, vha, 0xe063, 3767 "qla_target(%d): ABTS_RESP_24XX " 3768 "failed %x (subcode %x:%x)", 3769 vha->vp_idx, entry->compl_status, 3770 entry->error_subcode1, 3771 entry->error_subcode2); 3772 } 3773 } else { 3774 ql_dbg(ql_dbg_tgt, vha, 0xe064, 3775 "qla_target(%d): Unexpected ABTS_RESP_24XX " 3776 "received\n", vha->vp_idx); 3777 } 3778 break; 3779 3780 default: 3781 ql_dbg(ql_dbg_tgt, vha, 0xe065, 3782 "qla_target(%d): Received unknown response pkt " 3783 "type %x\n", vha->vp_idx, pkt->entry_type); 3784 break; 3785 } 3786 3787 tgt->irq_cmd_count--; 3788 } 3789 3790 /* 3791 * ha->hardware_lock supposed to be held on entry.
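* Async events are ignored below unless target mode is active (ha->tgt.tgt_ops set and a tgt allocated).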
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
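/*
 * Helper for qlt_make_local_sess() below: allocates a temporary fc_port_t
 * and fills it from the firmware port database for the given loop_id.  The
 * caller owns the returned structure and must kfree() it (see the kfree()
 * in qlt_make_local_sess()).  Returns NULL on allocation or mailbox
 * failure.
 */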
"qla_target(%d): Allocation of tmp FC port failed", 3902 vha->vp_idx); 3903 return NULL; 3904 } 3905 3906 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); 3907 3908 fcport->loop_id = loop_id; 3909 3910 rc = qla2x00_get_port_database(vha, fcport, 0); 3911 if (rc != QLA_SUCCESS) { 3912 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 3913 "qla_target(%d): Failed to retrieve fcport " 3914 "information -- get_port_database() returned %x " 3915 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 3916 kfree(fcport); 3917 return NULL; 3918 } 3919 3920 return fcport; 3921 } 3922 3923 /* Must be called under tgt_mutex */ 3924 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 3925 uint8_t *s_id) 3926 { 3927 struct qla_tgt_sess *sess = NULL; 3928 fc_port_t *fcport = NULL; 3929 int rc, global_resets; 3930 uint16_t loop_id = 0; 3931 3932 retry: 3933 global_resets = 3934 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 3935 3936 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 3937 if (rc != 0) { 3938 if ((s_id[0] == 0xFF) && 3939 (s_id[1] == 0xFC)) { 3940 /* 3941 * This is Domain Controller, so it should be 3942 * OK to drop SCSI commands from it. 3943 */ 3944 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 3945 "Unable to find initiator with S_ID %x:%x:%x", 3946 s_id[0], s_id[1], s_id[2]); 3947 } else 3948 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, 3949 "qla_target(%d): Unable to find " 3950 "initiator with S_ID %x:%x:%x", 3951 vha->vp_idx, s_id[0], s_id[1], 3952 s_id[2]); 3953 return NULL; 3954 } 3955 3956 fcport = qlt_get_port_database(vha, loop_id); 3957 if (!fcport) 3958 return NULL; 3959 3960 if (global_resets != 3961 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 3962 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 3963 "qla_target(%d): global reset during session discovery " 3964 "(counter was %d, new %d), retrying", vha->vp_idx, 3965 global_resets, 3966 atomic_read(&vha->vha_tgt. 
	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* fcp_hdr_le carries the S_ID in little-endian byte order */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
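/*
 * qlt_abort_work() and qlt_tmr_work() above run from process context via
 * tgt->sess_work: ABTS and task-management IOCBs that arrive for a not yet
 * known initiator are deferred to this path so that session lookup and
 * creation may sleep.  Queueing side, roughly (the real helper,
 * qlt_sched_sess_work(), lives elsewhere in this file; sketch only):
 *
 *	prm->type = QLA_TGT_SESS_WORK_ABORT;	// or QLA_TGT_SESS_WORK_TM
 *	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
 *	schedule_work(&tgt->sess_work);
 */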
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
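/*
 * qlt_add_target()/qlt_remove_target() pair up around the life of an HBA:
 * registration happens from the base driver's probe path for the physical
 * port and from qlt_vport_create() below for NPIV ports, both under
 * tgt_host_action_mutex; qlt_remove_target() undoes the qla_tgt_glist
 * linkage and releases the qla_tgt via qlt_release().
 */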
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN passed in from the fabric module
 * @npiv_wwpn: NPIV WWPN (zero for a physical port)
 * @npiv_wwnn: NPIV WWNN (zero for a physical port)
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
	u64 npiv_wwpn, u64 npiv_wwnn,
	int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) &&
		    host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		mutex_unlock(&qla_tgt_mutex);

		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;

	/*
	 * Clear the target_lport_ptr qla_target_template pointer in
	 * qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
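/*
 * qlt_set_mode()/qlt_clear_mode() below translate the qlini_mode module
 * parameter into scsi_host active_mode bits when target mode is switched
 * on or off.  Summarized (assuming initiator mode was active beforehand):
 *
 *	qlini_mode	qlt_set_mode()			qlt_clear_mode()
 *	"disabled"	MODE_TARGET			MODE_UNKNOWN
 *	"exclusive"	MODE_TARGET			MODE_INITIATOR
 *	"enabled"	MODE_INITIATOR|MODE_TARGET	MODE_INITIATOR
 */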
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
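/*
 * RFF_ID FC-4 Features word as registered with the fabric name server
 * (FC-GS): BIT_0 advertises target functionality, BIT_1 advertises
 * initiator functionality; a dual-mode port therefore registers
 * BIT_0 | BIT_1, as qlt_rff_id() below computes.
 */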
void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name
	 * server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for ATIO queue.\n",
		    msix->entry);
	}
}
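/*
 * The *_config_nvram_stage1() hooks below adjust the NVRAM-derived firmware
 * options before initialization: the initiator-mode values are snapshotted
 * once into ha->tgt.saved_* so that disabling target mode can restore them,
 * then the target-mode overlay is applied (unlimited exchange_count,
 * firmware_options_1 BIT_4 for target mode, optional BIT_5 to disable
 * initiator mode, and related LIP/PRLI tweaks).
 */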
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
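/*
 * The ISP81xx variants below apply the same save/overlay/restore logic as
 * the 24xx hooks above, just against the struct nvram_81xx and
 * struct init_cb_81xx layouts.
 */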
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}
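/*
 * tgt_vp_map (allocated in qlt_mem_alloc() above) backs the
 * qlt_find_host_by_d_id()/qlt_find_host_by_vp_idx() lookups: indexed by
 * vp_idx it yields the owning scsi_qla_host, and indexed by AL_PA it yields
 * the vp_idx.  A vport setup path is expected to do, in order (sketch
 * only):
 *
 *	qlt_update_vp_map(vha, SET_VP_IDX);
 *	...
 *	qlt_update_vp_map(vha, SET_AL_PA);
 */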
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd),
	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
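/*
 * qlt_init() intentionally returns 1 (not 0) on success when
 * qlini_mode=disabled, so module-init code can tell "target mode ready,
 * initiator mode to be disabled" apart from the plain success case.  A
 * hypothetical caller would look like this (sketch only; the flag name is
 * invented for illustration):
 *
 *	ret = qlt_init();
 *	if (ret < 0)
 *		return ret;			// hard failure
 *	if (ret > 0)
 *		disable_initiator_mode = 1;	// hypothetical flag
 */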